请教个python scrapy 深层爬虫问题

爬取了导航后,想继续深入导航的URL爬取,然后统一返回值写入xlsx

clipboard.png

-- coding: utf-8 --

from lagou.items import LagouItem;
import scrapy

class LaGouSpider(scrapy.Spider):

name="lagou"
start_urls = ['https://www.lagou.com/']
headers = {
    "Host": "onlinelibrary.wiley.com",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
    "Accept-Encoding": "gzip, deflate",
    "Referer": "http://onlinelibrary.wiley.com/journal/10.1002/(ISSN)1521-3773",
    "Cookie": "EuCookie='this site uses cookies'; __utma=235730399.1295424692.1421928359.1447763419.1447815829.20; s_fid=2945BB418F8B3FEE-1902CCBEDBBA7EA2; __atuvc=0%7C37%2C0%7C38%2C0%7C39%2C0%7C40%2C3%7C41; __gads=ID=44b4ae1ff8e30f86:T=1423626648:S=ALNI_MalhqbGv303qnu14HBk1HfhJIDrfQ; __utmz=235730399.1447763419.19.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; TrackJS=c428ef97-432b-443e-bdfe-0880dcf38417; OLProdServerID=1026; JSESSIONID=441E57608CA4A81DFA82F4C7432B400F.f03t02; WOLSIGNATURE=7f89d4e4-d588-49a2-9f19-26490ac3cdd3; REPORTINGWOLSIGNATURE=7306160150857908530; __utmc=235730399; s_vnum=1450355421193%26vn%3D2; s_cc=true; __utmb=235730399.3.10.1447815829; __utmt=1; s_invisit=true; s_visit=1; s_prevChannel=JOURNALS; s_prevProp1=TITLE_HOME; s_prevProp2=TITLE_HOME",
    "Connection": "keep-alive"
}

pass

def parse(self, response):

    mainNavs = response.xpath('//*[@class="menu_sub dn"]//dl');

    for content in mainNavs:
        item = LagouItem();
        # 搜索mainNavs下的所有必须加上'.//dt', 否则//dt搜索的是整个文档的所有 , .很重要
        item['nav'] = content.xpath(".//dt//span//text()").extract_first();#头描述
        nav_title = content.xpath(".//dd//a");

        for nav in nav_title:
            item['url'] = nav.xpath(".//@href").extract_first()
            item['title'] = nav.xpath(".//text()").extract_first()
            #if item['url'] is not None:

            #得到 tem['url'] 继续 爬取
            #yield item
            # 这里怎么没有返回值了
            request = scrapy.http.Request(item['url'],headers=self.headers,callback=self.load_url);

            yield request;


#继续深入
def load_url(self,response):

    aaa = response.xpath('//title/text()').extract_first()
    print(aaa) #没有值??
    print("----------------------")








阅读 1.4k
撰写回答
你尚未登录,登录后可以
  • 和开发者交流问题的细节
  • 关注并接收问题和回答的更新提醒
  • 参与内容的编辑和改进,让解决方法与时俱进