Scrapy: yielding Requests for three different pages from one callback, but only the last request executes

I'm new to this, so please bear with me.

As the title says, here is my code.

# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
import re
from jingdong.items import JingdongItem
from jingdong.settings import *


class GoodsSpider(scrapy.Spider):
    name = 'goods'
    allowed_domains = ['jd.com']
    start_urls = ['http://jd.com/']


    # https://search.jd.com/Search?keyword=图书&enc=utf-8&wq=图书&page=1

    # Starting URL template for the search pages
    url = "https://search.jd.com/Search?keyword={KEYWORDS}&enc=utf-8&wq={KEYWORDS}&page={page}"

    # E-book price
    Eprice_url = "https://c.3.cn/book?skuId={skuId}&cat={cat}&area=1_72_2799_0&callback=book_jsonp_callback"

    # Product price
    price_url = "https://p.3.cn/prices/mgets?type=1&area=1_72_2799_0&pdtk=&pduid=1771569446&pdpin=&pdbp=0&skuIds=J_{skuId}&ext=11100000&callback=jQuery3021180&_=1547383556702"
    # Note: the literal braces in extraParam are doubled so that str.format() leaves them intact
    price2_url = 'https://c0.3.cn/stock?skuId={skuId}&venderId=1000005720&cat={cat}&area=1_72_2799_0&buyNum=1&extraParam={{%22originid%22:%221%22}}&ch=1&pduid=1771569446&pdpin=&fqsp=0&callback=getStockCallback'

    # Comments
    comment_url = "https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv39228&productId={skuId}&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1"

    def start_requests(self):
        for k in range(1, PAGE_NUM):
            yield Request(url=self.url.format(KEYWORDS=KEYWORDS, page=2*k-1), callback=self.page_parse)


    def page_parse(self, response):
        # Product IDs on this search results page
        goodsID = response.xpath('//li/@data-sku').extract()
        print(goodsID)

        for each in goodsID:
            goodsurl = "https://item.jd.com/{}.html".format(each)
            yield Request(url=goodsurl,callback=self.get_goods_info)

    def get_goods_info(self,response):

        item = JingdongItem()

        # Book link
        item["link"] = response.url

        # Book title
        item["title"] = response.xpath('//div[@class="sku-name"]/text()').extract()[0].strip()

        # Author
        item["writer"] = response.xpath('//div[@class="p-author"]/a/text()').extract()

        # Extract the product ID (skuId) from the item URL
        skuId = re.compile(r'https://item\.jd\.com/(\d+)\.html').findall(response.url)[0]
        item['Id'] = skuId
        # Extract the category ID from the page source and turn "a|b|c" into "a,b,c"
        cat = re.compile(r'pcat:\[(.*?)\],').findall(response.text)
        cat = re.sub(r"\|", ",", cat[0]).strip("'")
        item['catId'] = cat
        print(skuId)
        print(cat)

        # Fetch the e-book price
        yield Request(url=self.Eprice_url.format(skuId=skuId, cat=cat), meta={'item': item}, callback=self.price_parse)
        # Fetch the original price and the JD price
        yield Request(url=self.price_url.format(skuId=skuId), meta={'item': item}, callback=self.jingdong_price_parse)
        # Fetch the comment JSON
        yield Request(url=self.comment_url.format(skuId=skuId), meta={'item': item}, callback=self.comment_parse)



    def price_parse(self,response):
        item =response.meta['item']
        # E-book price
        item["e_price"] = re.compile('"p":"(.*?)",').findall(response.text)[0]
        yield item

    def jingdong_price_parse(self,response):
        item = response.meta['item']
        # JD price (findall returns an empty list when nothing matches,
        # so take the first hit or fall back to None)
        n_price = re.compile('"op":"(.*?)",').findall(response.text)
        item["n_price"] = n_price[0] if n_price else None
        print(item["n_price"])
        # Original price
        o_price = re.compile('"m":"(.*?)",').findall(response.text)
        item["o_price"] = o_price[0] if o_price else None

        if item["n_price"] is None and item["o_price"] is None:
            # Fall back to the stock endpoint; note the item key is 'Id', not 'id'
            yield Request(url=self.price2_url.format(skuId=item['Id'], cat=item['catId']), meta={'item': item}, callback=self.jingdong_price_parse)
        else:
            yield item


    def comment_parse(self,response):
        item = response.meta['item']
        # Comments (the regex extracts the "content" fields from the JSON, not a count)
        item["comment"] =re.compile('"content":"(.*?)",').findall(response.text)

        yield item

Of the three requests below, only the last one (the comments request) is actually executed. For the other two I tried fetching the same URLs directly with import requests and applying the same regex patterns, and that worked and returned the data. So I can't figure out why these requests won't run inside Scrapy?

        # Fetch the e-book price
        yield Request(url=self.Eprice_url.format(skuId=skuId, cat=cat), meta={'item': item}, callback=self.price_parse)
        # Fetch the original price and the JD price
        yield Request(url=self.price_url.format(skuId=skuId), meta={'item': item}, callback=self.jingdong_price_parse)
        # Fetch the comment JSON
        yield Request(url=self.comment_url.format(skuId=skuId), meta={'item': item}, callback=self.comment_parse)
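
For reference, this is roughly the kind of direct check I mean (a minimal sketch, assuming the same price_url template and the same "op"/"m" regexes as in the spider; the skuId value below is just a placeholder):

import re
import requests

# Same template as price_url in the spider above
price_url = ("https://p.3.cn/prices/mgets?type=1&area=1_72_2799_0&pdtk=&pduid=1771569446"
             "&pdpin=&pdbp=0&skuIds=J_{skuId}&ext=11100000&callback=jQuery3021180&_=1547383556702")

resp = requests.get(price_url.format(skuId="12345678"))  # placeholder skuId
print(re.findall('"op":"(.*?)",', resp.text))  # JD price
print(re.findall('"m":"(.*?)",', resp.text))   # original price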