Splicing JSON data together in a Python scraper

I'm new to Python scraping and want to pull content from this site; hoping someone here can help:
http://app.100xuexi.com/TK/Qu...
The JSON it returns is structured roughly like this:

{
  "result": 1,
  "data": {
    "name": "2018",
    "nodeList": [
      {
        "name": "单选题",
        "questionLists": [
          {
            "questionId": "9d00dd42-bb88-45d4-af3b-7015fb091d2f",
            "QuestionScore": "1.50"
          },
          {
            "questionId": "72a8285e-57b7-4c17-811e-dc786e4e8114",
            "QuestionScore": "1.50"
          }
        ]
      },
      {
        "name": "多选题",
        "questionLists": [
          {
            "questionId": "f0bd5a5d-e486-4148-9f4c-a753ce2f901d",
            "QuestionScore": "1.50"
          },
          {
            "questionId": "c0860e17-8004-45b4-bd65-40b4110ae4a8",
            "QuestionScore": "1.50"
          }
        ]
      }
    ]
  }
}

With a questionId from that result, e.g. "9d00dd42-bb88-45d4-af3b-7015fb091d2f", I can build a second URL: http://app.100xuexi.com/TK/TK...
Requesting that URL returns a second JSON document with the full details of that question.

  • Question 1: Why doesn't this site's backend keep these two pieces of data in one MongoDB document instead of splitting them up? Is there some benefit?
  • Question 2: I want to splice the detail data into the first JSON document, as a sibling of the questionId key-value pair. What method should I use? (A sketch of what I mean follows this list.)
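
To make question 2 concrete, here is the kind of merge I'm imagining, as an untested sketch; the key name questionDetail and the helper name merge_question_details are names I made up, not anything from the API:

import requests

HEADERS = {'User-Agent': 'SCeBook/5.4.3 (iPhone; iOS 12.1.4; Scale/2.00)'}


def merge_question_details(paper_json):
    # Walk every question in the paper JSON and attach its detail JSON
    # in place, as a sibling of questionId and QuestionScore.
    for node in paper_json['data']['nodeList']:
        for question in node['questionLists']:
            url = ('http://app.100xuexi.com/TK/TKQuestionsHandle.ashx'
                   '?id=%s&method=Questions' % question['questionId'])
            detail = requests.get(url, headers=HEADERS, timeout=5).json()
            # 'questionDetail' is a made-up key, not part of the API response
            question['questionDetail'] = detail.get('data')
    return paper_json

Is mutating the parsed dict in place like this the idiomatic way, or is there a better approach?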

I've already written part of the code; everything I don't understand is marked with a TODO. Please take a look.

import requests
import json
import pymongo
# from myLog import MyLog as mylog
# log = mylog()


# Overall logic: requesting the paperID URL does not return the actual question
# content, only questionIDs; each questionID has to be built into a URL to get
# the full question, so the plan is to fetch the details, merge them back into
# the paper JSON, and then store everything in the database.
def paperID_spider():
    # Fetch the paper JSON for this paperID
    paperID_url = 'http://app.100xuexi.com/TK/QuePlanPractice.ashx?method=GetPaperAnswerSheetNew_v2&paperID=a823eddf-d157-4566-be06-b32fe4ee59ac'
    user_agent = 'SCeBook/5.4.3 (iPhone; iOS 12.1.4; Scale/2.00)'
    headers = {'User-Agent': user_agent}
    try:
        paperID_res = requests.get(paperID_url, headers=headers, timeout=5)
    except requests.RequestException:
        # TODO: what to do when the request fails? Retry, or log the failed
        # URL and revisit it later? (see the retry sketch after the code)
        # log.info('paperID_url failed: %s' % paperID_url)
        return  # without this, paperID_res would be undefined below
    paperID_json = paperID_res.json()
    paperID_result_code = paperID_json['result']
    if paperID_result_code == 1:
        # TODO: is the success result code worth saving, or only the useful data part?
        try:
            # TODO: save the paper JSON to Mongo first and then crawl the questionIDs,
            # or merge the questionID details into the paper JSON before saving?
            # (a merge-then-save sketch is at the end of the post)
            save_mongo_paperID_json(paperID_json)
        except pymongo.errors.PyMongoError:
            # TODO: what should happen when the save fails?
            # log.info('failed to save paperID_json: %s' % paperID_url)
            pass
        # TODO: does passing the whole paperID_json here waste memory? Would looping
        # over paperID_json['data']['nodeList'] and passing pieces use less?
        questionID_spider(paperID_json)
    else:
        # log.info('request failed: %s' % paperID_url)
        print(paperID_json['result'])
        # TODO: what should happen when the result code is wrong?
        pass


def questionID_spider(paperID_json):
    # Goal: store the questionID data inside paperID.data.nodeList, alongside questionId
    nodeLists = paperID_json['data']['nodeList']
    for nodeList in nodeLists:
        questionLists = nodeList['questionLists']
        for questionList in questionLists:
            questionID = questionList['questionId']
            print(questionID)
            questionID_url = 'http://app.100xuexi.com/TK/TKQuestionsHandle.ashx?id=%s&method=Questions' %questionID
            print(questionID_url)
            # TODO: do the request headers need to be defined again here?
            user_agent = 'SCeBook/5.4.3 (iPhone; iOS 12.1.4; Scale/2.00)'
            headers = {'User-Agent': user_agent}
            questionID_res = requests.get(questionID_url, headers=headers, timeout=5)
            questionID_json = questionID_res.json()
            questionID_result_code = questionID_json['status']
            # TODO: how do I store this questionID data inside paperID.data.nodeList, alongside questionId?
            # TODO: option 1 - splice it straight into the paperID data
            # TODO: option 2 - save it to MongoDB on its own
            print(questionID_json['data']['QuestionContent'])


def save_mongo_paperID_json(paperID_json):
    client = pymongo.MongoClient()
    db = client['tiku']
    collection = db['tiku']
    # TODO: how do I insert paperID_json here so the two JSON documents end up combined?
    tiku = collection.insert_one(paperID_json)


if __name__ == '__main__':
    paperID_spider()
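
For the failed-request TODO in paperID_spider, the direction I'm considering is a shared session with automatic retries. An untested sketch, assuming requests plus urllib3's Retry helper (make_session is my own name):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


def make_session():
    # my own helper: retry transient failures (connection errors and
    # 429/5xx responses) with exponential backoff before giving up
    retry = Retry(total=3, backoff_factor=1,
                  status_forcelist=[429, 500, 502, 503, 504])
    session = requests.Session()
    session.mount('http://', HTTPAdapter(max_retries=retry))
    session.mount('https://', HTTPAdapter(max_retries=retry))
    return session

Every requests.get above would then become session.get, and any URL that still fails after the retries could be written to a log for a later pass.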
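
And for the save-order TODO, my current plan is to merge first and write once, reusing the merge_question_details sketch from earlier (save_merged_paper is another made-up name):

import pymongo


def save_merged_paper(paper_json):
    # one write per paper: splice the question details in first,
    # then insert the combined document into MongoDB
    client = pymongo.MongoClient()
    collection = client['tiku']['tiku']
    merged = merge_question_details(paper_json)  # sketch defined earlier
    collection.insert_one(merged)

Does saving the combined document with a single insert_one call like this make sense, or is there a reason to keep the two pieces in separate collections (back to question 1)?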