Python3.6 下的爬虫总是重复爬第一页的内容

问题如题:循环里每次请求拿到的都是第一页的内容。
也试过改成 while 循环等多种写法,仍然没有效果,请教大家。

# coding:utf-8
#  
from lxml import etree
import requests,lxml.html,os

class MyError(Exception):
    """Custom exception that carries an arbitrary payload describing the error."""

    def __init__(self, value):
        # Keep the payload accessible to handlers via exc.value.
        self.value = value

    def __str__(self):
        # !r applies repr(), so string payloads keep their quotes when printed.
        return "{!r}".format(self.value)
      
def get_lawyers_info(url):
    """Scrape one listing page and return formatted "name: phone" lines.

    Parameters:
        url: the listing-page URL to fetch.
    Returns:
        A list of str, one "name: phone\r\n" entry per lawyer on the page.
    Raises:
        MyError: if the page yields a different number of names and phones,
            which would make positional pairing unreliable.
    """
    r = requests.get(url)
    html = lxml.html.fromstring(r.content)
    phones = html.xpath('//span[@class="phone pull-right"]')
    names = html.xpath('//h4[@class="text-center"]')
    # Counts must match, otherwise zip() below would silently drop entries.
    if len(phones) != len(names):
        error = "Lawyers amount are not equal to the amount of phone_nums: " + url
        raise MyError(error)
    phone_infos_list = []
    for name, phone in zip(names, phones):
        # name.text may be None or "" when the lawyer left no name;
        # the original only checked "" and crashed on None during concatenation.
        label = name.text if name.text else "没留姓名"
        info = label + ": " + phone.text_content() + "\r\n"
        print(info)
        phone_infos_list.append(info)
    return phone_infos_list

# Resolve the output file next to this script so the result does not depend
# on the current working directory.
dir_path = os.path.abspath(os.path.dirname(__file__))
print(dir_path)
file_path = os.path.join(dir_path, "lawyers_info.txt")
print(file_path)
# Start from a clean file on every run.
if os.path.exists(file_path):
    os.remove(file_path)

# BUG FIX: the original removed the absolute file_path above but then opened
# the CWD-relative "lawyers_info.txt" — cleanup and writing could target two
# different files. Open the same absolute path for both.
with open(file_path, "ab") as file:
    # Iterate page numbers 1..1000 directly (same range as range(1000) + i+1),
    # building a fresh URL each iteration so every request hits a new page.
    for page in range(1, 1001):
        url = ("http://www.xxxx.com/cooperative_merchants?searchText=&"
               "industry=100&provinceId=19&cityId=0&areaId=0&page=" + str(page))
        info = get_lawyers_info(url)
        # The site's data is Chinese; the original encodes as GBK on write.
        for each in info:
            file.write(each.encode("gbk"))
阅读 4.2k
1 个回答
# coding: utf-8

import requests
from pyquery import PyQuery as Q

url = 'http://www.51myd.com/cooperative_merchants?industry=100&provinceId=19&cityId=0&areaId=0&page='

with open('lawyers_info.txt', 'ab') as f:
    for i in range(1, 5):
        # Append the page number to the base URL so each request is a new page.
        r = requests.get('{}{}'.format(url, i))
        usernames = Q(r.text).find('.username').text().split()
        phones = Q(r.text).find('.phone').text().split()

        # BUG FIX: `print zip(...)` is Python 2 syntax; the question targets
        # Python 3.6, where print is a function and zip returns a lazy
        # iterator that must be materialized to display its contents.
        pairs = list(zip(usernames, phones))
        print(pairs)
        # Persist the results — the original opened f but never wrote to it.
        for name, phone in pairs:
            f.write('{}: {}\r\n'.format(name, phone).encode('gbk'))
撰写回答
你尚未登录,登录后可以
  • 和开发者交流问题的细节
  • 关注并接收问题和回答的更新提醒
  • 参与内容的编辑和改进,让解决方法与时俱进
推荐问题