#coding:utf-8
from collections import Counter
import requests
from bs4 import BeautifulSoup
from urllib import request
import urllib
# Fetch the first page of Baidu search results for the keyword "python3"
# and print the title text of each organic result.
#
# NOTE(review): Baidu varies results per client (cookies, geo, UA), so the
# output here will generally differ from what a logged-in browser shows;
# sending the browser's Cookie header in `headers` reproduces browser results.
url = 'https://www.baidu.com.cn/s?wd=' + urllib.parse.quote('python3') + '&pn='  # wd is the query keyword; pn is Baidu's pagination offset (empty -> first page)
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    'Connection': 'keep-alive',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'}
# timeout prevents the script from hanging forever on a stalled connection
r = requests.get(url, headers=headers, timeout=10)
r.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
# Baidu wraps matched terms in <b> tags; strip them so getText() yields clean titles
soup = BeautifulSoup(r.text.replace('<b>', '').replace('</b>', ''), 'lxml')
url_list = soup.select('div.result > h3 > a')  # anchor of each organic result block
for i in url_list:
    print(i.getText())
以下是在浏览器中搜索 python3 得到的前四个结果。当我用上面的 Python3 程序抓取前十个结果时,内容与浏览器中显示的不一致,试过很多关键词都是如此。哪位朋友能讲讲原因?是我程序的问题,还是百度做了什么处理?谢谢!
你把本地浏览器里的 Cookie 加到请求头(headers)里再试试:百度会根据 Cookie 对结果做个性化处理,没有 Cookie 时返回的结果自然和浏览器里看到的不一样。