Encoding error when using requests in a web crawler.

#-*-coding:utf-8-*-
import requests

def load_url(url,file_name):
    try:
        my_headers = {
            'Agent-User': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.354.0 Safari/533.3'}
        re=requests.get(url,headers=my_headers)
        re.raise_for_status()
        re.encoding=re.apparent_encoding
        print('爬取%s内容完成'%file_name)
        return re.text
    except:
        print('爬取失败!')

def save_data(data,file_name):
    print('开始保存文件%s'%file_name)
    with open(file_name,'w') as f:
        f.write(data)
    print('文件 %s保存完成!'%file_name)

def spider(kw,begin,end):
    for page in range(begin,end+1):
        pn=(begin-1)*50
        kw={'kw':kw}
        full_url='http://tieba.baidu.com/f?'+'kw='+kw['kw']+'&ie=utf-8&pn='+str(pn)
        print(full_url)
        file_name='网页'+str(page)+'.html'
        html=load_url(full_url,file_name)
        save_data(html,file_name)

if __name__=='__main__':
    #url = 'http://tieba.baidu.com/f?'
    kw=input('请输入爬取的贴吧名称:')
    begin=int(input('请输入爬取开始的页号:'))
    end=int(input('爬取结束的页号:'))
    spider(kw,begin,end)


Error message:
F:\Python\python.exe F:/Python/练习夹/spider/tiebaCase.py
请输入爬取的贴吧名称:战狼2
请输入爬取开始的页号:1
爬取结束的页号:2
http://tieba.baidu.com/f?kw=战狼2&ie=utf-8&pn=0
爬取网页1.html内容完成
Traceback (most recent call last):
开始保存文件网页1.html
  File "F:/Python/练习夹/spider/tiebaCase.py", line 37, in <module>
    spider(kw,begin,end)
  File "F:/Python/练习夹/spider/tiebaCase.py", line 30, in spider
    save_data(html,file_name)
  File "F:/Python/练习夹/spider/tiebaCase.py", line 19, in save_data
    f.write(data)
UnicodeEncodeError: 'gbk' codec can't encode character '\xe7' in position 265: illegal multibyte sequence
How can I fix this error?
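For context on the error itself: on a Chinese-locale Windows machine, `open()` without an explicit `encoding=` argument falls back to the locale code page (gbk), so writing any character that gbk cannot represent raises exactly this exception. A minimal sketch of the same failure mode, assuming such a locale (the file name is just a placeholder):

# Minimal repro sketch: assumes Windows with a Chinese locale, where
# open() without encoding= defaults to the ANSI code page (gbk/cp936).
text = '\xe7'                      # U+00E7 ('ç') has no gbk representation
with open('demo.txt', 'w') as f:   # encoding defaults to locale.getpreferredencoding()
    f.write(text)                  # UnicodeEncodeError: 'gbk' codec can't encode character '\xe7'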
1 Answer
# coding: utf-8

from __future__ import unicode_literals

import requests
import codecs


def load_url(url, file_name):
    try:
        my_headers = {
            # Header name fixed: it must be 'User-Agent', not 'Agent-User'.
            'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.354.0 Safari/533.3'}
        response = requests.get(url, headers=my_headers)
        print('爬取%s内容完成' % file_name)
        return response.text
    except requests.RequestException:
        print('爬取失败!')


def save_data(data, file_name):
    print('开始保存文件%s' % file_name)
    # Open the file with an explicit UTF-8 encoding instead of the platform
    # default (gbk on Chinese Windows), which is what caused the UnicodeEncodeError.
    with codecs.open(file_name, 'w', 'utf-8') as f:
        f.write(data)
    print('文件 %s保存完成!' % file_name)


def spider(kw, begin, end):
    for page in range(begin, end + 1):
        pn = (page - 1) * 50  # the offset must track the current page, not `begin`
        full_url = 'http://tieba.baidu.com/f?' + 'kw=' + kw + '&ie=utf-8&pn=' + str(pn)
        print(full_url)
        file_name = '网页' + str(page) + '.html'
        html = load_url(full_url, file_name)
        save_data(html, file_name)


if __name__ == '__main__':
    spider('战狼2', 1, 2)
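A note on the fix: the traceback shows Python 3 (the built-in `open()` picking up gbk from the locale), so `codecs.open` is not strictly required; the built-in `open()` accepts `encoding='utf-8'` directly, and passing `params` to `requests.get` lets the library percent-encode the Chinese keyword instead of concatenating it into the URL by hand. A minimal Python 3 sketch along those lines (the trimmed User-Agent string is just a placeholder):

import requests

def load_url(kw, pn):
    headers = {'User-Agent': 'Mozilla/5.0'}  # placeholder UA string
    # Let requests build and percent-encode the query string.
    resp = requests.get('http://tieba.baidu.com/f',
                        params={'kw': kw, 'ie': 'utf-8', 'pn': pn},
                        headers=headers)
    resp.raise_for_status()
    resp.encoding = resp.apparent_encoding
    return resp.text

def save_data(data, file_name):
    # Explicit encoding overrides the platform default (gbk on Chinese Windows).
    with open(file_name, 'w', encoding='utf-8') as f:
        f.write(data)

The loop in `spider` can stay essentially the same, passing the keyword and page offset instead of a pre-built URL.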