Saving Baidu image search results locally with requests: asking for a cleaner, more standard rewrite

I was reading the requests docs today and wanted to try its file-download feature. Downloading a single image worked, which already felt like a win for a beginner. Then it occurred to me: since I have learned regular expressions, why not combine requests and regex to grab a batch of images? Everyone says the way to learn to code is to practice, so here is my attempt. This is the first complete Python script I have written on my own, done the clumsy way in about 3 hours, fresh out of the oven, and there isn't a single function in the whole thing (because I don't know how to use them yet...). Could someone rewrite and optimize it? Thanks.

# -*- coding: utf-8 -*-
import os
import re
from contextlib import closing

import requests

word = input('What keyword do you want to search for? ').strip()
ye = int(input('How many pages do you want to download? '))
sites = []
shu = 0
offset = 0
i = 0
start = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn=' + str(offset) + '&gsm=3c&ct=&ic=0&lm=-1&width=0&height=0'
if ye == 1:
    sites.append(start)
elif ye == 0:
    print('Come on, at least one page.')
elif ye > 1:
    # each result page holds 20 images, so step the pn offset by 20
    while shu < ye:
        sites.append('http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn=' + str(offset) + '&gsm=3c&ct=&ic=0&lm=-1&width=0&height=0')
        offset += 20
        shu += 1
print(sites)

os.makedirs('bdpics', exist_ok=True)  # the output directory has to exist before open() can write into it

while len(sites) > 0:
    search_url = sites.pop()
    headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
    html = requests.get(search_url).text
    pic_url = re.findall(r'objURL":"(.*?)"', html, re.S)  # image URLs sit in the objURL fields of the page source
    print(pic_url)
    for surl in pic_url:
        strings = os.path.join('bdpics', str(i) + '.jpg')
        try:
            with closing(requests.get(surl, headers=headers, stream=True, timeout=20)) as ok:
                with open(strings, 'wb') as fd:
                    for chunk in ok.iter_content(128):
                        fd.write(chunk)
        except requests.RequestException:
            print('This image link is dead')
            continue
        i += 1
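
Before the full rewrite in the answer below, one small step toward using functions: the per-image download loop is the easiest piece to pull out on its own. A minimal sketch only; download_one, its parameters, and the return convention are made up here, not taken from the code above:

import requests


def download_one(pic_url, save_path, headers=None, timeout=20):
    # Fetch one image URL, stream it to save_path, and report success or failure.
    try:
        resp = requests.get(pic_url, headers=headers, stream=True, timeout=timeout)
        resp.raise_for_status()
        with open(save_path, 'wb') as fd:
            for chunk in resp.iter_content(128):
                fd.write(chunk)
        return True
    except requests.RequestException:
        return False

The loop body above would then shrink to something like: if download_one(surl, strings, headers): i += 1.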
1 Answer

Code

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re

import requests


class Downloader(object):
    @staticmethod
    def down_page(url):
        html = requests.get(url).text
        return html

    @staticmethod
    def down_pic(url):
        headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
        return requests.get(url, headers=headers, stream=True, timeout=20).content


class UrlManager(object):
    def __init__(self):
        self.page_urls = set()

    def gen_urls(self, word, pages):
        for page in range(pages):
            url = self.__gen_url__(word, page)
            self.page_urls.add(url)

    def __gen_url__(self, word, page):
        return 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=%s&pn=%s&gsm=3c&ct=&ic=0&lm=-1&width=0&height=0' % (
            word, page * 20)

    def has_new_url(self):
        return len(self.page_urls) > 0

    def get_craw_url(self):
        return self.page_urls.pop()


class Parser(object):
    def parse(self, html):
        return re.findall(r'objURL":"(.*?)"', html, re.S)


class Output(object):
    def __init__(self):
        self.result_dir = 'bdpics/'
        self.__check_dir__(self.result_dir)

    def save(self, file_name, pic_content):
        with open(os.path.join(self.result_dir, file_name), 'wb') as result_file:
            result_file.write(pic_content)

    def __check_dir__(self, dir):
        if not os.path.exists(dir):
            os.makedirs(dir)

    def show(self):
        print "result dir: %s" % os.path.abspath(self.result_dir)


class Spider_Main(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = Downloader()
        self.parser = Parser()
        self.output = Output()

    def crawl(self, word, pages):
        self.__check_params__(word, pages)
        self.urls.gen_urls(word, pages)

        count = 0
        while self.urls.has_new_url():
            page_url = self.urls.get_craw_url()
            html = self.downloader.down_page(page_url)
            pic_urls = self.parser.parse(html)
            for pic_url in pic_urls:
                try:
                    count += 1
                    file_name = '%s_%s' % (count, pic_url[pic_url.rindex("/") + 1:])

                    pic_content = self.downloader.down_pic(pic_url)
                    self.output.save(file_name, pic_content)
                    print "crawl %s url=%s" % (count, pic_url)
                except Exception, e:
                    print "failed %s url=%s, e=%s" % (count, pic_url, e.message)

        self.output.show()

    @staticmethod
    def __check_params__(word, pages):
        if not word:
            raise Exception('keyword must not be empty')
        if pages <= 0:
            raise Exception('page count must be greater than 0')


if __name__ == '__main__':
    spider_main = Spider_Main()
    spider_main.crawl('狗', 2)  # crawl 2 result pages for the keyword '狗' (dog)
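
If you want to keep the interactive prompts from the original script instead of hard-coding the keyword, the entry point could be swapped for something along these lines (just a sketch; it reuses the Spider_Main class defined above):

if __name__ == '__main__':
    # hypothetical interactive entry point mirroring the original script's prompts
    word = input('What keyword do you want to search for? ').strip()
    pages = int(input('How many pages do you want to download? '))
    Spider_Main().crawl(word, pages)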

Results

[screenshot of the crawl output]

Beauties (?)

[screenshot of the downloaded pictures]
