
This is a nice practice project for beginners: crawl the Douban Top 250 movie list, record each film's rank, title, director, leading actors and one-line quote, and save everything into a txt file.
For a beginner, the tricky part is locating the link to the next page and following it, and then recognizing the last page so the crawler knows when to stop; the idea is sketched below before the full script.
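
The detection can be sketched in isolation. This is only a minimal sketch, not the script below verbatim: the helper name next_page_url is made up for illustration, and it assumes the list page exposes the next-page link under the '.next a' selector (as the full script also does), with a relative href along the lines of '?start=25&filter=' that has to be appended to the base URL.

def next_page_url(soup, base_url):
    # Return the absolute URL of the next page, or None on the last page.
    link = soup.select('.next a')
    if not link:
        # The last page has no "next" link, so the crawler stops here.
        return None
    # The href is relative (e.g. '?start=25&filter='), so prepend the base URL.
    return base_url + link[0]['href']

The full script below applies exactly this check after parsing each page, calling itself on the returned URL until the link disappears.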

It is a very basic crawler. The code follows.

import requests
from bs4 import BeautifulSoup
import time

lurl = 'https://movie.douban.com/top250'

movie = []

def getlist(listurl):
    # Wait between requests so the crawler does not hammer the site.
    time.sleep(2)
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36'}
    # Pass the headers along, otherwise the request carries no User-Agent and Douban may reject it.
    res = requests.get(listurl, headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    movielist = soup.select('.grid_view li')
    for m in movielist:
        rank = m.select('em')[0].text
        title = m.select('.title')[0].text
        # The <p> under .info .bd holds director, leading actors and year in one text blob.
        info = m.select('.info .bd p')[0].text.strip()
        # Put "主演:" (leading actors) on its own line; the split strings match the page text literally.
        info = '\n主演:'.join(info.split('   主演:'))
        # The year is separated from the director line by a long run of spaces; label it "年代:".
        info = '年代:'.join(info.split('                           '))
        # The one-line quote (.inq) is missing for a few films, so guard against an empty selection.
        if m.select('.inq'):
            comments = m.select('.inq')[0].text.strip()
        else:
            comments = 'None'
        movie.append('排名: ' + rank + '\n' + '片名: ' + title + '\n' + info + '\n' + '评论: ' + comments + '\n\n')
    # A "next page" link exists on every page except the last one,
    # so an empty selection is the signal to stop the crawl.
    if soup.select('.next a'):
        next_href = soup.select('.next a')[0]['href']
        # The href is relative to the list page, so append it to the base URL.
        getlist(lurl + next_href)
    else:
        print('Reached the last page')
    return movie



movies = getlist(lurl)

with open('movie.txt', 'w', encoding='utf-8') as m:  # write UTF-8 explicitly so the Chinese text is saved correctly
    for a in movies:
        m.write(a)
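
A small design note: the script follows the pages by calling getlist recursively and collecting results in the module-level movie list. The same pagination can also be written as a plain loop, which avoids both the global list and the recursion. Below is a minimal sketch under the assumption that it sits next to the script above (it reuses requests, BeautifulSoup, time and lurl from there); getlist_iter is an illustrative name, and it only records rank and title to keep the sketch short.

def getlist_iter(start_url, headers):
    # Iterative version of the crawl: follow the next-page link until it disappears.
    results = []
    url = start_url
    while url:
        time.sleep(2)
        soup = BeautifulSoup(requests.get(url, headers=headers).text, 'html.parser')
        for m in soup.select('.grid_view li'):
            rank = m.select('em')[0].text
            title = m.select('.title')[0].text
            results.append('排名: ' + rank + '\n片名: ' + title + '\n\n')
        nxt = soup.select('.next a')               # empty on the last page
        url = lurl + nxt[0]['href'] if nxt else None
    return results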
