PySpider newbie here. While crawling, I want to save each whole web page to disk, but I couldn't find a ready-made way to do it, so I added a function write_file. When I run it, I get "name 'write_file' is not defined". What's going wrong? How do the experts save pages and the files in them? Code below:
from pyspider.libs.base_handler import *
import os
from urllib.parse import urlparse

class Handler(BaseHandler):
    crawl_config = {
    }

    def write_file(response):
        url = response.url
        host, path = urlparse(url)[1:3]
        path2 = path[0:path.rindex('/') + 1]
        base = 'pyspider/'
        path3 = base + host + path2
        os.makedirs(path3)
        file = open(base + host + path, 'wb')
        file.write(response.content)
        file.flush()
        file.close()

    @every(minutes=10)
    def on_start(self):
        self.crawl('http://www.aaa.com', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        for each in response.doc('a[href^="http"]').items():
            self.crawl(each.attr.href, callback=self.detail_page)

    @config(priority=2)
    def detail_page(self, response):
        write_file(response)
        return {
            "url": response.url,
            "title": response.doc('title').text(),
            "content": response.doc('html').text()
        }
write_file is a method of your Handler class, so you have to call it through the instance: self.write_file(response). Oh, right, and the function declaration also has to take self as its first parameter:
def write_file(self, response)
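Here is a minimal sketch of the corrected method, keeping your pyspider/host/path output layout. Two further assumptions on my part (not in your original code): os.makedirs is called with exist_ok=True so saving a second page from the same directory doesn't raise FileExistsError, and a hypothetical index.html fallback filename is used when the URL path ends in '/':

from pyspider.libs.base_handler import *
import os
from urllib.parse import urlparse

class Handler(BaseHandler):
    # ... crawl_config, on_start, index_page as before ...

    def write_file(self, response):
        # Split the URL into host and path, e.g.
        # 'http://www.aaa.com/a/b.html' -> host='www.aaa.com', path='/a/b.html'
        host, path = urlparse(response.url)[1:3]
        # Paths ending in '/' have no filename; fall back to index.html
        # (an assumed convention, adjust to taste)
        if not path or path.endswith('/'):
            path += 'index.html'
        base = 'pyspider/'
        dir_path = base + host + path[:path.rindex('/') + 1]
        # exist_ok=True: don't raise if the directory already exists
        os.makedirs(dir_path, exist_ok=True)
        with open(base + host + path, 'wb') as f:
            f.write(response.content)

    @config(priority=2)
    def detail_page(self, response):
        self.write_file(response)  # call through self, as a bound method
        return {
            "url": response.url,
            "title": response.doc('title').text(),
            "content": response.doc('html').text()
        }

The with block also replaces your manual flush()/close(): the file is closed automatically even if the write raises.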