Does a PySpider Handler allow user-defined functions?

I'm new to PySpider and want to save each page in full while crawling. I couldn't find a ready-made way to do this, so I added a function write_file, but running it fails with "name 'write_file' is not defined". What's going on here? How do experienced users save pages and the files they reference? Code below:


from pyspider.libs.base_handler import *
import os
from urllib.parse import urlparse
class Handler(BaseHandler):
    crawl_config = {
    }    
    def write_file(response):
        url=response.url
        host,path=urlparse(url)[1:3]
        path2=path[0:path.rindex('/')+1]
        base='pyspider/'
        path3=base+host+path2
        os.makedirs(path3)
        file=open(base+host+path,'wb')
        file.write(response.content)
        file.flush()
        file.close()
    
    @every(minutes=10)
    def on_start(self):
        self.crawl('http://www.aaa.com', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        for each in response.doc('a[href^="http"]').items():
            self.crawl(each.attr.href, callback=self.detail_page)
      
    @config(priority=2)
    def detail_page(self, response):
        write_file(response)
        return {
            "url": response.url,
            "title": response.doc('title').text(),
            "content":response.doc('html').text() 
            #"content":response.doc('html').text() 
        }
4 Answers

Call it as self.write_file. Oh, and you also need to change the function declaration to def write_file(self, response).
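
A minimal sketch of that fix applied to the question's snippet (same imports as the question; unchanged bodies elided with ...):

from pyspider.libs.base_handler import *

class Handler(BaseHandler):
    # declared with self so it becomes a bound method of Handler
    def write_file(self, response):
        ...

    @config(priority=2)
    def detail_page(self, response):
        self.write_file(response)  # call it through self, not as a bare name
        ...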

I still don't know exactly what was wrong, but I found an equivalent workaround. Following an example written by an experienced user, I moved write_file out of the class and defined it as a module-level function.

from pyspider.libs.base_handler import *
import os
from urllib.parse import urlparse

def write_file(response):
    url = response.url
    host, path = urlparse(url)[1:3]
    dir_path = path[0:path.rindex('/') + 1]
    base = 'pyspider/'
    # exist_ok avoids the error when the directory was already created
    os.makedirs(base + host + dir_path, exist_ok=True)
    # write the raw page body; the with-block closes the file automatically
    with open(base + host + path, 'wb') as f:
        f.write(response.content)

class Handler(BaseHandler):
    crawl_config = {
    }

    @every(minutes=10)
    def on_start(self):
        self.crawl('http://www.aaa.com', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        for each in response.doc('a[href^="http"]').items():
            self.crawl(each.attr.href, callback=self.detail_page)

    @config(priority=2)
    def detail_page(self, response):
        write_file(response)  # module-level function, so no self needed
        return {
            "url": response.url,
            "title": response.doc('title').text(),
            "content": response.doc('html').text()
        }
That's the first approach.

The second approach is to keep write_file inside the class, written the same way as the decorated handler methods, and call it through self:
from pyspider.libs.base_handler import *
import os
from urllib.parse import urlparse

class Handler(BaseHandler):
    crawl_config = {
    }

    def write_file(self, response):
        url = response.url
        host, path = urlparse(url)[1:3]
        dir_path = path[0:path.rindex('/') + 1]
        base = 'pyspider/'
        # exist_ok avoids the error when the directory was already created
        os.makedirs(base + host + dir_path, exist_ok=True)
        # write the raw page body; the with-block closes the file automatically
        with open(base + host + path, 'wb') as f:
            f.write(response.content)

    @every(minutes=10)
    def on_start(self):
        self.crawl('http://www.aaa.com', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        for each in response.doc('a[href^="http"]').items():
            self.crawl(each.attr.href, callback=self.detail_page)

    @config(priority=2)
    def detail_page(self, response):
        self.write_file(response)  # defined on the class, so call via self
        return {
            "url": response.url,
            "title": response.doc('title').text(),
            "content": response.doc('html').text()
        }
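
Note that even with these fixes, write_file still assumes the URL path contains a filename: for 'http://www.aaa.com' (empty path) path.rindex('/') raises ValueError, and for paths ending in '/' the open() call targets a directory. A rough, more defensive sketch (save_page and its names are mine, not from the answers; it assumes only that response exposes .url and .content as in the snippets above):

import pathlib
from urllib.parse import urlparse

def save_page(response, base='pyspider'):
    parts = urlparse(response.url)
    path = parts.path or '/'
    if path.endswith('/'):
        path += 'index.html'  # substitute a filename for directory-style URLs
    target = pathlib.Path(base) / parts.netloc / path.lstrip('/')
    target.parent.mkdir(parents=True, exist_ok=True)  # tolerate existing dirs
    target.write_bytes(response.content)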

You can define your own variables and functions inside the class, but you have to access or call them through self.xxx.
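
For instance, a minimal sketch with hypothetical names (plain Python, independent of pyspider):

class Example:
    base = 'pyspider/'                # class-level variable

    def helper(self, name):
        return self.base + name       # read the variable via self

    def run(self):
        return self.helper('a.html')  # call the method via self

print(Example().run())  # prints 'pyspider/a.html'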
