Incremental crawlers: the requirement
When we browse certain websites, we notice that they periodically publish new data on top of what is already there; some movie sites, for example, keep adding the latest popular films. When our crawler targets a site like this, we should re-run the program periodically so that it picks up the newly published data. This is exactly what an incremental crawler gives us: the crawler program detects which data on the site has changed since the last run, so that only the newly published data is crawled.
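Both examples below rest on the same Redis primitive: sadd returns 1 when a value is new to a set and 0 when it is already a member, which turns a Redis set into a persistent "have I seen this before?" check that survives across crawler runs. A minimal standalone sketch of just that idea (the example.com URLs and the seen_urls key are placeholders, assuming a local Redis on the default port):

from redis import Redis

conn = Redis(host='127.0.0.1', port=6379)

def is_new(key, value):
    # sadd returns 1 if the value was added (never seen before),
    # 0 if it was already a member of the set
    return conn.sadd(key, value) == 1

for url in ['https://example.com/a', 'https://example.com/a', 'https://example.com/b']:
    if is_new('seen_urls', url):
        print('new, crawl it:', url)
    else:
        print('already seen, skip:', url)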
movie.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from increment1_Pro.items import Increment1ProItem


class MovieSpider(CrawlSpider):
    name = 'movie'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/id/7.html']

    rules = (
        Rule(LinkExtractor(allow=r'/index.php/vod/show/id/7/page/\d+\.html'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        conn = Redis(host='127.0.0.1', port=6379)
        detail_url_list = response.xpath('//li[@class="col-md-6 col-sm-4 col-xs-3"]/div/a/@href').extract()
        for url in detail_url_list:
            # the extracted hrefs are site-relative, so make them absolute before requesting
            url = response.urljoin(url)
            # sadd returns 1 when the URL is not yet stored in Redis (new data)
            # and 0 when Redis already holds it (already crawled)
            ex = conn.sadd('movies_url', url)
            if ex == 1:
                yield scrapy.Request(url=url, callback=self.parse_detail)
            else:
                print('No data updates on the site; there is nothing new to crawl!!!')

    def parse_detail(self, response):
        item = Increment1ProItem()
        item['name'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/h1/text()').extract_first()
        item['actor'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[3]/a/text()').extract_first()
        yield item
Pipeline file (pipelines.py)
import json
from redis import Redis


class Increment1ProPipeline(object):
    conn = None

    def open_spider(self, spider):
        self.conn = Redis(host='127.0.0.1', port=6379)

    def process_item(self, item, spider):
        print('New data is being written to the store')
        # redis-py only accepts bytes/str/numbers as values,
        # so serialize the item to JSON before pushing it
        self.conn.lpush('movie_data', json.dumps(dict(item), ensure_ascii=False))
        return item
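To verify what the pipeline has stored, read the list back; a quick check, assuming the same local Redis and the movie_data key written above (lpush prepends, so index 0 is the newest record):

import json
from redis import Redis

conn = Redis(host='127.0.0.1', port=6379, decode_responses=True)
# walk the whole list, newest record first
for raw in conn.lrange('movie_data', 0, -1):
    print(json.loads(raw))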
qiubai.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from increment2_Pro.items import Increment2ProItem
import hashlib


class QiubaiSpider(CrawlSpider):
    name = 'qiubai'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.qiushibaike.com/text/']

    rules = (
        Rule(LinkExtractor(allow=r'/text/page/\d+/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        div_list = response.xpath('//div[@class="article block untagged mb15 typs_hot"]')
        conn = Redis(host='127.0.0.1', port=6379)
        for div in div_list:
            item = Increment2ProItem()
            item['content'] = div.xpath('.//div[@class="content"]/span//text()').extract()
            item['content'] = ''.join(item['content'])
            item['author'] = div.xpath('./div/a[2]/h2/text() | ./div[1]/span[2]/h2/text()').extract_first()
            # build a custom data fingerprint from the record's identifying fields
            source = item['author'] + item['content']
            hash_value = hashlib.sha256(source.encode()).hexdigest()
            # sadd returns 1 for a fingerprint we have not seen before, 0 otherwise
            ex = conn.sadd('qiubai_hash', hash_value)
            if ex == 1:
                yield item
            else:
                print('No updated data to crawl')
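The fingerprint approach generalizes beyond this spider: for sites without stable detail-page URLs, hash whatever combination of fields identifies a record and deduplicate on the digest. A small helper along those lines (a sketch; which fields make a record unique is an assumption you must decide per site):

import hashlib

def fingerprint(*fields):
    # join the identifying fields and hash them; identical records always
    # yield the same digest, so the digest can be deduplicated in a Redis set
    raw = ''.join(f or '' for f in fields)  # tolerate None fields
    return hashlib.sha256(raw.encode('utf-8')).hexdigest()

# usage inside parse_item: ex = conn.sadd('qiubai_hash', fingerprint(author, content))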
Pipeline file (pipelines.py)
import json
from redis import Redis


class Increment2ProPipeline(object):
    conn = None

    def open_spider(self, spider):
        self.conn = Redis(host='127.0.0.1', port=6379)

    def process_item(self, item, spider):
        dic = {
            'author': item['author'],
            'content': item['content']
        }
        # redis-py cannot store a dict directly, so serialize it to JSON first
        self.conn.lpush('qiubaiData', json.dumps(dic, ensure_ascii=False))
        print('Crawled one record, writing it to the store......')
        return item
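An incremental crawler only pays off when it is re-run periodically, as noted in the introduction. The simplest scheduling sketch is a loop around Scrapy's command-line runner (run it from the project directory; in production a cron job or a proper scheduler is the more robust choice, and the one-hour interval here is an arbitrary assumption):

import subprocess
import time

while True:
    # re-run the spider; the Redis set remembers what was already crawled,
    # so each run yields only the newly published records
    subprocess.run(['scrapy', 'crawl', 'qiubai'])
    time.sleep(60 * 60)  # wait an hour between runs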
Original post: https://www.cnblogs.com/mlhz/p/10485666.html