- 說明 1:項目配置文件 settings.py 的默認配置:
# User-Agent header sent with every request (impersonates desktop Chrome 72)
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
# Do not obey robots.txt
ROBOTSTXT_OBEY = False
# Enable the project's default item pipeline (priority 300)
ITEM_PIPELINES = {
'increment1_Pro.pipelines.Increment1ProPipeline': 300,
}
- 說明 2:items.py 文件本身根據下面爬蟲代碼的字段編寫
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from increment1_Pro.items import Increment1ProItem
class MovieSpider(CrawlSpider):
    """Incremental crawler for the horror category of 4567tv.

    Uses the Redis set 'movies_url' as a seen-URL registry: a detail page is
    requested only when its URL has never been stored before (URL-level
    incremental crawling).
    """
    name = 'movie'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/class/%E6%81%90%E6%80%96/id/9.html']
    rules = (
        Rule(LinkExtractor(allow=r'/index.php/vod/show/class/%E6%81%90%E6%80%96/id/9/page/\d+\.html'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Collect detail-page URLs from a list page and request only unseen ones."""
        conn = Redis(host='127.0.0.1', port=6379)
        # BUG FIX: xpath() returns a SelectorList, not strings — the original
        # concatenated the site root with the SelectorList itself (TypeError).
        # Extract the hrefs first, then prefix the root onto each one.
        detail_url_list = [
            'https://www.4567tv.tv' + href
            for href in response.xpath('//li[@class="col-md-6 col-sm-4 col-xs-3"]/div/a/@href').extract()
        ]
        for url in detail_url_list:
            # sadd returns 1 when the member is new, i.e. the URL was not crawled yet
            ex = conn.sadd('movies_url', url)
            if ex == 1:
                # BUG FIX: original wrote callback=self.self.parse_detail,
                # which raises AttributeError at runtime.
                yield scrapy.Request(url=url, callback=self.parse_detail)
            else:
                print("此站<%s>無更新數據,暫無新數據可爬" % url)

    def parse_detail(self, response):
        """Extract the movie title and lead actor from a detail page."""
        item = Increment1ProItem()
        item['name'] = response.xpath('//div[@class="stui-content__detail"]/h1/text()').extract_first()
        item['actor'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[3]/a/text()').extract_first()
        yield item
# -*- coding: utf-8 -*-
from redis import Redis
class Increment1ProPipeline(object):
    """Stores scraped movie items into the Redis list 'movie_data'."""

    def open_spider(self, spider):
        # One connection for the whole spider run.
        self.conn = Redis(host='127.0.0.1', port=6379)

    def process_item(self, item, spider):
        """Push the item into Redis and pass it on to the next pipeline."""
        print('正在入庫!')
        # BUG FIX: redis-py cannot serialize a scrapy Item object directly;
        # push a plain dict instead (this is what the commented-out code
        # intended, and it mirrors IncrementDataPipeline in this project).
        self.conn.lpush('movie_data', dict(item))
        return item
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from increment_data.items import IncrementDataItem
from redis import Redis
import hashlib
class QiubaiSpider(CrawlSpider):
    """Incremental crawler for qiushibaike text posts.

    Deduplicates at record level: a SHA-256 fingerprint of author+content is
    stored in the Redis set 'qiubai_hash'; only records whose fingerprint has
    never been seen are yielded.
    """
    name = 'qiubai'
    # allowed_domains = ['www.xx.com']
    start_urls = ['https://www.qiushibaike.com/text/']
    rules = (
        Rule(LinkExtractor(allow=r'/text/page/\d+/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Fingerprint every post on the page; yield only unseen ones."""
        div_list = response.xpath('//div[@class="article block untagged mb15 typs_hot"]')
        conn = Redis(host='127.0.0.1', port=6379)
        for div in div_list:
            item = IncrementDataItem()
            item['author'] = div.xpath('./div[1]/a[2]/h2/text() | ./div[1]/span[2]/h2/text()').extract_first()
            item['content'] = ''.join(div.xpath('.//div[@class="content"]/span/text()').extract())
            # BUG FIX: extract_first() returns None when neither author xpath
            # matches (e.g. anonymous posts); fall back to '' so the
            # concatenation below cannot raise TypeError.
            source = (item['author'] or '') + item['content']
            # Custom data fingerprint: the unique identifier of this record.
            hashValue = hashlib.sha256(source.encode()).hexdigest()
            # sadd returns 1 when the fingerprint is new -> record not seen before
            ex = conn.sadd("qiubai_hash", hashValue)
            if ex == 1:
                yield item
            else:
                print("此數據 已 爬")
# -*- coding: utf-8 -*-
from redis import Redis
##################### 注意 ########################
"""
在這裏 向 redis 的列表中 lpush 字典時 ,則python 中 redis 的版本須要爲 2.10.6,不然報錯
pip3 install redis==2.10.6
"""
class IncrementDataPipeline(object):
    """Stores deduplicated qiubai records into the Redis list 'qiubai_data'.

    NOTE: pushing a dict into a Redis list requires redis-py 2.10.6
    (pip3 install redis==2.10.6); newer versions reject dict values.
    """

    def open_spider(self, spider):
        # FIX: create the connection when the spider starts instead of at
        # class-definition (import) time — avoids an import-time side effect
        # and matches Increment1ProPipeline in this project.
        self.conn = Redis(host='127.0.0.1', port=6379)

    def process_item(self, item, spider):
        """Push the record's fields into Redis and pass the item on."""
        dic = {
            'author': item['author'],
            'content': item['content'],
        }
        self.conn.lpush('qiubai_data', dic)
        print('爬取到一條數據, 正在入庫')
        return item