Wenzhou Data Collection
This scrape downloads PDFs from a government site: http://wzszjw.wenzhou.gov.cn/col/col1357901/index.html
(The tricky part is configuring Scrapy's file-download support. I had never used Scrapy to download files before, so it took quite a while to get working; there are plenty of writeups online, but none of them are complete.)
The key things to configure:
1. The file-download code in pipelines.py: this part can be reused as-is, no changes needed.
2. The URL of the file to download must be placed in a list: item['file_urls']=[url] (wenzhou.py; see the sketch after this list).
3. The main configuration in settings.py:
ITEM_PIPELINES = {
    'wenzhou_web.pipelines.WenzhouWebPipeline': 300,
    # file-download pipeline
    'wenzhou_web.pipelines.MyFilePipeline': 1,
}
# download path
FILES_STORE = './download'
4. The downloaded files are then saved into the download folder.
(Screenshot of the download folder omitted.)
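To make point 2 concrete: get_media_requests() in the files pipeline iterates over item['file_urls'], so even a single URL has to be wrapped in a list (a bare string would be iterated character by character). A minimal sketch, with a hypothetical PDF URL:
from wenzhou_web.items import WenzhouWebItem

# file_urls must be a list, even for one file; get_media_requests()
# loops over it and builds one Request per entry.
item = WenzhouWebItem()
item['file_urls'] = ['http://wzszjw.wenzhou.gov.cn/example/doc.pdf']  # hypothetical URL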
wenzhou.py
# -*- coding: utf-8 -*-
import scrapy
import re
from wenzhou_web.items import WenzhouWebItem


class WenzhouSpider(scrapy.Spider):
    name = 'wenzhou'
    base_url = 'http://wzszjw.wenzhou.gov.cn'
    allowed_domains = ['wzszjw.wenzhou.gov.cn']
    start_urls = ['http://wzszjw.wenzhou.gov.cn/col/col1357901/index.html']

    # Per-spider settings; these override the project-wide values in settings.py.
    custom_settings = {
        "DOWNLOAD_DELAY": 0.5,
        "ITEM_PIPELINES": {
            'wenzhou_web.pipelines.MysqlPipeline': 320,
            'wenzhou_web.pipelines.MyFilePipeline': 321,
        },
        "DOWNLOADER_MIDDLEWARES": {
            'wenzhou_web.middlewares.WenzhouWebDownloaderMiddleware': 500,
        },
    }

    def parse(self, response):
        _response = response.text
        # Each list entry looks like <span>...</span><b>·</b><a href='...'>;
        # pull the relative detail-page links out with a regex.
        tag_list = re.findall("<span>.*?</span><b>·</b><a href=\'(.*?)\'", _response)
        for tag in tag_list:
            url = self.base_url + tag
            yield scrapy.Request(url=url, callback=self.parse_detail)

    def parse_detail(self, response):
        _response = response.text
        pdf_url = re.findall(r'<a target="_blank" href="(.*?)"', _response)
        for u in pdf_url:
            # Keep only the part up to 'pdf' and re-append it, dropping
            # anything that may follow the extension.
            u = u.split('pdf')[0]
            # file link
            url = "http://wzszjw.wenzhou.gov.cn" + u + "pdf"
            # Create a fresh item for every URL; reusing a single instance
            # across yields would let later assignments clobber earlier items.
            item = WenzhouWebItem()
            item['file_urls'] = [url]
            yield item
        # # title
        # try:
        #     title = re.findall('<img src=".*?".*?><span style=".*?">(.*?)</span></a></p><meta name="ContentEnd">', _response)
        #     print(title[0])
        # except:
        #     print('exception!')
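Regex over raw HTML works here, but Scrapy's selectors are usually more tolerant of whitespace and attribute ordering. A sketch of the same link extraction with XPath, assuming the list markup really is the span/b/a sibling structure the regex targets:
    # Alternative parse(): XPath instead of a regex (assumed markup:
    # <span>...</span><b>·</b><a href='...'> siblings).
    def parse(self, response):
        for tag in response.xpath("//b/following-sibling::a[1]/@href").extract():
            # urljoin() resolves the relative href against the page URL.
            yield scrapy.Request(url=response.urljoin(tag), callback=self.parse_detail)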
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy


class WenzhouWebItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # URLs of the files to download (read by the files pipeline)
    file_urls = scrapy.Field()
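Worth noting: the unmodified FilesPipeline also expects a files field, into which it writes the download results. It can be omitted here only because MyFilePipeline below overrides item_completed() and writes the saved paths back into file_urls. With the stock pipeline, the item would be declared like this:
# Sketch: the two fields the stock FilesPipeline works with.
class WenzhouWebItem(scrapy.Item):
    file_urls = scrapy.Field()  # input: URLs to fetch
    files = scrapy.Field()      # output: dicts with 'url', 'path', 'checksum'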
middlewares.py
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class WenzhouWebSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class WenzhouWebDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.pipelines.files import FilesPipeline
import pymysql
import scrapy


class WenzhouWebPipeline(object):
    def process_item(self, item, spider):
        return item


# save the data to MySQL
class MysqlPipeline(object):
    def open_spider(self, spider):
        # Read the connection parameters from the project settings.
        # (scrapy.conf is deprecated; spider.settings is the supported way.)
        settings = spider.settings
        self.host = settings.get('MYSQL_HOST')
        self.port = settings.get('MYSQL_PORT')
        self.user = settings.get('MYSQL_USER')
        self.password = settings.get('MYSQL_PASSWORD')
        self.db = settings.get('MYSQL_DB')
        self.table = settings.get('TABLE')
        self.client = pymysql.connect(host=self.host, user=self.user, password=self.password, port=self.port, db=self.db, charset='utf8')

    def process_item(self, item, spider):
        item_dict = dict(item)
        cursor = self.client.cursor()
        values = ','.join(['%s'] * len(item_dict))
        keys = ','.join(item_dict.keys())
        sql = 'INSERT INTO {table}({keys}) VALUES ({values})'.format(table=self.table, keys=keys, values=values)
        try:
            # First argument is the SQL statement, second is a tuple of values.
            if cursor.execute(sql, tuple(item_dict.values())):
                print('Inserted into database!')
                self.client.commit()
        except Exception as e:
            print(e)
            print('Row already exists!')
            self.client.rollback()
        return item

    def close_spider(self, spider):
        self.client.close()


# file-download pipeline
class MyFilePipeline(FilesPipeline):
    def get_media_requests(self, item, info):
        for file_url in item['file_urls']:
            yield scrapy.Request(file_url)

    def item_completed(self, results, item, info):
        file_paths = [x['path'] for ok, x in results if ok]
        if not file_paths:
            raise DropItem("Item contains no file")
        # Overwrite the original URLs with the local paths of the saved files.
        item['file_urls'] = file_paths
        return item
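The INSERT statement built in process_item() assumes a table whose columns match the item's fields. The post does not show the table definition, so the following is only a hypothetical sketch for the single file_urls field; the column type and the primary key are assumptions (the key is what makes duplicate inserts fall into the "Row already exists!" branch):
# Hypothetical DDL sketch: one column per item field.
import pymysql

client = pymysql.connect(host='192.168.113.129', port=3306, user='root',
                         password='123456', db='web_datas', charset='utf8')
with client.cursor() as cursor:
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS web_wenzhou ("
        " file_urls VARCHAR(255) NOT NULL,"
        " PRIMARY KEY (file_urls)"
        ") DEFAULT CHARSET=utf8"
    )
client.commit()
client.close()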
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for wenzhou_web project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wenzhou_web'
SPIDER_MODULES = ['wenzhou_web.spiders']
NEWSPIDER_MODULE = 'wenzhou_web.spiders'
# MySQL connection parameters
MYSQL_HOST = "192.168.113.129"
MYSQL_PORT = 3306
MYSQL_USER = "root"
MYSQL_PASSWORD = "123456"
MYSQL_DB = 'web_datas'
TABLE = "web_wenzhou"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wenzhou_web (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wenzhou_web.middlewares.WenzhouWebSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'wenzhou_web.middlewares.WenzhouWebDownloaderMiddleware': 500,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'wenzhou_web.pipelines.WenzhouWebPipeline': 300,
    # file-download pipeline (the spider's custom_settings override this dict)
    'wenzhou_web.pipelines.MyFilePipeline': 1,
}
FILES_STORE = './download'
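# By default FilesPipeline names each saved file after the SHA1 hash of
# its URL, so the PDFs land under ./download/full/<sha1>.pdf rather than
# keeping their original names (override FilesPipeline.file_path() to change).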
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
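With everything in place, run the spider from the project root; the PDFs end up under ./download/full/ and one row per item goes into MySQL:
scrapy crawl wenzhou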