Commonly used Scrapy settings

I. Saving logging output

import os

# Logging: level, stdout capture, encoding, and the file the log is written to
LOG_LEVEL = "INFO"
LOG_STDOUT = True
LOG_ENCODING = 'utf-8'
# path helper, e.g. os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
LOG_FILE = os.path.dirname(__file__) + "/SHANGSHIYAOPINGMULU_error.log"
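With this in place, anything logged from spider code at or above LOG_LEVEL also ends up in LOG_FILE. A minimal sketch of what that looks like from inside a spider (the spider name and URL are illustrative):

import logging
import scrapy

logger = logging.getLogger(__name__)

class ExampleSpider(scrapy.Spider):
    name = "example"                      # illustrative spider name
    start_urls = ["https://example.com"]  # illustrative URL

    def parse(self, response):
        # both lines go through Scrapy's logging and are written to LOG_FILE
        logger.info("parsed %s", response.url)
        self.logger.info("same thing via the spider's built-in logger")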

II. Disabling redirects

REDIRECT_ENABLED = False
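REDIRECT_ENABLED = False turns redirects off globally. If you only need to suppress them for particular requests, the same effect can be had per request via meta; a small sketch (spider name, URL and callback are illustrative):

import scrapy

class LoginCheckSpider(scrapy.Spider):
    name = "login_check"                               # illustrative

    def start_requests(self):
        yield scrapy.Request(
            "https://example.com/login",               # illustrative URL
            meta={
                "dont_redirect": True,                 # skip RedirectMiddleware for this request
                "handle_httpstatus_list": [301, 302],  # let the callback see the 3xx response itself
            },
            callback=self.parse_login,
        )

    def parse_login(self, response):
        # with redirects suppressed, 301/302 responses arrive here directly
        self.logger.info("got %s for %s", response.status, response.url)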

III. Setting a download delay

import random
DOWNLOAD_DELAY = random.random() + random.random()
RANDOMIZE_DOWNLOAD_DELAY = True

IV. Setting USER_AGENT

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
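A single USER_AGENT is often enough; if you want to rotate user agents per request instead, a small downloader middleware can do it. A minimal sketch, assuming it lives in your project's middlewares.py (the class name and UA list are illustrative):

import random

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
]

class RandomUserAgentMiddleware(object):
    def process_request(self, request, spider):
        # overwrite the User-Agent header before the request is downloaded
        request.headers['User-Agent'] = random.choice(USER_AGENTS)

It would then be registered in DOWNLOADER_MIDDLEWARES with a priority after the built-in UserAgentMiddleware (400), e.g. {'yourproject.middlewares.RandomUserAgentMiddleware': 543}, where the module path is whatever your project actually uses.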

V. Running all of the project's spiders

1. Create a commands folder at the same level as the spiders directory

mkdir commands

2. Enter the commands folder

cd commands

3. Create an __init__.py file

touch __init__.py

4. Create a crawlall.py file

touch crawlall.py

5. Open crawlall.py and add the command code

vim crawlall.py
from scrapy.commands import ScrapyCommand
from scrapy.crawler import CrawlerRunner
from scrapy.exceptions import UsageError
from scrapy.utils.conf import arglist_to_dict


class Command(ScrapyCommand):
    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
                          help="set spider argument (may be repeated)")
        parser.add_option("-o", "--output", metavar="FILE",
                          help="dump scraped items into FILE (use - for stdout)")
        parser.add_option("-t", "--output-format", metavar="FORMAT",
                          help="format to use for dumping items with -o")

    def process_options(self, args, opts):
        ScrapyCommand.process_options(self, args, opts)
        try:
            opts.spargs = arglist_to_dict(opts.spargs)
        except ValueError:
            raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)

    def run(self, args, opts):
        # settings = get_project_settings()

        spider_loader = self.crawler_process.spider_loader
        for spidername in args or spider_loader.list():
            print("*********cralall NewsSpider************")
            self.crawler_process.crawl(spidername, **opts.spargs)
        self.crawler_process.start()

6. Register the commands module in settings.py

COMMANDS_MODULE = 'spider.commands'
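Note that 'spider' in COMMANDS_MODULE is the project package name, so adjust it to your own project. Assuming a Scrapy version whose ScrapyCommand still exposes the optparse-style add_options used above, the new command is then run from the project root, for example:

scrapy crawlall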

VI. Status codes that trigger a retry

RETRY_HTTP_CODES = [500, 520]

VII. Configuring Redis (scrapy-redis)

# Redis connection info
REDIS_HOST = "192.168.1.235"
REDIS_PORT = 6379
REDIS_PARAMS = {
    "password": "KangCe@0608",
}

# 1 (required). Use the scrapy_redis dedup filter, which deduplicates request fingerprints in Redis
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# 2 (required). Use the scrapy_redis scheduler, which dispatches requests through Redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# 3 (required). Keep the scrapy-redis queues in Redis so a crawl can be paused and resumed, i.e. do not clear the Redis queues
SCHEDULER_PERSIST = True

# 4 (required). RedisPipeline writes each item into a Redis list keyed '<spider.name>:items' for later distributed processing
# This is already implemented by scrapy-redis; no extra code is needed, just enable the pipeline (a minimal spider sketch follows the dict below)
ITEM_PIPELINES = {
    # 'AQI.pipelines.AqiJsonPipeline': 200,
    # 'AQI.pipelines.AqiCSVPipeline': 300,
    # 'AQI.pipelines.AqiRedisPipeline': 400,
    # 'AQI.pipelines.AqiMongoPipeline': 500,
    'scrapy_redis.pipelines.RedisPipeline': 100
}
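With those four settings in place, a spider only has to read its start URLs from Redis instead of start_urls. A minimal sketch, assuming scrapy_redis is installed (the spider name, redis_key and parsing logic are illustrative):

# spiders/example_redis.py
from scrapy_redis.spiders import RedisSpider

class ExampleRedisSpider(RedisSpider):
    name = 'example_redis'                  # illustrative
    redis_key = 'example_redis:start_urls'  # Redis list the spider pops start URLs from

    def parse(self, response):
        # illustrative parsing: record the URL and the page title
        yield {'url': response.url, 'title': response.css('title::text').get()}

Start URLs are then seeded from redis-cli with something like LPUSH example_redis:start_urls https://example.com, and every worker running the same spider shares the request queue and the dedup set.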

VIII. settings.py reference

# Name of the Scrapy project. It is used to build the default User-Agent and for logging, and is set automatically when the project is created with the startproject command.
BOT_NAME = 'demo1'

# List of modules where Scrapy looks for spiders. Default: ['xxx.spiders']
SPIDER_MODULES = ['demo1.spiders']
# Module where new spiders created with the genspider command are placed. Default: 'xxx.spiders'
NEWSPIDER_MODULE = 'demo1.spiders'

# Default User-Agent used when crawling, unless overridden
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'

# Logging: level, stdout capture, encoding and output file
LOG_LEVEL = "INFO"
LOG_STDOUT = True
LOG_ENCODING = 'utf-8'
# path helper, e.g. os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
LOG_FILE = "SHANGSHIYAOPINGMULU_error.log"

# Disable redirects
REDIRECT_ENABLED = False



# Randomized download delay
import random
DOWNLOAD_DELAY = random.random() + random.random()
RANDOMIZE_DOWNLOAD_DELAY = True

# If enabled, Scrapy respects robots.txt policies
ROBOTSTXT_OBEY = True

# HTTP status codes that trigger a retry
RETRY_HTTP_CODES = [500, 520]

# Maximum number of concurrent requests performed by the Scrapy downloader. Default: 16
CONCURRENT_REQUESTS = 32

# Maximum crawl depth allowed; the current depth can be read from response.meta['depth']; 0 means no limit (see the short sketch below)
DEPTH_LIMIT = 3
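The depth mentioned above is tracked by DepthMiddleware and exposed through response.meta; a tiny sketch of reading it in a callback (spider name and URL are illustrative):

import scrapy

class DepthDemoSpider(scrapy.Spider):
    name = 'depth_demo'                    # illustrative
    start_urls = ['https://example.com']   # illustrative

    def parse(self, response):
        # 'depth' is set by DepthMiddleware; start requests are at depth 0
        self.logger.info("url=%s depth=%s", response.url, response.meta.get('depth', 0))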

# Crawl order: DEPTH_PRIORITY = 0 gives depth-first (LIFO, the default); 1 gives breadth-first (FIFO)
# Last in, first out: depth-first
DEPTH_PRIORITY = 0
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'
# First in, first out: breadth-first
DEPTH_PRIORITY = 1
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue'

# Scheduler (the default)
# from scrapy.core.scheduler import Scheduler
SCHEDULER = 'scrapy.core.scheduler.Scheduler'

# URL deduplication (custom dupefilter)
# DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'

# Configure a delay for requests to the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# Time the downloader waits before fetching the next page from the same website; use it to throttle the crawl and reduce server load. Fractions of a second are supported, e.g. 0.25
DOWNLOAD_DELAY = 3

# Only one of the two per-site limits below takes effect
# Maximum number of concurrent requests to any single domain.
CONCURRENT_REQUESTS_PER_DOMAIN = 16
# Maximum number of concurrent requests to any single IP. If non-zero, CONCURRENT_REQUESTS_PER_DOMAIN is ignored and the limit is applied per IP instead of per domain. This also affects DOWNLOAD_DELAY: if CONCURRENT_REQUESTS_PER_IP is non-zero, the download delay is enforced per IP rather than per domain.
CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable the Telnet console (enabled by default)
TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
   'demo1.middlewares.Demo1SpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'demo1.middlewares.MyCustomDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
EXTENSIONS = {
   'scrapy.extensions.telnet.TelnetConsole': None,
}

# Configure item pipelines (a minimal pipeline sketch follows the dict below)
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'demo1.pipelines.Demo1Pipeline': 300,
}
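For reference, the pipeline class named above is defined in the project's pipelines.py; a minimal sketch of what Demo1Pipeline might look like (the body is illustrative):

# pipelines.py
class Demo1Pipeline(object):
    def process_item(self, item, spider):
        # clean / validate / persist the item here, then pass it on
        return item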

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True

# Initial download delay
AUTOTHROTTLE_START_DELAY = 5

# Maximum download delay
AUTOTHROTTLE_MAX_DELAY = 10

# Disable retries
RETRY_ENABLED = False

# Download timeout (seconds)
DOWNLOAD_TIMEOUT = 10

# Number of times to retry a failed request
RETRY_TIMES = 5

# Maximum download delay to allow in case of high latency
AUTOTHROTTLE_MAX_DELAY = 60


# Average number of requests Scrapy should send in parallel to each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0

# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# Whether to enable the HTTP cache
HTTPCACHE_ENABLED = True
# Cache policy: cache every request; repeated requests are answered from the cache (DummyPolicy)
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
# Cache policy: cache according to HTTP response headers such as Cache-Control and Last-Modified (RFC2616Policy)
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
# Cache expiration time in seconds (0 means cached responses never expire)
HTTPCACHE_EXPIRATION_SECS = 0
# Directory where cached responses are stored
HTTPCACHE_DIR = 'httpcache'
# HTTP status codes that should not be cached
HTTPCACHE_IGNORE_HTTP_CODES = []
# Cache storage backend
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


"""
19. 代理,須要在環境變量中設置
    from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

    方式一:使用默認
        os.environ
        {
            http_proxy:http://root:woshiniba@192.168.11.11:9999/
            https_proxy:http://192.168.11.11:9999/
        }
    方式二:使用自定義下載中間件

    def to_bytes(text, encoding=None, errors='strict'):
        if isinstance(text, bytes):
            return text
        if not isinstance(text, six.string_types):
            raise TypeError('to_bytes must receive a unicode, str or bytes '
                            'object, got %s' % type(text).__name__)
        if encoding is None:
            encoding = 'utf-8'
        return text.encode(encoding, errors)

    class ProxyMiddleware(object):
        def process_request(self, request, spider):
            PROXIES = [
                {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
            ]
            proxy = random.choice(PROXIES)
            if proxy['user_pass'] is not None:
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
                encoded_user_pass = base64.encodestring(to_bytes(proxy['user_pass']))
                request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                print "**************ProxyMiddleware have pass************" + proxy['ip_port']
            else:
                print "**************ProxyMiddleware no pass************" + proxy['ip_port']
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

    DOWNLOADER_MIDDLEWARES = {
       'step8_king.middlewares.ProxyMiddleware': 500,
    }

"""

"""
20. HTTPS access
    There are two cases when accessing sites over HTTPS:
    1. The target site uses a trusted certificate (supported by default)
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"

    2. The target site requires a custom certificate
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

        # https.py
        from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
        from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

        class MySSLFactory(ScrapyClientContextFactory):
            def getCertificateOptions(self):
                from OpenSSL import crypto
                v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                return CertificateOptions(
                    privateKey=v1,  # PKey object
                    certificate=v2,  # X509 object
                    verify=False,
                    method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                )
    Other:
        Related classes
            scrapy.core.downloader.handlers.http.HttpDownloadHandler
            scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
            scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
        Related settings
            DOWNLOADER_HTTPCLIENTFACTORY
            DOWNLOADER_CLIENTCONTEXTFACTORY

"""



"""
21. Spider middlewares
    class SpiderMiddleware(object):

        def process_spider_input(self,response, spider):
            '''
            Called once the response has been downloaded, before it is handed to the spider's callback (e.g. parse)
            :param response: 
            :param spider: 
            :return: 
            '''
            pass

        def process_spider_output(self,response, result, spider):
            '''
            Called with the result returned by the spider
            :param response:
            :param result:
            :param spider:
            :return: must return an iterable containing Request or Item objects
            '''
            return result

        def process_spider_exception(self,response, exception, spider):
            '''
            異常調用
            :param response:
            :param exception:
            :param spider:
            :return: None to let later middlewares handle the exception, or an iterable containing Response or Item objects, which is handed to the scheduler or the pipelines
            '''
            return None


        def process_start_requests(self,start_requests, spider):
            '''
            爬蟲啓動時調用
            :param start_requests:
            :param spider:
            :return: an iterable containing Request objects
            '''
            return start_requests

    Built-in spider middlewares:
        'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
        'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
        'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
        'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
        'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,

"""
# from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
   # 'step8_king.middlewares.SpiderMiddleware': 543,
}


"""
22. Downloader middlewares
    class DownMiddleware1(object):
        def process_request(self, request, spider):
            '''
            Called for every request passing through the downloader middlewares before it is downloaded
            :param request:
            :param spider:
            :return:
                None: continue with the remaining middlewares and download the request
                Response object: stop calling process_request and start calling process_response
                Request object: stop the middleware chain and send the request back to the scheduler
                raise IgnoreRequest: stop calling process_request and start calling process_exception
            '''
            pass



        def process_response(self, request, response, spider):
            '''
            Called with the response returned from the downloader
            :param request:
            :param response:
            :param spider:
            :return:
                Response object: passed on to the process_response of the remaining middlewares
                Request object: stop the middleware chain; the request is rescheduled for download
                raise IgnoreRequest: Request.errback is called
            '''
            print('response1')
            return response

        def process_exception(self, request, exception, spider):
            '''
            Called when a download handler or process_request() (of a downloader middleware) raises an exception
            :param request:
            :param exception:
            :param spider:
            :return:
                None: pass the exception on to the remaining middlewares
                Response object: stop calling further process_exception methods
                Request object: stop the middleware chain; the request will be rescheduled for download
            '''
            return None


    Default downloader middlewares
    {
        'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
        'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
        'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
        'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
        'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
        'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
        'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
        'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
        'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
        'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
        'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
        'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
        'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
        'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
    }

"""
# from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'step8_king.middlewares.DownMiddleware1': 100,
#    'step8_king.middlewares.DownMiddleware2': 500,
# }