Installing scrapy and scrapy_redis

Install sqlite; scrapy needs this module.

yum install sqlite-devel

Python 3.5

Download the source package and compile and install it yourself.

./configure

make

make install
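
A quick sanity check, assuming make install has put python3 on the PATH: the new interpreter should run and should have picked up the sqlite3 module built against sqlite-devel.

python3 -V

python3 -c "import sqlite3; print(sqlite3.sqlite_version)"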

 

pip comes bundled with Python 3; upgrade it to the latest version.

pip3 install --upgrade pip

 

MySQL module for Python 3

pip3 install pymysql
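
A minimal sketch of using pymysql under Python 3, for example from a scrapy pipeline; the host, credentials and database name below are placeholders.

import pymysql

# Connect to MySQL; adjust host/user/password/db to your own server.
conn = pymysql.connect(host='127.0.0.1', user='root', password='secret',
                       db='test', charset='utf8mb4')
try:
    with conn.cursor() as cursor:
        # Simple round-trip to prove the connection works.
        cursor.execute('SELECT VERSION()')
        print(cursor.fetchone())
finally:
    conn.close()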

 

Install Twisted, the asynchronous networking framework that scrapy runs on.

wget https://pypi.python.org/packages/6b/23/8dbe86fc83215015e221fbd861a545c6ec5c9e9cd7514af114d1f64084ab/Twisted-16.4.1.tar.bz2#md5=c6d09bdd681f538369659111f079c29d

Unpack it.

tar -jxvf Twisted-16.4.1.tar.bz2

Enter the directory.
cd Twisted-16.4.1

Install it.

python3 setup.py install
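
To confirm that Twisted is importable from Python 3:

python3 -c "import twisted; print(twisted.version)"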

 

Install scrapy.

pip3 install scrapy
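
Verify the install and, if you like, scaffold a project; the project name below is just an example.

scrapy version

scrapy startproject myproject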

 

Install redis.

 yum install redis
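
Start the service and check that it answers; which start command applies depends on whether the system uses systemd.

systemctl start redis    # or: service redis start

redis-cli ping    # should reply PONG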

 

Install scrapy-redis.

git clone https://github.com/rolando/scrapy-redis.git

cd scrapy-redis/

python3 setup.py install
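
To make a project actually schedule through Redis, its settings.py has to point the scheduler and duplicate filter at scrapy-redis. A typical minimal configuration looks like the following; the Redis address is an assumption, change it to your server.

# settings.py
# Schedule requests and deduplicate fingerprints through Redis so several
# crawler processes can share one queue.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Keep the queue and dupefilter in Redis between runs instead of flushing them.
SCHEDULER_PERSIST = True
# Redis server to connect to (local default assumed).
REDIS_URL = 'redis://127.0.0.1:6379'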

 

A bug caused by the string (bytes vs. str) differences between Python 2 and Python 3; below is the temporary workaround from GitHub.

# utils.py
import six


def bytes_to_str(s, encoding='utf-8'):
    """Returns a str if a bytes object is given."""

    if six.PY3 and isinstance(s, bytes):
        return s.decode(encoding)

    return s

  

# spiders.py
import six

from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider

from . import connection
from .utils import bytes_to_str

# Default batch size matches default concurrent requests setting.
DEFAULT_START_URLS_BATCH_SIZE = 16
DEFAULT_START_URLS_KEY = '%(name)s:start_urls'


class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    # Per spider redis key, default to DEFAULT_KEY.
    redis_key = None
    # Fetch this amount of start urls when idle. Default to DEFAULT_BATCH_SIZE.
    redis_batch_size = None
    redis_encoding = 'utf-8'
    # Redis client instance.
    server = None

    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.

        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return

        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)

        if crawler is None:
            raise ValueError("crawler is required")

        settings = crawler.settings

        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', DEFAULT_START_URLS_KEY,
            )

        self.redis_key = self.redis_key % {'name': self.name}

        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")

        if self.redis_batch_size is None:
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE', DEFAULT_START_URLS_BATCH_SIZE,
            )

        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")

        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s)", self.__dict__)

        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Returns a request to be scheduled or none."""
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET')
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        while found < self.redis_batch_size:
            data = fetch_one(self.redis_key)
            if not data:
                # Queue empty.
                break
            req = self.make_request_from_data(data)
            if req:
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    def make_request_from_data(self, data):
        # By default, data is an URL.

        if not isinstance(data, six.string_types):
            # XXX: Shall we log and continue?
            self.logger.error("Wrong type for data: %s" % type(data))
            url = bytes_to_str(data, self.redis_encoding)
        else:
            url = data

        # FIXME: This is a naive guard against using a wrong redis_key where
        # data are not string URLs.
        if '://' not in url:
            # XXX: Shall this be an exception?
            self.logger.error("Missing scheme in URL: '%s'", url)

        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider


class RedisSpider(RedisMixin, Spider):
    """Spider that reads urls from redis queue when idle."""

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisSpider, cls).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj


class RedisCrawlSpider(RedisMixin, CrawlSpider):
    """Spider that reads urls from redis queue when idle."""

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisCrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj
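
With the classes above, a spider only has to inherit RedisSpider and declare which Redis key to read; the spider name, key and URL below are purely illustrative.

# myspider.py (illustrative)
from scrapy_redis.spiders import RedisSpider


class MySpider(RedisSpider):
    name = 'myspider'
    # Key that next_requests() pops URLs from; defaults to '%(name)s:start_urls' if omitted.
    redis_key = 'myspider:start_urls'

    def parse(self, response):
        # Emit a trivial item per crawled page.
        yield {'url': response.url}

Run it with scrapy crawl myspider, then push start URLs from the Redis side and the idle spider will pick them up:

redis-cli lpush myspider:start_urls http://example.com/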