Crawling all Zhihu user information with Scrapy

# -*- coding: utf-8 -*-
# Crawl all Zhihu user information with Scrapy
# 1: Set ROBOTSTXT_OBEY to False so the robots.txt rules are not enforced
# 2: Add the headers the crawl needs: User-Agent and authorization
# 3: Decide on the crawl target, i.e. which user fields we want
# 4: Crawling strategy
# Overall idea: start from a well-known seed user ("big V") and fetch their followees list and followers list;
# parsing those lists yields every user's url_token, from which each user's detail URL is built.
# Requesting that URL yields the user's detail info, and at the same time that user's own followees
# and followers lists can be requested, so the crawl keeps expanding.
# Step by step:
# 4-1: Start from the seed user, request their page, and page through all of their followees and followers lists
# 4-2: List step: parse the followees/followers lists, take every user's url_token, build the user URL, and run user step 4-3
# 4-3: User step: parse the user URL; this yields 1. the user's detail info and 2. all of that user's
#      followees/followers lists, which feed back into list step 4-2
# 4-4: Store items in MongoDB as they arrive, with deduplication.
import json
import scrapy
from zhihu2.items import Zhihu2Item

class ZhihuuserSpider(scrapy.Spider):
    name = 'zhihuuser'
    allowed_domains = ['www.zhihu.com']
    start_urls = ['http://www.zhihu.com/']
    start_user = 'excited-vczh'
    # I: Building the request for a user's followees list
    # start_user is the seed user, followees_include is the include query parameter, limit is the number of users per page (default 20), offset is the paging offset (0 for the first page)
    followees_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}'
    followees_include = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'

    # II: Building the request for a user's followers list
    # start_user is the seed user, followers_include is the include query parameter, limit is the number of users per page (default 20), offset is the paging offset (0 for the first page)
    followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}'
    followers_include = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'

    # III: Building the request for a user's detail info
    user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
    user_include = 'allow_message,is_followed,is_following,is_org,is_blocking,employments,answer_count,follower_count,articles_count,gender,badge[?(type=best_answerer)].topics'
    def start_requests(self):
        # Example list URL and user URL below, to verify that the endpoints can actually be crawled
        # Example followees-list URL
        # A 401 response means the request failed identity verification; Zhihu requires the client to identify itself, so the authorization header has to be set in settings.py (a standalone check with requests is sketched after the spider code)
        # url='https://www.zhihu.com/api/v4/members/excited-vczh/followees?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics&offset=60&limit=20'
        # Example user-detail URL
        # url='https://www.zhihu.com/api/v4/members/lanfengxing?include=allow_message%2Cis_followed%2Cis_following%2Cis_org%2Cis_blocking%2Cemployments%2Canswer_count%2Cfollower_count%2Carticles_count%2Cgender%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics'
        # yield scrapy.Request(url, callback=self.parse)
        # Build the followees-list request for the seed user, mainly using str.format

        yield scrapy.Request(url=self.followees_url.format(user=self.start_user, include=self.followees_include, offset=0, limit=20), callback=self.parse_followees)
        # Build the followers-list request for the seed user, mainly using str.format
        yield scrapy.Request(url=self.followers_url.format(user=self.start_user, include=self.followers_include, offset=0, limit=20),callback=self.parse_followers)
        # Build the user-detail request for the seed user
        yield scrapy.Request(url=self.user_url.format(user=self.start_user, include=self.user_include),callback=self.parse_user)
    # Parse the followees list
    def parse_followees(self, response):
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                # Take each followee's url_token and build a request for their detail info
                yield scrapy.Request(url=self.user_url.format(user=result.get('url_token'), include=self.user_include),callback=self.parse_user)
        # Build the next-page request; the API's paging.next field gives the ready-made URL
        if 'paging' in results.keys() and results.get('paging').get('is_end') is False:
            next = results.get('paging').get('next')
            yield scrapy.Request(url=next, callback=self.parse_followees)

    # Parse the followers list
    def parse_followers(self, response):
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                # Take each follower's url_token and build a request for their detail info
                yield scrapy.Request(url=self.user_url.format(user=result.get('url_token'), include=self.user_include),
                                     callback=self.parse_user)
        # Build the next-page request
        if 'paging' in results.keys() and results.get('paging').get('is_end') is False:
            next = results.get('paging').get('next')
            yield scrapy.Request(url=next, callback=self.parse_followers)

    # Parse the user detail info; since the goal of the crawl is the user details, decide here which
    # fields are needed and declare them in items.py
    def parse_user(self, response):
        item = Zhihu2Item()
        # The response body is JSON, so parse it
        results = json.loads(response.text)
        # Iterate over the item's declared field names; item.fields returns all of them
        for field in item.fields:
            # If a field name also appears in the response, copy the value over
            if field in results.keys():
                item[field]=results.get(field)
        yield item

        # Request this user's own followees list
        yield scrapy.Request(url=self.followees_url.format(user=results.get('url_token'), include=self.followees_include, offset=0, limit=20), callback=self.parse_followees)
        # Request this user's own followers list
        yield scrapy.Request(url=self.followers_url.format(user=results.get('url_token'), include=self.followers_include, offset=0, limit=20), callback=self.parse_followers)
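
The two example URLs commented out in start_requests are handy for a quick sanity check outside Scrapy. A minimal sketch, assuming the requests library is installed and that Zhihu still accepts the public authorization token configured in settings.py further down:

import requests

# Standalone check of the user-detail endpoint used by parse_user (illustrative only)
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
}
url = 'https://www.zhihu.com/api/v4/members/excited-vczh?include=answer_count,follower_count,gender'
resp = requests.get(url, headers=headers)
print(resp.status_code)   # 401 means the authorization header was missing or rejected
print(resp.json())        # otherwise a JSON object containing the requested fields

The items.py file shown next declares which of the returned JSON fields are actually kept.
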
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy

# Fields of user information we want to collect
class Zhihu2Item(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()

    # Name
    name = scrapy.Field()
    # Gender
    gender = scrapy.Field()
    # Employment history
    employments = scrapy.Field()
    # Badges (e.g. best answerer topics)
    badge = scrapy.Field()
    # One-line self introduction
    headline = scrapy.Field()
    # Number of followers
    follower_count = scrapy.Field()
    # Number of answers
    answer_count = scrapy.Field()
    # Number of articles
    articles_count = scrapy.Field()
    # Avatar
    avatar_url = scrapy.Field()
    avatar_url_template = scrapy.Field()
    # id
    id = scrapy.Field()
    # Registration type
    type = scrapy.Field()
    # Registration URL
    url = scrapy.Field()
    # Profile page slug, the unique identifier
    url_token = scrapy.Field()
    # User type
    user_type = scrapy.Field()
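
parse_user copies a value only when a declared field name also appears in the API response, so any keys that are not declared above are silently dropped. A minimal sketch of that idiom with an illustrative dict (not real API data):

from zhihu2.items import Zhihu2Item

sample = {'name': 'someone', 'gender': 1, 'not_declared': 'ignored'}   # illustrative only
item = Zhihu2Item()
for field in item.fields:      # item.fields lists every declared Field
    if field in sample:
        item[field] = sample[field]
print(dict(item))              # only the declared keys survive: name and gender

The pipeline below (pipelines.py) is what finally writes these items to MongoDB.
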
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
# The item pipeline processes every item produced by the spider; this class stores them in MongoDB
class MongoPipeline(object):

    # Initialize the pipeline with mongo_uri and mongo_db; both are supplied by the class method below
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    # Class method that reads MONGO_URI and MONGO_DB from the project settings
    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    # Create the MongoDB client and database handle when the spider starts
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    # Main storage step, returning the item (or raising DropItem); an upsert keyed on url_token
    # handles deduplication: update the record if it already exists, otherwise insert a new one
    def process_item(self, item, spider):
        name = item.__class__.__name__
        self.db[name].update_one({'url_token': item['url_token']}, {'$set': dict(item)}, upsert=True)
        return item

    # Close the MongoDB connection when the spider finishes
    def close_spider(self, spider):
        self.client.close()
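
Once the crawl has run for a while, the upsert-based deduplication can be verified directly with pymongo. A minimal sketch, assuming MongoDB is running locally with the MONGO_URI and MONGO_DB values from the settings.py below and a reasonably recent pymongo (3.7+):

import pymongo

client = pymongo.MongoClient('localhost')
db = client['zhihu2']
collection = db['Zhihu2Item']      # the collection is named after the item class
print(collection.count_documents({}))                              # total users stored so far
print(collection.count_documents({'url_token': 'excited-vczh'}))   # at most 1 thanks to the upsert
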
# -*- coding: utf-8 -*-

# Scrapy settings for zhihu2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'zhihu2'

SPIDER_MODULES = ['zhihu2.spiders']
NEWSPIDER_MODULE = 'zhihu2.spiders'

MONGO_URI = 'localhost'
MONGO_DB = 'zhihu2'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhihu2 (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:

DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
  'authorization':'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'zhihu2.middlewares.Zhihu2SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'zhihu2.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'zhihu2.pipelines.MongoPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
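
With these settings in place the project is normally started with "scrapy crawl zhihuuser" from the project root. A minimal sketch of launching it programmatically instead, assuming the spider module lives at zhihu2/spiders/zhihuuser.py:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from zhihu2.spiders.zhihuuser import ZhihuuserSpider   # assumed module path

process = CrawlerProcess(get_project_settings())   # picks up the settings.py above
process.crawl(ZhihuuserSpider)
process.start()                                    # blocks until the crawl finishes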