A multi-threaded Python Douban crawler with proxy IPs (free proxies are generally unstable)

I have been looking into Python web scraping recently, so here is a write-up of my own experience crawling movie information from Douban. (This is my first post like this!)

1. First, gather some user agents (user_agent) to rotate through.

self.user_agent = [
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
            "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
            "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
            "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
            "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
            "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
            "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
        ]

2. Find a site that lists free proxy IPs (note that the code below actually scrapes www.xicidaili.com):

www.kuaidaili.com/ops/proxyli…

3. A URL for checking whether a proxy IP actually works:

'http://icanhazip.com'
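The spider stores this URL as self.testIP but never actually calls it. As a minimal sketch of how such a check could work (the proxy address below is a made-up placeholder, not part of the original code), you could send a request through the proxy and see whether icanhazip.com answers:

import requests

def proxy_works(proxy_addr='http://1.2.3.4:8080'):
    # proxy_addr is hypothetical; use an address scraped from the proxy-list site
    proxies = {'http': proxy_addr, 'https': proxy_addr}
    try:
        # icanhazip.com simply echoes back the IP the request came from,
        # so any 200 response through the proxy means the proxy is usable
        r = requests.get('http://icanhazip.com', proxies=proxies, timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        return False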

4. The code below is a decorator that measures how long the crawl takes.

# Measure how long a function takes to run
def run_time(func):
    def wrapper(*args, **kw):
        start = time.time()
        func(*args, **kw)
        end = time.time()
        print('running', end-start, 's')
    return wrapper
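As a quick usage sketch (crawl_demo is just a placeholder, not part of the spider), the decorator defined above is applied like this:

@run_time
def crawl_demo():
    # stand-in for real crawling work; time is imported at the top of the full script
    time.sleep(1.5)

crawl_demo()
# prints something like: running 1.5004 s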

5. Define some constants and shared state.

def __init__(self):
        # Douban Top 250 start URL
        self.start_url = 'https://movie.douban.com/top250'
        # URL for fetching proxy IPs; the free proxies are not guaranteed to work
        self.getProxy =  'http://www.xicidaili.com/nn/1/'
        # URL for testing whether a proxy IP actually works
        self.testIP = 'http://icanhazip.com'
        # queue of Douban detail-page URLs to crawl
        self.qurl = Queue()
        # queue of scraped proxy IPs
        self.IPQurl = Queue()
        self.data = list()
        self.item_num = 5 # limit how many items are taken per page (this also caps the number of detail pages) to avoid hammering the site
        self.thread_num = 10 # number of threads crawling the detail pages
        self.first_running = True
        # proxy used for Douban requests
        self.proxy = {}
        # proxy used when fetching the proxy list itself
        self.proxy1 = {}
        # empty dict = no proxy
        self.notproxy = {}
        self.user_agent = [
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
            "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
            "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
            "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
            "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
            "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
            "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
        ]

6. Scrape proxy IPs and put them into a queue, so they are ready to hand when crawling Douban.

def get_proxy(self):
        url = self.getProxy
        try:
            # random.choice(self.user_agent) picks a random user agent for each request.
            # Free proxy IPs are unstable, so self.notproxy (no proxy) is used here; switch to self.proxy1 if your proxies are reliable.
            r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy,verify=False,timeout=1)
            r.encoding = 'utf-8'
            if (r.status_code == 200):
                soup = BeautifulSoup(r.content, 'html.parser')
                ip_list = soup.find_all('table', id='ip_list')
                if(len(ip_list)):
                    tr_list =  ip_list[0].find_all('tr')[1:10]
                    for i in tr_list:
                        td_list = i.find_all('td')
                        # the code assumes td[1] holds the IP, td[2] the port and td[5] the protocol,
                        # so each queue entry has the form 'PROTOCOL,ip:port'
                        temp = td_list[5].text + ',' + td_list[1].text + ':' +td_list[2].text
                        self.IPQurl.put(temp)
                    return True
                else:
                    print('ip_list table not found on the page')
                    return False
            else:
                print('Failed to fetch proxy IPs')
                return False
        except Exception as e:
            print('Error while fetching proxy IPs --', str(e))
            return False

7. Set the proxy IP values.

def set_proxy(self):
        if self.IPQurl.empty():
            # The proxy queue is empty, so scrape a fresh batch first.
            if self.get_proxy():
                # Each queue entry has the form 'PROTOCOL,ip:port'.
                arr = self.IPQurl.get().split(',')
                arr1 = self.IPQurl.get().split(',')
                if arr[0].find('HTTPS') == -1:
                    self.proxy = {arr[0].lower(): 'http://'+arr[1]}
                else:
                    self.proxy = {arr[0].lower(): 'https://'+arr[1]}
                if arr1[0].find('HTTPS') == -1:
                    self.proxy1 = {arr1[0].lower(): 'http://'+arr1[1]}
                else:
                    self.proxy1 = {arr1[0].lower(): 'https://'+arr1[1]}
            else:
                self.proxy = {}
                self.proxy1 = {}
        else:
            # Take the next proxy from the queue.
            arr = self.IPQurl.get().split(',')
            if arr[0].find('HTTPS') == -1:
                self.proxy = {arr[0].lower(): 'http://' + arr[1]}
            else:
                self.proxy = {arr[0].lower(): 'https://' + arr[1]}
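Each entry that get_proxy puts into IPQurl is a string of the form 'PROTOCOL,ip:port' (for example 'HTTPS,1.2.3.4:8080', with a made-up address); set_proxy turns it into the dict format that requests expects for its proxies argument. A minimal sketch of that mapping:

entry = 'HTTPS,1.2.3.4:8080'               # hypothetical queue entry: protocol,ip:port
protocol, address = entry.split(',')
scheme = protocol.lower()                  # 'https' or 'http'
prefix = 'https://' if 'HTTPS' in protocol else 'http://'
proxy = {scheme: prefix + address}
# proxy == {'https': 'https://1.2.3.4:8080'}, which is then passed as
# requests.get(url, proxies=proxy, ...)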

8. Crawl the Douban Top 250 list pages: each page's detail-page links are pushed onto the queue, and parse_first follows the "next page" link recursively.

def parse_first(self, url):
        print('crawling,parse_first', url)
        self.set_proxy()
        try:
            # Free proxy IPs are unstable, so self.notproxy (no proxy) is used here; switch to self.proxy if your proxies are reliable.
            r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)},proxies=self.notproxy,verify=False,timeout=5)
            r.encoding = 'utf-8'
            if r.status_code == 200:
                soup = BeautifulSoup(r.content, 'html.parser')
                # number of items taken from each page
                movies = soup.find_all('div', class_='info')[:self.item_num]
                for movie in movies:
                    url = movie.find('div', class_='hd').a['href']
                    self.qurl.put(url)

                nextpage = soup.find('span', class_='next').a
                if nextpage:
                    nexturl = self.start_url + nextpage['href']
                    self.parse_first(nexturl)
                else:
                    self.first_running = False
            else:
                print('IP has been blocked')
                self.proxy = {}
                self.proxy1 = {}
                self.first_running = False
        except Exception as e:
            self.proxy = {}
            self.proxy1 = {}
            self.first_running = False
            print('Proxy request failed --', str(e))

9. Time to crawl the actual movie details: each worker thread pulls a URL from the queue and appends the scraped record to a list.

def parse_second(self):
        # The loop stops once self.first_running is False and self.qurl is empty.
        while self.first_running or not self.qurl.empty():
            if not self.qurl.empty():
                url = self.qurl.get()
                print('crawling,parse_second', url)
                self.set_proxy()
                try:
                    r = requests.get(url,headers={'User-Agent': random.choice(self.user_agent)},proxies=self.notproxy,verify=False,timeout=5)
                    r.encoding = 'utf-8'
                    if r.status_code == 200:
                        soup = BeautifulSoup(r.content, 'html.parser')
                        mydict = {}
                        mydict['url'] = url
                        title = soup.find('span', property = 'v:itemreviewed')
                        mydict['title'] = title.text if title else None
                        duration = soup.find('span', property = 'v:runtime')
                        mydict['duration'] = duration.text if duration else None
                        addtime = soup.find('span', property = 'v:initialReleaseDate')
                        mydict['addtime'] = addtime.text if addtime else None
                        average = soup.find('strong',  property = 'v:average')
                        mydict['average'] = average.text if average else None
                        imgSrc = soup.find_all('div', id='mainpic')[0].img['src']
                        mydict['imgSrc'] = imgSrc if imgSrc else None
                        mydict['play'] = []
                        ul = soup.find_all('ul', class_='bs')
                        if len(ul):
                            li = ul[0].find_all('li')
                            for i in li:
                                obj = {
                                    'url':urllib.parse.unquote(i.a['href'].replace('https://www.douban.com/link2/?url=','')),
                                    'text':i.a.text.replace(' ', '').replace('\n','')
                                }
                                mydict['play'].append(obj)
                        self.data.append(mydict)

                        # sleep for a random interval to throttle the thread
                        time.sleep(random.random() * 5)
                    else:
                        print('IP has been blocked')
                except Exception as e:
                    self.proxy = {}
                    self.proxy1 = {}
                    print('Proxy request failed (2) --', str(e))

10. The function that actually drives everything is here: one thread crawls the list pages while thread_num daemon threads crawl the detail pages.

    # The run_time decorator reports how long the whole run takes
    @run_time
    def run(self):
        ths = []
        th1 = Thread(target=self.parse_first, args=(self.start_url, ))
        th1.start()
        ths.append(th1)

        for _ in range(self.thread_num):
            th = Thread(target=self.parse_second,)
            th.daemon = True
            th.start()
            ths.append(th)

        for th in ths:
            # wait for each thread to finish
            th.join()

        s = json.dumps(self.data, ensure_ascii=False, indent=4)
        with open('top250.json', 'w', encoding='utf-8') as f:
            f.write(s)

        print('Data crawling is finished.')

11. Run it.

if __name__ == '__main__':
    Spider().run()

12. All done. Here only 50 entries were crawled (5 per page over 10 pages), and it took 20 seconds.

The scraped data comes out in the following format.
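Based on the keys built up in parse_second, each entry in top250.json looks roughly like this (all values below are made up for illustration):

{
    "url": "https://movie.douban.com/subject/1234567/",
    "title": "Example Movie",
    "duration": "120分鐘",
    "addtime": "1999-10-01(中國大陸)",
    "average": "9.0",
    "imgSrc": "https://img1.doubanio.com/view/photo/s_ratio_poster/public/p0000000.jpg",
    "play": [
        {"url": "https://example.com/play/123", "text": "SomeSite"}
    ]
}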

13. Finally, here is the complete code. I hope it helps.

import time
import json
import random
import logging
import requests
import urllib.parse
from queue import Queue
from threading import Thread
from bs4 import BeautifulSoup

logging.captureWarnings(True)

# Crawl the Douban movie Top 250

# Measure how long a function takes to run
def run_time(func):
    def wrapper(*args, **kw):
        start = time.time()
        func(*args, **kw)
        end = time.time()
        print('running', end-start, 's')
    return wrapper

class Spider():
    def __init__(self):
        self.start_url = 'https://movie.douban.com/top250'
        # URL for fetching proxy IPs; the free proxies are not guaranteed to work
        self.getProxy =  'http://www.xicidaili.com/nn/1/'
        # URL for testing whether a proxy IP actually works
        self.testIP = 'http://icanhazip.com'
        self.qurl = Queue()
        self.IPQurl = Queue()
        self.data = list()
        self.item_num = 5 # limit how many items are taken per page (this also caps the number of detail pages) to avoid hammering the site
        self.thread_num = 10 # number of threads crawling the detail pages
        self.first_running = True
        # proxy used for Douban requests
        self.proxy = {}
        # proxy used when fetching the proxy list itself
        self.proxy1 = {}
        # empty dict = no proxy
        self.notproxy = {}
        self.user_agent = [
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
            "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
            "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
            "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
            "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
            "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
            "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
        ]

    def get_proxy(self):
        url = self.getProxy
        try:
            r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy,verify=False,timeout=1)
            r.encoding = 'utf-8'
            if (r.status_code == 200):
                soup = BeautifulSoup(r.content, 'html.parser')
                ip_list = soup.find_all('table', id='ip_list')
                if(len(ip_list)):
                    tr_list =  ip_list[0].find_all('tr')[1:10]
                    for i in tr_list:
                        td_list = i.find_all('td')
                        # the code assumes td[1] holds the IP, td[2] the port and td[5] the protocol,
                        # so each queue entry has the form 'PROTOCOL,ip:port'
                        temp = td_list[5].text + ',' + td_list[1].text + ':' +td_list[2].text
                        self.IPQurl.put(temp)
                    return True
                else:
                    print('ip_list table not found on the page')
                    return False
            else:
                print('Failed to fetch proxy IPs')
                return False
        except Exception as e:
            print('Error while fetching proxy IPs --', str(e))
            return False


    def set_proxy(self):
        if self.IPQurl.empty():
            # The proxy queue is empty, so scrape a fresh batch first.
            if self.get_proxy():
                # Each queue entry has the form 'PROTOCOL,ip:port'.
                arr = self.IPQurl.get().split(',')
                arr1 = self.IPQurl.get().split(',')
                if arr[0].find('HTTPS') == -1:
                    self.proxy = {arr[0].lower(): 'http://'+arr[1]}
                else:
                    self.proxy = {arr[0].lower(): 'https://'+arr[1]}
                if arr1[0].find('HTTPS') == -1:
                    self.proxy1 = {arr1[0].lower(): 'http://'+arr1[1]}
                else:
                    self.proxy1 = {arr1[0].lower(): 'https://'+arr1[1]}
            else:
                self.proxy = {}
                self.proxy1 = {}
        else:
            # Take the next proxy from the queue.
            arr = self.IPQurl.get().split(',')
            if arr[0].find('HTTPS') == -1:
                self.proxy = {arr[0].lower(): 'http://' + arr[1]}
            else:
                self.proxy = {arr[0].lower(): 'https://' + arr[1]}

    def parse_first(self, url):
        print('crawling,parse_first', url)
        self.set_proxy()
        try:
            r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)},proxies=self.notproxy,verify=False,timeout=5)
            r.encoding = 'utf-8'
            if r.status_code == 200:
                soup = BeautifulSoup(r.content, 'html.parser')
                # number of items taken from each page
                movies = soup.find_all('div', class_='info')[:self.item_num]
                for movie in movies:
                    url = movie.find('div', class_='hd').a['href']
                    self.qurl.put(url)

                nextpage = soup.find('span', class_='next').a
                if nextpage:
                    nexturl = self.start_url + nextpage['href']
                    self.parse_first(nexturl)
                else:
                    self.first_running = False
            else:
                print('IP has been blocked')
                self.proxy = {}
                self.proxy1 = {}
                self.first_running = False
        except Exception as e:
            self.proxy = {}
            self.proxy1 = {}
            self.first_running = False
            print('Proxy request failed --', str(e))

    def parse_second(self):
        while self.first_running or not self.qurl.empty():
            if not self.qurl.empty():
                url = self.qurl.get()
                print('crawling,parse_second', url)
                self.set_proxy()
                try:
                    r = requests.get(url,headers={'User-Agent': random.choice(self.user_agent)},proxies=self.notproxy,verify=False,timeout=5)
                    r.encoding = 'utf-8'
                    if r.status_code == 200:
                        soup = BeautifulSoup(r.content, 'html.parser')
                        mydict = {}
                        mydict['url'] = url
                        title = soup.find('span', property = 'v:itemreviewed')
                        mydict['title'] = title.text if title else None
                        duration = soup.find('span', property = 'v:runtime')
                        mydict['duration'] = duration.text if duration else None
                        addtime = soup.find('span', property = 'v:initialReleaseDate')
                        mydict['addtime'] = addtime.text if addtime else None
                        average = soup.find('strong',  property = 'v:average')
                        mydict['average'] = average.text if average else None
                        imgSrc = soup.find_all('div', id='mainpic')[0].img['src']
                        mydict['imgSrc'] = imgSrc if imgSrc else None
                        mydict['play'] = []
                        ul = soup.find_all('ul', class_='bs')
                        if len(ul):
                            li = ul[0].find_all('li')
                            for i in li:
                                obj = {
                                    'url':urllib.parse.unquote(i.a['href'].replace('https://www.douban.com/link2/?url=','')),
                                    'text':i.a.text.replace(' ', '').replace('\n','')
                                }
                                mydict['play'].append(obj)
                        self.data.append(mydict)

                        # sleep for a random interval to throttle the thread
                        time.sleep(random.random() * 5)
                    else:
                        print('IP has been blocked')
                except Exception as e:
                    self.proxy = {}
                    self.proxy1 = {}
                    print('Proxy request failed (2) --', str(e))
    @run_time
    def run(self):
        ths = []
        th1 = Thread(target=self.parse_first, args=(self.start_url, ))
        th1.start()
        ths.append(th1)

        for _ in range(self.thread_num):
            th = Thread(target=self.parse_second,)
            th.daemon = True
            th.start()
            ths.append(th)

        for th in ths:
            # wait for each thread to finish
            th.join()

        s = json.dumps(self.data, ensure_ascii=False, indent=4)
        with open('top250.json', 'w', encoding='utf-8') as f:
            f.write(s)

        print('Data crawling is finished.')

if __name__ == '__main__':
    Spider().run()
