Preventing IP Bans When Scraping Large Amounts of Data with Python

Following my usual routine, I spent the last couple of days scraping some data from Zhubajie (URL: http://task.zbj.com/t-ppsj/p1s5.html). Probably because I pulled a bit too much data, my IP got banned, and I had to pass a manual verification to get it unblocked, which obviously stopped me from scraping any more.

(Screenshot: Zhubajie's IP-ban notice)

Below is my Zhubajie scraper, the code that got my IP banned:

# coding=utf-8
import requests
from lxml import etree

def getUrl():
    # iterate over listing pages 1..33
    for i in range(33):
        url = 'http://task.zbj.com/t-ppsj/p{}s5.html'.format(i+1)
        spiderPage(url)

def spiderPage(url):
    if url is None:
        return None

    htmlText = requests.get(url).text
    selector = etree.HTML(htmlText)
    tds = selector.xpath('//*[@class="tab-switch tab-progress"]/table/tr')
    try:
        for td in tds:
            price = td.xpath('./td/p/em/text()')
            href = td.xpath('./td/p/a/@href')
            title = td.xpath('./td/p/a/text()')
            subTitle = td.xpath('./td/p/text()')
            deadline = td.xpath('./td/span/text()')
            price = price[0] if len(price)>0 else ''    # Python conditional expression: value_if_true if condition else value_if_false
            title = title[0] if len(title)>0 else ''
            href = href[0] if len(href)>0 else ''
            subTitle = subTitle[0] if len(subTitle)>0 else ''
            deadline = deadline[0] if len(deadline)>0 else ''
            print(price, title, href, subTitle, deadline)
            print('---------------------------------------------------------------------------------------')
            spiderDetail(href)
    except Exception:
        print('Error')

def spiderDetail(url):
    if url is None:
        return None

    try:
        htmlText = requests.get(url).text
        selector = etree.HTML(htmlText)
        aboutHref = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/a/@href')
        price = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/text()')
        title = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/h2/text()')
        contentDetail = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/div[1]/text()')
        publishDate = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/p/text()')
        aboutHref = aboutHref[0] if len(aboutHref) > 0 else ''  # Python conditional expression: value_if_true if condition else value_if_false
        price = price[0] if len(price) > 0 else ''
        title = title[0] if len(title) > 0 else ''
        contentDetail = contentDetail[0] if len(contentDetail) > 0 else ''
        publishDate = publishDate[0] if len(publishDate) > 0 else ''
        print(aboutHref, price, title, contentDetail, publishDate)
    except Exception:
        print('Error')

if __name__ == '__main__':
    getUrl()

After the code finished running I noticed that the last few pages had not been scraped, and I could no longer reach the Zhubajie site at all; only after a while could I visit it again. Awkward. I had to find a way to keep my IP from being banned.

How do you avoid getting your IP banned by a site while scraping? I looked around and found a few tricks.

1. Modify the request headers

My earlier scraper sent no headers at all. Here I add a User-Agent header so the request masquerades as a browser visiting the site (a variation that rotates several User-Agent strings is sketched after this snippet):

# inside spiderPage: send a browser User-Agent with every request
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'
headers = {'User-Agent': user_agent}
htmlText = requests.get(url, headers=headers).text
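Some sites also flag a User-Agent that repeats on every single request, so rotating through a few common UA strings can help. A minimal sketch; the UA strings and the fetch helper are illustrative, not part of the original scraper:

import random
import requests

# a small pool of common desktop browser User-Agent strings (examples only)
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
]

def fetch(url):
    # send a different browser User-Agent on each request
    headers = {'User-Agent': random.choice(USER_AGENTS)}
    return requests.get(url, headers=headers).text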
2. Use proxy IPs

Once your own IP has been banned by a site, the only way to keep scraping is through proxy IPs, so scrape through a proxy whenever you can: if one proxy gets banned, there are always more.

Here I borrowed a snippet from this blog post to generate proxy IP addresses: http://blog.csdn.net/lammonpeter/article/details/52917264

It generates proxy IPs, and you can take the code and use it as-is:

# coding=utf-8
# Proxy IPs are taken from the domestic high-anonymity proxy site: http://www.xicidaili.com/nn/
# Scraping just the first page of IPs is enough for typical use

from bs4 import BeautifulSoup
import requests
import random

def get_ip_list(url, headers):
    # each <tr> after the header row holds one proxy,
    # with the IP in the 2nd column and the port in the 3rd
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        ip_list.append(tds[1].text + ':' + tds[2].text)
    return ip_list

def get_random_ip(ip_list):
    # prefix each address with the scheme and pick one at random
    proxy_list = []
    for ip in ip_list:
        proxy_list.append('http://' + ip)
    proxy_ip = random.choice(proxy_list)
    proxies = {'http': proxy_ip}
    return proxies

if __name__ == '__main__':
    url = 'http://www.xicidaili.com/nn/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
    }
    ip_list = get_ip_list(url, headers=headers)
    proxies = get_random_ip(ip_list)
    print(proxies)

With the code above I generated a batch of proxy IPs (some of them may be dead, but as long as my own IP doesn't get banned, I'm happy). I can then attach these addresses to my requests. A quick liveness check, sketched below, helps filter out the dead ones.
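A minimal sketch of such a check; httpbin.org is just a convenient echo service and is not part of the original code:

import requests

def is_proxy_alive(proxy, timeout=3):
    # a proxy counts as alive if a simple request through it succeeds in time
    try:
        r = requests.get('http://httpbin.org/ip',
                         proxies={'http': proxy}, timeout=timeout)
        return r.status_code == 200
    except requests.RequestException:
        return False

# e.g. keep only the responsive proxies from get_ip_list's output:
# live_proxies = [p for p in ip_list if is_proxy_alive('http://' + p)]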

Adding a proxy IP to our request:

# requests uses at most one proxy per URL scheme, so a dict with duplicate
# 'http' keys would silently keep only the last entry; use a single proxy
# here (a sketch for rotating through several follows below)
proxies = {'http': 'http://124.72.109.183:8118'}
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'
headers = {'User-Agent': user_agent}
htmlText = requests.get(url, headers=headers, timeout=3, proxies=proxies).text
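If you have more than one working proxy, you can rotate them per request instead of hard-coding a single address. A minimal sketch, reusing the two addresses above as placeholders:

import random
import requests

# pool of candidate proxies (placeholder addresses)
proxy_pool = [
    'http://124.72.109.183:8118',
    'http://49.85.1.79:31666',
]

def get_with_random_proxy(url, headers, timeout=3):
    # pick a different proxy for each request, so one banned
    # proxy does not stop the whole crawl
    proxies = {'http': random.choice(proxy_pool)}
    return requests.get(url, headers=headers, timeout=timeout, proxies=proxies)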

Those are the tricks I know of so far.

The complete final code:

# coding=utf-8

import requests
import time
from lxml import etree

def getUrl():
    for i in range(33):
        url = 'http://task.zbj.com/t-ppsj/p{}s5.html'.format(i+1)
        spiderPage(url)
        time.sleep(1)   # pause between pages so the requests aren't a rapid burst

def spiderPage(url):
    if url is None:
        return None

    try:
        # a single working proxy for every listing request
        proxies = {
            'http': 'http://221.202.248.52:80',
        }
        user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'

        headers = {'User-Agent': user_agent}
        htmlText = requests.get(url, headers=headers,proxies=proxies).text

        selector = etree.HTML(htmlText)
        tds = selector.xpath('//*[@class="tab-switch tab-progress"]/table/tr')
        for td in tds:
            price = td.xpath('./td/p/em/text()')
            href = td.xpath('./td/p/a/@href')
            title = td.xpath('./td/p/a/text()')
            subTitle = td.xpath('./td/p/text()')
            deadline = td.xpath('./td/span/text()')
            price = price[0] if len(price)>0 else ''    # Python conditional expression: value_if_true if condition else value_if_false
            title = title[0] if len(title)>0 else ''
            href = href[0] if len(href)>0 else ''
            subTitle = subTitle[0] if len(subTitle)>0 else ''
            deadline = deadline[0] if len(deadline)>0 else ''
            print(price, title, href, subTitle, deadline)
            print('---------------------------------------------------------------------------------------')
            spiderDetail(href)
    except Exception as e:
        print('Error', e)

def spiderDetail(url):
    if url is None:
        return None

    try:
        # reuse a browser User-Agent here too, so detail-page requests
        # don't stand out as script traffic
        user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'
        htmlText = requests.get(url, headers={'User-Agent': user_agent}).text
        selector = etree.HTML(htmlText)
        aboutHref = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/a/@href')
        price = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/text()')
        title = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/h2/text()')
        contentDetail = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/div[1]/text()')
        publishDate = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/p/text()')
        aboutHref = aboutHref[0] if len(aboutHref) > 0 else ''  # Python conditional expression: value_if_true if condition else value_if_false
        price = price[0] if len(price) > 0 else ''
        title = title[0] if len(title) > 0 else ''
        contentDetail = contentDetail[0] if len(contentDetail) > 0 else ''
        publishDate = publishDate[0] if len(publishDate) > 0 else ''
        print(aboutHref, price, title, contentDetail, publishDate)
    except Exception:
        print('Error')

if __name__ == '__main__':
    getUrl()

In the end the program ran without a hitch, and my IP was never banned again. Of course there is more to avoiding IP bans than this; it deserves further exploration!
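One obvious direction is throttling: the final code already imports time, and sleeping a random interval between requests makes the traffic look less like a burst from a single script. A minimal sketch (the delay bounds are arbitrary):

import random
import time
import requests

def polite_get(url, headers=None, min_delay=1.0, max_delay=3.0):
    # wait a random 1-3 seconds before each request so the access
    # pattern is less regular than a tight loop
    time.sleep(random.uniform(min_delay, max_delay))
    return requests.get(url, headers=headers, timeout=5)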

Finally

Of course I did get the data scraped, but it isn't presented in any usable form yet. I should be writing it into an Excel file or a database so it can actually be put to use. So next, I plan to look at using Python to work with Excel, MySQL, and MongoDB.
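Until then, a minimal sketch of dumping scraped rows to a CSV file with the standard library (the field names and sample row are purely illustrative):

import csv

# rows as the scraper might collect them; the keys are illustrative
rows = [
    {'title': 'example task', 'price': '100', 'href': 'http://task.zbj.com/example'},
]

with open('zbj_tasks.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.DictWriter(f, fieldnames=['title', 'price', 'href'])
    writer.writeheader()
    writer.writerows(rows)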
