Crawler captcha handling, image lazy loading, Selenium and PhantomJS, the requests module's session, and thread pools

I. Captcha handling

1. Workflow for handling captchas with the YunDaMa (雲打碼) platform:

1. Crawl the page data that carries the captcha
2. Parse the captcha out of the page data and download the captcha image locally
3. Submit the captcha image to the third-party platform for recognition, which returns the characters shown on the image
    - YunDaMa platform:
        - 1. Register on the official site (both a regular-user account and a developer account)
        - 2. Log in with the developer account:
            - 1. Download the sample code (Development docs -> Invocation examples & latest DLL -> PythonHTTP example download)
            - 2. Create a software entry: My Software -> Add new software
        - 3. Modify the code in the sample's source file so it recognizes the characters in your captcha image

Example:

import http.client, mimetypes, urllib, json, time, requests

######################################################################

class YDMHttp:

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username  
        self.password = password
        self.appid = str(appid)
        self.appkey = appkey

    def request(self, fields, files={}):
        response = self.post_url(self.apiurl, fields, files)
        response = json.loads(response)
        return response
    
    def balance(self):
        data = {'method': 'balance', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['balance']
        else:
            return -9001
    
    def login(self):
        data = {'method': 'login', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['uid']
        else:
            return -9001

    def upload(self, filename, codetype, timeout):
        data = {'method': 'upload', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'codetype': str(codetype), 'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['cid']
        else:
            return -9001

    def result(self, cid):
        data = {'method': 'result', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return response and response['text'] or ''

    def decode(self, filename, codetype, timeout):
        cid = self.upload(filename, codetype, timeout)
        if (cid > 0):
            for i in range(0, timeout):
                result = self.result(cid)
                if (result != ''):
                    return cid, result
                else:
                    time.sleep(1)
            return -3003, ''
        else:
            return cid, ''

    def report(self, cid):
        data = {'method': 'report', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if (response):
            return response['ret']
        else:
            return -9001

    def post_url(self, url, fields, files={}):
        for key in files:
            files[key] = open(files[key], 'rb')
        res = requests.post(url, files=files, data=fields)
        return res.text

 

def getCodeDate(userName,pwd,codePath,codeType):
    # Username (regular-user account)
    username    = userName

    # Password
    password    = pwd

    # Software ID, a required parameter for the developer revenue share. Get it from [My Software] in the developer console.
    appid       = 6003

    # Software key, a required parameter for the developer revenue share. Get it from [My Software] in the developer console.
    appkey      = '1f4b564483ae5c907a1d34f8e2f2776c'

    # Image file
    filename    = codePath

    # Captcha type, e.g. 1004 means 4 alphanumeric characters. Pricing differs by type; fill it in accurately or recognition accuracy suffers. All types are listed at http://www.yundama.com/price.html
    codetype    = codeType

    # Timeout, in seconds
    timeout     = 2
    result = None
    # Sanity check
    if (username == 'username'):
        print('Please set the parameters above before testing')
    else:
        # Initialize
        yundama = YDMHttp(username, password, appid, appkey)

        # Log in to YunDaMa
        uid = yundama.login()
        #print('uid: %s' % uid)

        # Check the account balance
        balance = yundama.balance()
        #print('balance: %s' % balance)

        # Start recognition: image path, captcha type ID, timeout (seconds); returns cid and the result
        cid, result = yundama.decode(filename, codetype, timeout)
        #print('cid: %s, result: %s' % (cid, result))
    return result

 

# Simulated login to Renren
import requests
import urllib.request
from lxml import etree
# Get a session object
session = requests.Session()
# Download the captcha image
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'http://www.renren.com/'
page_text = requests.get(url=url,headers=headers).text

tree = etree.HTML(page_text)
code_img_url = tree.xpath('//*[@id="verifyPic_login"]/@src')[0]
urllib.request.urlretrieve(url=code_img_url,filename='code.jpg')

# Recognize the characters in the captcha image
code_data = getCodeDate('bobo328410948','bobo328410948','./code.jpg',2004)

# Simulated login
login_url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=201914927558'
data = {
    "email":"www.zhangbowudi@qq.com",
    "icode":code_data,
    "origURL":"http://www.renren.com/home",
    "domain":"renren.com",
    "key_id":"1",
    "captcha_type":"web_login",
    "password":"4f0350f09aeffeef86307747218b214b0960bdf35e30811c0d611fe39db96ec1",
    "rkey":"9e75e8dc3457b14c55a74627fa64fb43",
    "f":"http%3A%2F%2Fwww.renren.com%2F289676607",
}
# The cookies produced by this request are stored into the session object automatically
session.post(url=login_url,data=data,headers=headers)

url = 'http://www.renren.com/289676607/profile'
page_text = session.get(url=url,headers=headers).text

with open('renren.html','w',encoding='utf-8') as fp:
    fp.write(page_text)

  Simulated login to gushiwen.org

import requests
from lxml import etree
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}

s = requests.Session()
login_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
page_text = requests.get(url=login_url,headers=headers).text
tree = etree.HTML(page_text)
img_src = 'https://so.gushiwen.org'+tree.xpath('//*[@id="imgCode"]/@src')[0]
img_data = s.get(url=img_src,headers=headers).content
with open('./img.jpg','wb') as fp:
    fp.write(img_data)
img_text = getCodeDate('bobo328410948','bobo328410948','./img.jpg',1004)

# Simulated login
url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
data = {
    "__VIEWSTATE":"9AsGvh3Je/0pfxId7DYRUi258ayuEG4rrQ1Z3abBgLoDSOeAUatOZOrAIxudqiOauXpR9Zq+dmKJ28+AGjXYHaCZJTTtGgrEemBWI1ed7oS7kpB7Rm/4yma/+9Q=",
    "__VIEWSTATEGENERATOR":"C93BE1AE",
    "from":"http://so.gushiwen.org/user/collect.aspx",
    "email":"www.zhangbowudi@qq.com",
    "pwd":"bobo328410948",
    "code":img_text,
    "denglu":"登陸",
}
page_text = s.post(url=url,headers=headers,data=data).text
with open('./gushiwen.html','w',encoding='utf-8') as fp:
    fp.write(page_text)

II. Image lazy loading, Selenium, PhantomJS

1. The concept of image lazy loading:

  Image lazy loading is a web-page optimization technique. Like any static resource, an image consumes network bandwidth when requested, and loading every image on a page in one go greatly increases the first-screen load time. To solve this, the front end and back end cooperate so that an image is loaded only once it enters the browser's current viewport; this technique of cutting down first-screen image requests is called image lazy loading.

2. How image lazy loading is implemented:

  In the page source, the img tag first stores the real image link in a "pseudo attribute" (commonly src2, original, ...) instead of placing it directly in the src attribute. When the image scrolls into the visible area of the page, script dynamically swaps the pseudo attribute in as src, which completes the load.

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
from lxml import etree

if __name__ == "__main__":
     url = 'http://sc.chinaz.com/tupian/gudianmeinvtupian.html'
     headers = {
         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
     }
     #獲取頁面文本數據
     response = requests.get(url=url,headers=headers)
     response.encoding = 'utf-8'
     page_text = response.text
     #解析頁面數據(獲取頁面中的圖片連接)
     #建立etree對象
     tree = etree.HTML(page_text)
     div_list = tree.xpath('//div[@id="container"]/div')
     #解析獲取圖片地址和圖片的名稱
     for div in div_list:
         image_url = div.xpath('.//img/@src'2) #src2僞屬性
         image_name = div.xpath('.//img/@alt')
         print(image_url) #打印圖片連接
         print(image_name)#打印圖片名稱
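
Note: the chinaz page serves src2 for images below the first screen, but pages that mix eagerly loaded and lazy-loaded images only put the pseudo attribute on some img tags. Preferring src2 and falling back to src covers both cases (a small sketch under that assumption):

for div in div_list:
    src2 = div.xpath('.//img/@src2')   # real link on lazy-loaded images
    src  = div.xpath('.//img/@src')    # already-real link on eager images
    image_url = (src2 or src or [None])[0]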

3. Selenium

  Selenium is a third-party Python library; its API can drive a browser and have the browser carry out operations automatically.

  Environment setup

Install selenium: pip install selenium

Get the driver program for a specific browser (Chrome as the example)

  Chrome driver download address: http://chromedriver.storage.googleapis.com/index.html

  The downloaded driver must match the browser version; use the version mapping table at http://blog.csdn.net/huilan_same/article/details/51896672 to pick the right one.

Example:

from selenium import webdriver
from time import sleep

bro = webdriver.Chrome(executable_path=r"E:\python學習\數據+爬蟲\第三天\chromedriver_win32\chromedriver.exe")
bro.get(url='https://www.baidu.com/')
sleep(2)
text_input = bro.find_element_by_id("kw")

text_input.send_keys('人民幣')
sleep(2)

bro.find_element_by_id('su').click()

sleep(3)

print(bro.page_source)
bro.quit()

Code walkthrough:

# import
from selenium import webdriver
# create a browser object; the browser is driven through it
browser = webdriver.Chrome('driver path')
# have the browser issue the given request
browser.get(url)

# then look elements up with the methods below and operate on them
    find_element_by_id            find a node by id
    find_elements_by_name         find by name
    find_elements_by_xpath        find by xpath
    find_elements_by_tag_name     find by tag name
    find_elements_by_class_name   find by class name
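
For instance, the Baidu search box from the example above can be reached through several of these strategies interchangeably (a quick sketch; the s_ipt class name is read off Baidu's markup of the time and is an assumption here):

box = browser.find_element_by_id('kw')
box = browser.find_element_by_xpath('//input[@id="kw"]')
box = browser.find_elements_by_class_name('s_ipt')[0]  # assumed class name; the plural form returns a list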

  Fetching more movie-detail data from the Douban movie rankings

url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe')
bro.get(url)
sleep(3)
# scroll to the bottom so the page dynamically loads more entries
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source

with open('./douban.html','w',encoding='utf-8') as fp:
    fp.write(page_text)

sleep(1)
bro.quit()

4. PhantomJS

  PhantomJS is a browser with no GUI; automating it follows exactly the same flow as driving Chrome above. Since there is no window, PhantomJS offers a screenshot feature so the automated run can still be visualized, implemented with the save_screenshot method.

# PhantomJS
# Fetch more movie data from the Douban rankings
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
# note: this uses the PhantomJS driver, not the Chrome one
bro = webdriver.PhantomJS(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\phantomjs-2.1.1-windows\bin\phantomjs.exe')
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
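# PhantomJS has no visible window; save_screenshot dumps the current
# viewport to a PNG so the automated run can still be inspected
bro.save_screenshot('./douban.png')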
page_text = bro.page_source

with open('./douban.html','w',encoding='utf-8') as fp:
    fp.write(page_text)

sleep(1)
bro.quit()

5. Headless Chrome

# Headless Chrome
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')

# Fetch more movie data from the Douban rankings
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe',chrome_options=chrome_options)
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source

with open('./douban.html','w',encoding='utf-8') as fp:
    fp.write(page_text)
print(page_text)
sleep(1)
bro.quit()

III. Cookies in the requests module, and thread pools

1. What a cookie is:

  When a user first visits a domain through a browser, the web server sends the client data that maintains state between the server and the client; that data is the cookie.

2. What cookies are for:

  In the browser we constantly exchange data with servers: logging in to a mailbox, ticking "remember me for 30 days" or an auto-login option on a page, and so on. Cookies are set by the HTTP server and saved in the browser. HTTP itself is a stateless protocol: once an exchange of data finishes, the connection between server and client closes, and each new exchange needs a new connection, so cookies are what carry identity across those connections.
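
A minimal sketch of that persistence with requests.Session (httpbin.org is used purely as an illustrative echo service; it is not part of the examples below):

import requests

session = requests.Session()
# the server sets a cookie on this response; the Session stores it
session.get('http://httpbin.org/cookies/set/token/abc123')
# the stored cookie is attached to every later request automatically
print(session.cookies.get('token'))                      # abc123
print(session.get('http://httpbin.org/cookies').json())  # {'cookies': {'token': 'abc123'}}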

3. Approach:

  First, capture Renren's login request with the crawler and obtain the cookie data carried in that request.

  Then, when requesting the profile-page url, the request must carry the cookie from step 1; only with that cookie can the server identify the user behind the request and respond with that user's profile page.

Example:

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
if __name__ == "__main__":

    # The login-request url (captured with a packet-capture tool)
    post_url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=201873958471'
    # Create a session object; it stores and carries request cookies automatically
    session = requests.session()
    # Spoof the UA
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    }
    formdata = {
        'email': '17701256561',
        'icode': '',
        'origURL': 'http://www.renren.com/home',
        'domain': 'renren.com',
        'key_id': '1',
        'captcha_type': 'web_login',
        'password': '7b456e6c3eb6615b2e122a2942ef3845da1f91e3de075179079a3b84952508e4',
        'rkey': '44fd96c219c593f3c9612360c80310a3',
        'f': 'https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3Dm7m_NSUp5Ri_ZrK5eNIpn_dMs48UAcvT-N_kmysWgYW%26wd%3D%26eqid%3Dba95daf5000065ce000000035b120219',
    }
    # Send the request through the session so the session saves the cookies from this request
    session.post(url=post_url,data=formdata,headers=headers)

    get_url = 'http://www.renren.com/960481378/profile'
    # Send again through the session; this request now carries the cookies
    response = session.get(url=get_url,headers=headers)
    # Set the encoding of the response body
    response.encoding = 'utf-8'
    # Write the response body to a file
    with open('./renren.html','w',encoding='utf-8') as fp:
        fp.write(response.text)

4. Data crawling with a multiprocessing.dummy thread pool

Example:

  Crawl video data from pearvideo.com

Plain serial crawl:

import requests
import random
from lxml import etree
import re
from fake_useragent import UserAgent
# Install the fake-useragent library: pip install fake-useragent
url = 'http://www.pearvideo.com/category_1'
# Generate a random UA. If this raises an error, the options below can help:
# ua = UserAgent(verify_ssl=False,use_cache_server=False).random
# Disable the cache server:
# ua = UserAgent(use_cache_server=False)
# Don't cache data:
# ua = UserAgent(cache=False)
# Skip ssl verification:
# ua = UserAgent(verify_ssl=False)

ua = UserAgent().random
headers = {
    'User-Agent':ua
}
# Fetch the index page
page_text = requests.get(url=url,headers=headers).text
# Parse the video-detail links out of the index page
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="listvideoList"]/ul/li')
detail_urls = []
for li in li_list:
    detail_url = 'http://www.pearvideo.com/'+li.xpath('./div/a/@href')[0]
    title = li.xpath('.//div[@class="vervideo-title"]/text()')[0]
    detail_urls.append(detail_url)
for url in detail_urls:
    page_text = requests.get(url=url,headers=headers).text
    video_url = re.findall('srcUrl="(.*?)"',page_text,re.S)[0]

    data = requests.get(url=video_url,headers=headers).content
    fileName = str(random.randint(1,10000))+'.mp4' # random video file name
    with open(fileName,'wb') as fp:
        fp.write(data)
        print(fileName+' is over')

Thread-pool-based crawl:

import requests
import re
from lxml import etree
from multiprocessing.dummy import Pool
import random


# Instantiate a thread pool with five worker threads
pool = Pool(5)
url = 'https://www.pearvideo.com/category_1'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
page_text = requests.get(url=url,headers=headers).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="listvideoList"]/ul/li')

video_url_list = []
for li in li_list:
    detail_url = 'https://www.pearvideo.com/'+li.xpath('./div/a/@href')[0]
    detail_page = requests.get(url=detail_url,headers=headers).text
    video_url = re.findall('srcUrl="(.*?)",vdoUrl',detail_page,re.S)[0]
    video_url_list.append(video_url)
    
# the worker functions must be defined before pool.map uses them
def getVideoData(url):
    # download one video's binary stream
    return requests.get(url=url,headers=headers).content

def saveVideo(data):
    # write one stream to a randomly named mp4 file
    fileName = str(random.randint(0,5000))+'.mp4'
    with open(fileName,'wb') as fp:
        fp.write(data)

# download all videos concurrently, then write them out concurrently
video_data_list = pool.map(getVideoData,video_url_list)
pool.map(saveVideo,video_data_list)
pool.close()
pool.join()
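
Note that Pool(5) caps the crawl at five worker threads. multiprocessing.dummy exposes the same API as multiprocessing but backs it with threads rather than processes, which suits I/O-bound work like these downloads, and pool.map preserves input order, so each downloaded stream lines up with its url.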