python 網絡爬蟲requests處理cookie,代理IP,雲打碼介紹

一。基於requests模塊的cookie操作(session處理cookie)

 cookie概念:當用戶經過瀏覽器首次訪問一個域名時,訪問的web服務器會給客戶端發送數據,以保持web服務器與客戶端之間的狀態保持,這些數據就是cookie。 cookie做用:咱們在瀏覽器中,常常涉及到數據的交換,好比你登陸郵箱,登陸一個頁面。咱們常常會在此時設置30天內記住我,或者自動登陸選項。那麼它們是怎麼記錄信息的呢,答案就是今天的主角cookie了,Cookie是由HTTP服務器設置的,保存在瀏覽器中,但HTTP協議是一種無狀態協議,在數據交換完畢後,服務器端和客戶端的連接就會關閉,每次交換數據都須要創建新的連接。就像咱們去超市買東西,沒有積分卡的狀況下,咱們買完東西以後,超市沒有咱們的任何消費信息,但咱們辦了積分卡以後,超市就有了咱們的消費信息。cookie就像是積分卡,能夠保存積分,商品就是咱們的信息,超市的系統就像服務器後臺,http協議就是交易的過程。
cookie介紹
# Session-based cookie handling: fetch Xueqiu's timeline JSON API.
import requests
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}

# A session object issues requests just like the requests module itself;
# any cookie produced during a request is captured and replayed automatically.
s = requests.Session()

# Hit the home page first so the server-issued cookie lands in the session.
first_url = 'https://xueqiu.com/'
s.get(url=first_url, headers=headers)

# The JSON endpoint now succeeds because the session carries that cookie.
url = 'https://xueqiu.com/v4/statuses/public_timeline_by_category.json?since_id=-1&max_id=-1&count=10&category=-1'
json_obj = s.get(url=url, headers=headers).json()
print(json_obj)
需求:cookie操做案例-雪球網

 

二。打碼驗證識別驗證碼實現模擬登錄

雲打碼使用方法:https://i.cnblogs.com/EditPosts.aspx?postid=10839009&update=1

 

 獲取驗證碼圖片:code_img_src = tree.xpath('//*[@id="verifyPic_login"]/@src')[0]

點擊登陸會進行頁面跳轉,勾選Preserve log選項,這個選項是保留跳轉與跳轉以前全部記錄

import http.client, mimetypes, urllib, json, time, requests ######################################################################

class YDMHttp:
    """Minimal HTTP client for the YunDama captcha-recognition API.

    Typical workflow: ``login()`` -> ``balance()`` ->
    ``decode(image_path, codetype, timeout)``.  Negative return values are
    YunDama error codes; ``-9001`` means the server sent an empty reply.
    """

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username
        self.password = password
        self.appid = str(appid)  # the API expects the software id as a string
        self.appkey = appkey

    def request(self, fields, files=None):
        """POST `fields` (plus optional image `files`) and parse the JSON reply.

        BUG FIX: the original used a mutable default ``files=[]``.
        """
        response = self.post_url(self.apiurl, fields, files)
        return json.loads(response)

    def balance(self):
        """Return the account balance, or a negative error code."""
        data = {'method': 'balance', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey}
        response = self.request(data)
        if not response:
            return -9001  # empty reply from the server
        # ret == 0 means success; a truthy negative ret is an error code.
        if response['ret'] and response['ret'] < 0:
            return response['ret']
        return response['balance']

    def login(self):
        """Log in and return the user id, or a negative error code."""
        data = {'method': 'login', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey}
        response = self.request(data)
        if not response:
            return -9001
        if response['ret'] and response['ret'] < 0:
            return response['ret']
        return response['uid']

    def upload(self, filename, codetype, timeout):
        """Upload the captcha image at `filename`; return its cid or an error code."""
        data = {'method': 'upload', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'codetype': str(codetype),
                'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if not response:
            return -9001
        if response['ret'] and response['ret'] < 0:
            return response['ret']
        return response['cid']

    def result(self, cid):
        """Return the recognized text for `cid`, or '' if not ready yet."""
        data = {'method': 'result', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return (response and response['text']) or ''

    def decode(self, filename, codetype, timeout):
        """Upload a captcha and poll (once per second) for its text.

        Returns ``(cid, text)``; ``(-3003, '')`` when polling times out,
        ``(cid, '')`` when the upload itself failed (cid <= 0).
        """
        cid = self.upload(filename, codetype, timeout)
        if cid > 0:
            for _ in range(timeout):
                result = self.result(cid)
                if result != '':
                    return cid, result
                time.sleep(1)
            return -3003, ''
        return cid, ''

    def report(self, cid):
        """Report a wrong recognition for `cid`; return the API's ret code."""
        data = {'method': 'report', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if response:
            return response['ret']
        return -9001

    def post_url(self, url, fields, files=None):
        """POST `fields` to `url`; `files` maps form-field name -> file path.

        BUG FIX: the original overwrote the *caller's* dict values with open
        file objects and never closed them; open locally and always close.
        """
        opened = {}
        try:
            for key in (files or {}):
                opened[key] = open(files[key], 'rb')
            res = requests.post(url, files=opened or None, data=fields)
            return res.text
        finally:
            for handle in opened.values():
                handle.close()
雲打碼識別模塊:YunCode.py
import requests
from lxml import etree
from YunCode import YDMHttp  # YunCode.py contains the YDMHttp client


def getCodeText(codeType, imgPath):
    """Send the captcha image at `imgPath` to YunDama; return the recognized text.

    codeType: YunDama captcha-type id (e.g. 1004 = 4 alphanumeric chars);
              fee/type table: http://www.yundama.com/price.html
    imgPath:  local path of the downloaded captcha image.
    Returns None when recognition was not attempted or failed.
    """
    result = None
    # Normal-user credentials (NOT the developer account).
    username = '用戶名'
    password = '密碼'
    # Software id/key: developer parameters from the developer console
    # ("My software" page).
    # BUG FIX: the original wrote ``appid = id``, which passed the *builtin
    # function* ``id`` as the app id; use an explicit numeric placeholder.
    appid = 0
    appkey = '軟件密鑰'
    # Image file to recognize.
    filename = imgPath
    codetype = codeType
    # Recognition timeout, seconds.
    timeout = 30

    # BUG FIX: the original compared against 'username', which never matched
    # the actual placeholder, so the guard was dead code.
    if username == '用戶名':
        print('請設置好相關參數再測試')
    else:
        yundama = YDMHttp(username, password, appid, appkey)
        uid = yundama.login()
        print('uid: %s' % uid)
        balance = yundama.balance()
        print('balance: %s' % balance)
        # Recognize: image path, captcha-type id, timeout (seconds).
        cid, result = yundama.decode(filename, codetype, timeout)
        print('cid: %s, result: %s' % (cid, result))
    return result


headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}

# Step 1: download the captcha image locally, then hand it to the platform.
url = 'http://www.renren.com/'
page_text = requests.get(url=url, headers=headers).text

# Parse the captcha image url out of the login page.
tree = etree.HTML(page_text)
code_img_src = tree.xpath('//*[@id="verifyPic_login"]/@src')[0]
code_img_data = requests.get(url=code_img_src, headers=headers).content
with open('./code.jpg', 'wb') as fp:
    fp.write(code_img_data)

# Hand the saved captcha image to the coding platform.
code_text = getCodeText(2004, './code.jpg')

# Step 2: simulated login (POST the form including the recognized captcha).
post_url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2019361852954'
data = {
    'email': 'www.zhangbowudi@qq.com',
    'icode': code_text,
    'origURL': 'http://www.renren.com/home',
    'domain': 'renren.com',
    'key_id': '1',
    'captcha_type': 'web_login',
    'password': '784601bfcb6b9e78d8519a3885c4a3de0aa7c3f597477e00d26a7aa6598e83bf',
    'rkey': '00313a9752665df609d455d36edfbe94',
    'f': '',
}
page_text = requests.post(url=post_url, headers=headers, data=data).text
with open('./renren.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
需求:模擬人人網驗證碼效驗登陸

 

三。proxies參數設置請求代理ip

代理:(快代理、西刺代理、goubanjia)

類型:http與https

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
import random

if __name__ == "__main__":
    # User-Agent strings of several browsers.
    header_list = [
        # Maxthon
        {"user-agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)"},
        # Firefox
        {"user-agent": "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"},
        # Chrome
        {"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"},
    ]
    # Candidate proxy IPs.
    proxy_list = [
        {"http": "111.206.6.101:80"},
        {'http': '39.137.107.98:80'},
    ]
    # Pick a UA and a proxy at random.
    header = random.choice(header_list)
    proxy = random.choice(proxy_list)
    url = 'http://www.baidu.com/s?ie=UTF-8&wd=ip'
    # proxies= routes this request through the chosen proxy.
    response = requests.get(url=url, headers=header, proxies=proxy)
    response.encoding = 'utf-8'
    with open('daili.html', 'wb') as fp:
        fp.write(response.content)
    # Switch back to a direct connection (empty proxy mapping).
    requests.get(url, proxies={"http": ""})
需求:驗證代理ip

 

四。基於multiprocessing.dummy線程池的數據爬取

安裝fake-useragent庫:  pip install fake-useragent
需求:爬取梨視頻的視頻信息,並計算其爬取數據的耗時
import requests
import random
from lxml import etree
import re
from fake_useragent import UserAgent  # random User-Agent generator

# Install with: pip install fake-useragent
url = 'http://www.pearvideo.com/category_1'

# Random UA.  If UserAgent() raises, these variants can help:
#   UserAgent(verify_ssl=False, use_cache_server=False).random
#   UserAgent(use_cache_server=False)  # disable the server cache
#   UserAgent(cache=False)             # do not cache data locally
#   UserAgent(verify_ssl=False)        # skip ssl verification
ua = UserAgent().random
headers = {'User-Agent': ua}

# Fetch the category front page.
page_text = requests.get(url=url, headers=headers).text

# Parse the detail-page link of every listed video.
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="listvideoList"]/ul/li')
detail_urls = []
for li in li_list:
    detail_url = 'http://www.pearvideo.com/' + li.xpath('./div/a/@href')[0]
    title = li.xpath('.//div[@class="vervideo-title"]/text()')[0]
    detail_urls.append(detail_url)

# Download each video one after another.
for url in detail_urls:
    page_text = requests.get(url=url, headers=headers).text
    video_url = re.findall('srcUrl="(.*?)"', page_text, re.S)[0]
    data = requests.get(url=video_url, headers=headers).content
    fileName = str(random.randint(1, 10000)) + '.mp4'  # random file name
    with open(fileName, 'wb') as fp:
        fp.write(data)
        print(fileName + ' is over')
普通爬取
import requests
import random
from lxml import etree
import re
from fake_useragent import UserAgent  # pip install fake-useragent
# Thread pool (thread-backed clone of the multiprocessing API).
from multiprocessing.dummy import Pool

# Instantiate the thread pool.
pool = Pool()
url = 'http://www.pearvideo.com/category_1'

# Random UA.
ua = UserAgent().random
headers = {'User-Agent': ua}

# Fetch the category front page.
page_text = requests.get(url=url, headers=headers).text

# Parse the second-level (detail) page urls.
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="listvideoList"]/ul/li')
detail_urls = []  # detail-page urls
for li in li_list:
    detail_url = 'http://www.pearvideo.com/' + li.xpath('./div/a/@href')[0]
    title = li.xpath('.//div[@class="vervideo-title"]/text()')[0]
    detail_urls.append(detail_url)

video_urls = []  # direct urls of the video files
for url in detail_urls:
    page_text = requests.get(url=url, headers=headers).text
    video_url = re.findall('srcUrl="(.*?)"', page_text, re.S)[0]
    video_urls.append(video_url)


# BUG FIX: in the original, save() was defined *after* pool.map() already
# dispatched work to it, so the workers raised NameError at runtime.
def save(data):
    """Write one video's bytes to a randomly named .mp4 file."""
    fileName = str(random.randint(1, 10000)) + '.mp4'
    with open(fileName, 'wb') as fp:
        fp.write(data)
    print(fileName + '已存儲')


# Download all videos in parallel on the thread pool.
video_data_list = pool.map(
    lambda link: requests.get(url=link, headers=headers).content, video_urls)
# Save them in parallel too (pass save directly; the extra lambda was noise).
pool.map(save, video_data_list)
pool.close()
pool.join()
基礎線程池爬取
相關文章
相關標籤/搜索