Python crawlers: the requests module, part 2

Python web crawling with the requests module
  • handling cookies with a session
  • setting a request proxy IP with the proxies parameter
  • thread-pool-based data crawling
1. Obtaining the verification code (captcha)

Steps:

1. Register at YunDaMa (雲打碼): http://www.yundama.com/about.html

2. Log in. There are two logins: the developer login and the regular-user login.

3. After logging in successfully as a developer, create a new software entry.

4. Click "Developer Center".

5. Click into the PythonHTTP download section.

6. Choose and download the version you need.

Code for obtaining the captcha recognition result:

import json, time, requests

######################################################################

class YDMHttp:

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username  
        self.password = password
        self.appid = str(appid)
        self.appkey = appkey

    # POST fields (and optional files) to the API and parse the JSON response
    def request(self, fields, files={}):
        response = self.post_url(self.apiurl, fields, files)
        response = json.loads(response)
        return response
    
    # query the account balance; negative values are error codes
    def balance(self):
        data = {'method': 'balance', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['balance']
        else:
            return -9001
    
    # log in; returns the uid on success, a negative error code on failure
    def login(self):
        data = {'method': 'login', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['uid']
        else:
            return -9001

    # upload a captcha image; returns the task id (cid) on success
    def upload(self, filename, codetype, timeout):
        data = {'method': 'upload', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'codetype': str(codetype), 'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['cid']
        else:
            return -9001

    # fetch the recognition result for a given task id
    def result(self, cid):
        data = {'method': 'result', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return response and response['text'] or ''

    # upload an image and poll for the result once per second until timeout
    def decode(self, filename, codetype, timeout):
        cid = self.upload(filename, codetype, timeout)
        if (cid > 0):
            for i in range(0, timeout):
                result = self.result(cid)
                if (result != ''):
                    return cid, result
                else:
                    time.sleep(1)
            return -3003, ''
        else:
            return cid, ''

    # report an incorrect recognition result
    def report(self, cid):
        data = {'method': 'report', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if (response):
            return response['ret']
        else:
            return -9001

    # helper: open any file paths and POST the fields/files to the given url
    def post_url(self, url, fields, files={}):
        for key in files:
            files[key] = open(files[key], 'rb')
        res = requests.post(url, files=files, data=fields)
        return res.text

######################################################################

# YunDaMa username (regular user account)
username    = 'molihua'

# YunDaMa password
password    = 'MLH19960208'

# software ID, a required parameter for developer revenue sharing; found under "My Software" in the developer console
appid       = 7025

# software key, a required parameter for developer revenue sharing; found under "My Software" in the developer console
appkey      = '2d96c723a682c882faa73257e98440d7'

# the image file
filename    = 'getimage.jpg'

# captcha type, e.g. 1004 means 4 alphanumeric characters; pricing differs by type.
# Fill this in accurately or the recognition rate suffers. Full list of types: http://www.yundama.com/price.html
codetype    = 1004

# timeout in seconds, customizable
timeout     = 10

# sanity check
if (username == 'username'):
    print('Please set the parameters above before testing')
else:
    # initialize
    yundama = YDMHttp(username, password, appid, appkey)

    # log in to YunDaMa
    uid = yundama.login()
    print('uid: %s' % uid)

    # query the balance
    balance = yundama.balance()
    print('balance: %s' % balance)

    # start recognition: image path, captcha type ID, timeout (s); returns the task id and the result
    cid, result = yundama.decode(filename, codetype, timeout)
    print('cid: %s, result: %s' % (cid, result))
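
The renren login example in the next section calls a helper getCodeDate() that is never defined in this post. A minimal sketch, assuming it simply wraps the YDMHttp class above (the appid/appkey values are placeholders; substitute your own developer credentials):

# hypothetical helper wrapping YDMHttp: image file in, recognized text out
def getCodeDate(username, password, codePath, codeType):
    appid = 7025                                 # placeholder: your software ID
    appkey = '2d96c723a682c882faa73257e98440d7'  # placeholder: your software key
    yundama = YDMHttp(username, password, appid, appkey)
    uid = yundama.login()                        # log in first
    cid, result = yundama.decode(codePath, codeType, 10)  # 10-second timeout
    return result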
Requirement: log in to renren.com and fetch a profile page
import requests
import urllib.request
from lxml import etree

# get a session object
session = requests.Session()
url = 'http://www.renren.com'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:65.0) Gecko/20100101 Firefox/65.0'
}
renren_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(renren_text)
code_img_url = tree.xpath('//*[@id="verifyPic_login"]/@src')[0]
urllib.request.urlretrieve(url=code_img_url, filename='code.jpg')

# recognize the characters in the captcha image
code_data = getCodeDate('15204558261','MLH19960208','./code.jpg',2004)
print(code_data)

login_url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2019142013687'

# form data captured with a packet-capture tool
data = {
    'email':'15204558261',
    'icode': code_data,
    'origURL':'http://www.renren.com/home',
    'domain':'renren.com',
    'key_id':'1',
    'captcha_type':'web_login',
    'password':'7bf638cc5b01b15b9416bf17fb98a1eda46da861c139b563a4c670fb21884336',
    'rkey':'cf9180c5afba43cb1f089b953e67b567',
    'f':'http%3A%2F%2Fwww.renren.com%2F296856777%2Fprofile'
}
# the cookies produced by this request are stored on the session object automatically

session.post(url=login_url,data=data,headers=headers)
url='http://www.renren.com/296856777/profile'
page_text = session.get(url=url,headers=headers).text

with open('renren.html','w',encoding='utf-8') as fp:
    fp.write(page_text)
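
To confirm that the login cookies really were captured, you can inspect the session's cookie jar after the post (a quick optional check, not part of the original flow):

print(session.cookies.get_dict())  # should now contain the renren login cookies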
Data crawling with a thread pool from multiprocessing.dummy
import requests
import random
from lxml import etree
import re
from fake_useragent import UserAgent
# install the fake-useragent library: pip install fake-useragent
# import the thread pool module
from multiprocessing.dummy import Pool

# instantiate a thread pool object
pool = Pool()
url = 'http://www.pearvideo.com/category_1'
# generate a random User-Agent
ua = UserAgent().random
headers = {
    'User-Agent': ua
}
# fetch the front-page HTML
page_text = requests.get(url=url, headers=headers).text
# parse the links to each video's detail page out of the front page
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="listvideoList"]/ul/li')

detail_urls = []  # urls of the second-level (detail) pages
for li in li_list:
    detail_url = 'http://www.pearvideo.com/' + li.xpath('./div/a/@href')[0]
    title = li.xpath('.//div[@class="vervideo-title"]/text()')[0]
    detail_urls.append(detail_url)

video_urls = []  # urls of the video files themselves
for url in detail_urls:
    page_text = requests.get(url=url, headers=headers).text
    video_url = re.findall('srcUrl="(.*?)"', page_text, re.S)[0]
    video_urls.append(video_url)

# save() must be defined before the pool uses it (the original defined it too late)
def save(data):
    fileName = str(random.randint(1, 10000)) + '.mp4'
    with open(fileName, 'wb') as fp:
        fp.write(data)
        print(fileName + ' saved')

# download the video data with the thread pool
func_request = lambda link: requests.get(url=link, headers=headers).content
video_data_list = pool.map(func_request, video_urls)
# save the video data with the thread pool
pool.map(save, video_data_list)

pool.close()
pool.join()
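
A note on sizing: Pool() with no argument creates one worker thread per CPU core. For I/O-bound downloads like these you may want to choose the size explicitly; the value below is just an illustrative choice:

pool = Pool(4)  # four worker threads; tune for I/O-bound workloads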
Proxies with the requests module
What is a proxy?

A proxy is a third party that handles business on behalf of the principal, for example an intermediary, a reseller, or a purchasing agent.

So the question is: why would a crawler need a proxy?

Because some sites take anti-crawling measures. For example, a site may count how often a given IP visits within a certain time window; if the visits are too frequent, it concludes the traffic is probably not from a normal user and blocks that IP. Using proxy IPs lets us keep crawling the data we need: even if one IP is blocked for a while, we can switch to another proxy IP and carry on. Pretty neat, isn't it?

Proxy categories:

Forward proxy: acts on behalf of the client to fetch data.

Reverse proxy: acts on behalf of the server to provide data.

Sites with free proxy IPs:

http://www.goubanjia.com/

快代理 (kuaidaili)
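
A quick way to check that a proxy is actually in effect is to request an IP-echo service through it; a minimal sketch, where the proxy address is a placeholder from the sites above and may well be dead by now:

import requests
proxy = {'http': 'http://112.115.57.20:3128'}  # placeholder; substitute a live proxy
resp = requests.get('http://httpbin.org/ip', proxies=proxy, timeout=5)
print(resp.text)  # should echo the proxy's IP, not your own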

import requests
import random
if __name__ == "__main__":
    # User-Agent strings from different browsers
    header_list = [
        # Maxthon
        {"user-agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)"},
        # Firefox
        {"user-agent": "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"},
        # Chrome
        {
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    ]
    # different proxy IPs
    proxy_list = [
        {"http": "112.115.57.20:3128"},
        {'http': '121.41.171.223:3128'}
    ]
    # randomly pick a UA and a proxy IP
    header = random.choice(header_list)
    proxy = random.choice(proxy_list)

    url = 'http://www.baidu.com/s?ie=UTF-8&wd=ip'
    # the proxies parameter sets the proxy
    response = requests.get(url=url, headers=header, proxies=proxy)

    with open('daili.html', 'wb') as fp:
        fp.write(response.content)
    # switch back to the original (non-proxy) IP
    requests.get(url, proxies={"http": ""})
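
One caveat: the proxies mapping is keyed by URL scheme, so requests only routes an https:// URL through an 'https' entry. A minimal sketch with placeholder addresses:

proxies = {
    'http': 'http://112.115.57.20:3128',    # placeholder
    'https': 'https://121.41.171.223:3128'  # placeholder; the proxy must support HTTPS
}
requests.get('https://www.baidu.com/', proxies=proxies)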