第一個爬蟲程式
import urllib.request

# 1. Target URL to crawl.
url = "http://www.eastmoney.com/"

# 2. Send the request; urlopen returns a response object for the URL.
#    BUGFIX: use a context manager so the connection is always closed
#    (the original never closed the response object).
with urllib.request.urlopen(url=url) as response:
    # 3. Read the raw page bytes.
    text = response.read()

# 4. Persist the page to disk (binary mode, since we did not decode).
#    NOTE(review): the filename says 'baidu.html' but the URL is eastmoney —
#    kept as-is to preserve behavior; consider renaming.
with open('./baidu.html', 'wb') as f:
    f.write(text)
print('success')
第二個解決編碼問題
import urllib.request
import urllib.parse

# Target URL. A URL must not contain non-ASCII characters, so the Chinese
# query term is percent-encoded with urllib.parse.quote before appending.
url = 'https://www.sogou.com/web?query='
word = urllib.parse.quote("人民幣")
url += word  # now a valid, fully ASCII URL

# Send the request.
# BUGFIX: context manager closes the connection (original leaked the response).
with urllib.request.urlopen(url=url) as response:
    # Read the raw page bytes.
    page_text = response.read()

# Persist to disk.
with open('renminbi.html', 'wb') as fp:
    fp.write(page_text)
UA的身份偽裝
import urllib.request

url = 'https://www.baidu.com/'

# UA spoofing:
# 1. Build a custom Request object that carries arbitrary request headers;
#    here we fake a desktop Chrome User-Agent so the server treats us as a browser.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
request = urllib.request.Request(url=url, headers=headers)

# 2. Send the request using the customized Request object.
#    BUGFIX: context manager ensures the response is closed (original leaked it).
with urllib.request.urlopen(request) as response:
    print(response.read())
用post請求
import urllib.request
import urllib.parse

# 1. Target URL of the POST endpoint (Baidu translate suggestion API).
url = 'https://fanyi.baidu.com/sug'

# Preparing the POST parameters:
# 1. Wrap the parameters in a dict.
data = {
    'kw': '西瓜'
}
# 2. urlencode (returns str) the dict into form-encoded format.
data = urllib.parse.urlencode(data)
# 3. Convert the encoded str to bytes, as urlopen's `data` argument requires.
data = data.encode()

# 2. Send the POST request: passing `data` makes urlopen issue a POST.
#    BUGFIX: context manager closes the connection (original leaked the response).
with urllib.request.urlopen(url=url, data=data) as response:
    response.read()  # NOTE(review): result read but discarded, as in the original
requests模塊:
get請求
import requests

url = 'https://www.sogou.com/'
response = requests.get(url=url)
# .text is the body decoded to str (requests guesses the encoding).
text = response.text
with open('./sougou.html', 'w', encoding='utf-8') as f:
    f.write(text)
print('finish!')  # BUGFIX: was misspelled 'finsh!'
post請求,有兩種方式
方式1:
import requests

# requests URL-encodes the query string for you, so the Chinese keyword can
# be embedded directly in the URL — no manual quoting step as with urllib.
search_url = 'https://www.sogou.com/web?query=周杰倫&ie=utf-8'
resp = requests.get(url=search_url)

# Persist the decoded page text.
with open('./zhou.html', 'w', encoding='utf-8') as fp:
    fp.write(resp.text)
方式2:
import requests

# Custom request headers (UA spoofing).
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}

# Target URL; the GET parameters are supplied separately via `params`.
url = 'https://www.sogou.com/web'
prams = {
    'query': '周杰倫',
    'ie': 'utf-8',
}

# Send the GET request.
# BUGFIX: the headers dict was built but never passed to requests.get,
# so the UA spoofing had no effect; it is now actually sent.
response = requests.get(url=url, params=prams, headers=headers)
# .content is the raw bytes body.
print(response.content)
基於requests的post請求:
import requests

# 1. URL of the login POST endpoint.
LOGIN_URL = 'https://accounts.douban.com/login'

# Form fields carried in the POST body.
form_data = {
    "source": "movie",
    "redir": "https://movie.douban.com/",
    "form_email": "15027900535",
    "form_password": "bobo@15027900535",
    "login": "登陸",
}

# Custom request headers (UA spoofing).
ua_headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}

# 2. Send the POST request.
login_response = requests.post(url=LOGIN_URL, data=form_data, headers=ua_headers)

# 3./4. Extract the decoded page text and persist it to disk.
with open('./douban.html', 'w', encoding='utf-8') as fp:
    fp.write(login_response.text)
使用requests模塊發出ajax的get請求:
import requests

AJAX_URL = 'https://movie.douban.com/j/chart/top_list?'

# `start` and `limit` control which slice of the ranking is fetched.
query = {
    'type': '13',
    'interval_id': '100:90',
    'action': '',
    'start': '10',
    'limit': '20',
}
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}

# .text is the decoded str body (.content would be the raw bytes).
print(requests.get(url=AJAX_URL, params=query, headers=ua).text)
使用requests模塊的綜合練習
實現功能:用戶輸入搜索字,再輸入起始與結束頁,輸出對應的搜索頁面
import os
import requests

# Make sure the output directory exists before writing into it.
if not os.path.exists('./path'):
    os.mkdir('./path')

keyword = input('Enter the word:')
SEARCH_URL = 'https://zhihu.sogou.com/zhihu'
first_page = int(input('Enter the start page:'))
last_page = int(input('Enter the end page:'))

ua_headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}

# Fetch every requested result page and save each under ./path/.
for page_no in range(first_page, last_page + 1):
    resp = requests.get(
        url=SEARCH_URL,
        params={'query': keyword, 'page': page_no, 'ie': 'utf-8'},
        headers=ua_headers,
    )
    out_name = keyword + '_' + str(page_no)
    with open('./path/' + out_name, 'w', encoding='utf-8') as f:
        f.write(resp.text)
    print(str(page_no) + 'finish')
使用session進行的登錄操做之後顯示的個人主頁
import requests

# A Session stores cookies returned by the login response and replays
# them automatically on every subsequent request.
session = requests.session()

# 1. Send the login request; the cookie is captured into the session.
login_url = 'https://accounts.douban.com/login'
credentials = {
    "source": "None",
    "redir": "https://www.douban.com/people/185687620/",
    "form_email": "15027900535",
    "form_password": "bobo@15027900535",
    "login": "登陸",
}
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
session.post(url=login_url, data=credentials, headers=ua)

# 2. Request the personal home page through the same session (cookie
#    attached) and persist the page text.
profile_url = 'https://www.douban.com/people/185687620/'
profile_page = session.get(url=profile_url, headers=ua).text
with open('./douban110.html', 'w', encoding='utf-8') as fp:
    fp.write(profile_page)
使用requests模塊進行代理請求,推薦生成代理IP網站:http://www.goubanjia.com/
import requests

# Route the request through an HTTP proxy (see goubanjia.com for free IPs).
target = 'http://www.baidu.com/s?wd=ip&ie=utf-8'
proxies = {
    'http': '39.137.69.7:8080'
}

result = requests.get(url=target, proxies=proxies)
with open('./daili.html', 'w', encoding='utf-8') as out:
    out.write(result.text)
print('finish')
驗證碼處理:
雲打碼平臺處理驗證碼的實現流程:
- 1.對攜帶驗證碼的頁面數據進行抓取
- 2.能夠將頁面數據中驗證碼進行解析,驗證碼圖片下載到本地
- 3.能夠將驗證碼圖片提交給三方平臺進行識別,返回驗證碼圖片上的數據值
- 雲打碼平臺:
  - 1.在官網中進行註冊(普通用戶和開發者用戶)
  - 2.登陸開發者用戶:
    - 1.實例代碼的下載(開發文檔-》調用實例及最新的DLL-》PythonHTTP實例下載)
    - 2.建立一個軟件:個人軟件-》添加新的軟件
  - 3.使用示例代碼中的源碼文件中的代碼進行修改,讓其識別驗證碼圖片中的數據值
def getCode(codeImg):
    """Send the captcha image at path *codeImg* to the yundama.com
    captcha-recognition API and return the recognized text."""
    # Ordinary-user account name on the yundama platform.
    username = 'bobo328410948'
    # Ordinary-user password.
    password = 'bobo328410948'
    # Software ID — required developer parameter, from the developer console.
    appid = 6003
    # Software key — required developer parameter, from the developer console.
    appkey = '1f4b564483ae5c907a1d34f8e2f2776c'
    # Path of the captcha image file.
    filename = codeImg
    # Captcha type (e.g. 1004 = 4 alphanumeric chars); pricing and accuracy
    # depend on it — see http://www.yundama.com/price.html for all types.
    codetype = 3000
    # Timeout in seconds.
    timeout = 20

    # Guard kept from the vendor sample: refuse to run with placeholder creds.
    if username == 'username':
        print('請設置好相關參數再測試')
        return

    # Initialize the API client and log in.
    yundama = YDMHttp(username, password, appid, appkey)
    uid = yundama.login()
    print('uid: %s' % uid)

    # Query the account balance.
    balance = yundama.balance()
    print('balance: %s' % balance)

    # Submit the image for recognition (path, type id, timeout) and
    # return the recognized text.
    cid, result = yundama.decode(filename, codetype, timeout)
    print('cid: %s, result: %s' % (cid, result))
    return result
import requests
from lxml import etree
import json
import time
import re

# 1. Fetch the login page that carries the captcha.
login_page_url = 'https://www.douban.com/accounts/login?source=movie'
headers = {
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Mobile Safari/537.36'
}
page_text = requests.get(url=login_page_url, headers=headers).text

# 2. Parse the captcha image URL out of the page, then download the
#    image's binary data.
tree = etree.HTML(page_text)
codeImg_url = tree.xpath('//*[@id="captcha_image"]/@src')[0]
code_img = requests.get(url=codeImg_url, headers=headers).content

# Extract the captcha id from the <img> tag, e.g.
# '<img id="captcha_image" src="https://www.douban.com/misc/captcha?id=AdC4...&size=s" ...>'
c_id = re.findall('<img id="captcha_image".*?id=(.*?)&.*?>', page_text, re.S)[0]

# Save the captcha image locally so the recognizer can read it.
with open('./code.png', 'wb') as fp:
    fp.write(code_img)

# 3. Recognize the text on the captcha via the third-party platform.
codeText = getCode('./code.png')
print(codeText)

# 4. Perform the login POST, passing the recognized captcha and its id.
post = 'https://accounts.douban.com/login'
data = {
    "source": "movie",
    "redir": "https://movie.douban.com/",
    "form_email": "15027900535",
    "form_password": "bobo@15027900535",
    "captcha-solution": codeText,
    "captcha-id": c_id,
    "login": "登陸",
}
print(c_id)
login_text = requests.post(url=post, data=data, headers=headers).text
with open('./login.html', 'w', encoding='utf-8') as fp:
    fp.write(login_text)
#這個是雲打碼上面有的
class YDMHttp:
    """Minimal HTTP client for the yundama.com captcha-recognition API
    (vendor sample code, reformatted)."""

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username
        self.password = password
        self.appid = str(appid)  # API expects the id as a string
        self.appkey = appkey

    def request(self, fields, files=[]):
        # POST `fields` (plus optional `files`) to the API and decode
        # the JSON reply into a dict.
        response = self.post_url(self.apiurl, fields, files)
        response = json.loads(response)
        return response

    def balance(self):
        # Query the account balance; a negative 'ret' is an API error code.
        data = {'method': 'balance', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey}
        response = self.request(data)
        if response:
            if response['ret'] and response['ret'] < 0:
                return response['ret']
            return response['balance']
        return -9001

    def login(self):
        # Log in; returns the user id, an API error code, or -9001 on no reply.
        data = {'method': 'login', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey}
        response = self.request(data)
        if response:
            if response['ret'] and response['ret'] < 0:
                return response['ret']
            return response['uid']
        return -9001

    def upload(self, filename, codetype, timeout):
        # Upload a captcha image; returns the captcha id (cid) on success.
        data = {'method': 'upload', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'codetype': str(codetype),
                'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if response:
            if response['ret'] and response['ret'] < 0:
                return response['ret']
            return response['cid']
        return -9001

    def result(self, cid):
        # Fetch the recognition result for a cid; '' when not ready yet.
        data = {'method': 'result', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return response and response['text'] or ''

    def decode(self, filename, codetype, timeout):
        # Upload, then poll once per second until a result arrives or the
        # timeout elapses; (-3003, '') on timeout, (cid<=0, '') on upload error.
        cid = self.upload(filename, codetype, timeout)
        if cid > 0:
            for _ in range(0, timeout):
                result = self.result(cid)
                if result != '':
                    return cid, result
                time.sleep(1)
            return -3003, ''
        return cid, ''

    def report(self, cid):
        # Report a wrong recognition (flag '0') back to the platform.
        data = {'method': 'report', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if response:
            return response['ret']
        return -9001

    def post_url(self, url, fields, files=[]):
        # Open any file paths listed in `files`, then POST everything.
        for key in files:
            files[key] = open(files[key], 'rb')
        res = requests.post(url, files=files, data=fields)
        return res.text
使用爬蟲爬取頁面的圖片並保存
import os,re,requests

PAGE_URL = 'https://www.qiushibaike.com/pic/'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}

# Fetch the listing page and pull every thumbnail URL out with a regex.
page_text = requests.get(url=PAGE_URL, headers=HEADERS).text
img_list = re.findall('<div class="thumb">.*?<img src="(.*?)".*?>.*?</div>', page_text, re.S)

# Ensure the output directory exists.
if not os.path.exists('./imgs'):
    os.mkdir('imgs')
print(img_list)

# Download each image (the scraped src is protocol-relative, so prepend
# 'https:') and save it under ./imgs/ using its original file name.
for src in img_list:
    img_bytes = requests.get(url='https:' + src, headers=HEADERS).content
    img_name = src.split('/')[-1]
    with open('./imgs/' + img_name, 'wb') as f:
        f.write(img_bytes)
    print(img_name + '寫入成功')
使用bs4實踐
import requests
from bs4 import BeautifulSoup

url = 'http://www.shicimingju.com/book/sanguoyanyi.html'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}


def get_content(url):
    """Fetch one chapter page and return the chapter body text."""
    content_page = requests.get(url=url, headers=headers).text
    soup = BeautifulSoup(content_page, 'lxml')
    div = soup.find('div', class_='chapter_content')
    return div.text


# Fetch the table of contents and collect the per-chapter <a> links.
page_text = requests.get(url=url, headers=headers).text
soup = BeautifulSoup(page_text, 'lxml')
a_list = soup.select('.book-mulu > ul > li > a')

# BUGFIX: the output file was opened but never closed; the with-statement
# guarantees it is flushed and closed even if a request raises.
with open('./sanguo3.txt', 'w', encoding='utf-8') as fp:
    for a in a_list:
        title = a.text
        content = get_content('http://www.shicimingju.com' + a['href'])
        fp.write(title + ":" + content + '\n\n\n\n')
        print('章節寫入成功')