YunDama (雲打碼) captcha-recognition client and demo:

import http.client, mimetypes, urllib, json, time, requests

######################################################################

class YDMHttp:

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username
        self.password = password
        self.appid = str(appid)
        self.appkey = appkey

    def request(self, fields, files=[]):
        response = self.post_url(self.apiurl, fields, files)
        response = json.loads(response)
        return response

    def balance(self):
        data = {'method': 'balance', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['balance']
        else:
            return -9001

    def login(self):
        data = {'method': 'login', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['uid']
        else:
            return -9001

    def upload(self, filename, codetype, timeout):
        data = {'method': 'upload', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey,
                'codetype': str(codetype), 'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['cid']
        else:
            return -9001

    def result(self, cid):
        data = {'method': 'result', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return response and response['text'] or ''

    def decode(self, filename, codetype, timeout):
        cid = self.upload(filename, codetype, timeout)
        if (cid > 0):
            for i in range(0, timeout):
                result = self.result(cid)
                if (result != ''):
                    return cid, result
                else:
                    time.sleep(1)
            return -3003, ''
        else:
            return cid, ''

    def report(self, cid):
        data = {'method': 'report', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if (response):
            return response['ret']
        else:
            return -9001

    def post_url(self, url, fields, files=[]):
        for key in files:
            files[key] = open(files[key], 'rb')
        res = requests.post(url, files=files, data=fields)
        return res.text

######################################################################

# Username (regular user account)
username = 'bobo328410948'
# Password
password = 'bobo328410948'
# Software ID, required for the developer revenue share; obtained from "My Software" in the developer console
appid = 6003
# Software key, required for the developer revenue share; obtained from "My Software" in the developer console
appkey = '1f4b564483ae5c907a1d34f8e2f2776c'
# Image file
filename = 'getimage.jpg'
# Captcha type, e.g. 1004 means 4 alphanumeric characters. Pricing differs by type; fill it in accurately,
# otherwise recognition accuracy suffers. Full list of types: http://www.yundama.com/price.html
codetype = 1004
# Timeout in seconds
timeout = 10

# Check
if (username == 'username'):
    print('Please configure the parameters above before testing')
else:
    # Initialize
    yundama = YDMHttp(username, password, appid, appkey)
    # Log in to YunDama
    uid = yundama.login()
    print('uid: %s' % uid)
    # Check the account balance
    balance = yundama.balance()
    print('balance: %s' % balance)
    # Start recognition: image path, captcha type ID, timeout (seconds); returns the result
    cid, result = yundama.decode(filename, codetype, timeout)
    print('cid: %s, result: %s' % (cid, result))

######################################################################
The same flow wrapped into a reusable helper that returns the recognized captcha text:

def getCodeDate(userName, pwd, codePath, codeType):
    # Username (regular user account)
    username = userName
    # Password
    password = pwd
    # Software ID, obtained from "My Software" in the developer console
    appid = 6003
    # Software key, obtained from "My Software" in the developer console
    appkey = '1f4b564483ae5c907a1d34f8e2f2776c'
    # Image file
    filename = codePath
    # Captcha type, e.g. 1004 means 4 alphanumeric characters. Full list of types: http://www.yundama.com/price.html
    codetype = codeType
    # Timeout in seconds
    timeout = 2
    result = None
    # Check
    if (username == 'username'):
        print('Please configure the parameters above before testing')
    else:
        # Initialize
        yundama = YDMHttp(username, password, appid, appkey)
        # Log in to YunDama
        uid = yundama.login()
        # print('uid: %s' % uid)
        # Check the account balance
        balance = yundama.balance()
        # print('balance: %s' % balance)
        # Start recognition: image path, captcha type ID, timeout (seconds); returns the result
        cid, result = yundama.decode(filename, codetype, timeout)
        # print('cid: %s, result: %s' % (cid, result))
    return result
# Simulated login to renren.com
import requests
import urllib.request
from lxml import etree

# Get a session object
session = requests.Session()

# Download the captcha image
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'http://www.renren.com/'
page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
code_img_url = tree.xpath('//*[@id="verifyPic_login"]/@src')[0]
urllib.request.urlretrieve(url=code_img_url, filename='code.jpg')

# Recognize the characters in the captcha image
code_data = getCodeDate('bobo328410948', 'bobo328410948', './code.jpg', 2004)

# Simulated login
login_url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=201914927558'
data = {
    "email": "www.zhangbowudi@qq.com",
    "icode": code_data,
    "origURL": "http://www.renren.com/home",
    "domain": "renren.com",
    "key_id": "1",
    "captcha_type": "web_login",
    "password": "4f0350f09aeffeef86307747218b214b0960bdf35e30811c0d611fe39db96ec1",
    "rkey": "9e75e8dc3457b14c55a74627fa64fb43",
    "f": "http%3A%2F%2Fwww.renren.com%2F289676607",
}
# The cookies produced by this request are stored automatically in the session object
session.post(url=login_url, data=data, headers=headers)

url = 'http://www.renren.com/289676607/profile'
page_text = session.get(url=url, headers=headers).text
with open('renren.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
from fake_useragent import UserAgent
ua = UserAgent(verify_ssl=False, use_cache_server=False).random
print(ua)

import requests
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}

# Simulated login to gushiwen.org
s = requests.Session()
login_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
page_text = requests.get(url=login_url, headers=headers).text
tree = etree.HTML(page_text)
img_src = 'https://so.gushiwen.org' + tree.xpath('//*[@id="imgCode"]/@src')[0]
img_data = s.get(url=img_src, headers=headers).content
with open('./img.jpg', 'wb') as fp:
    fp.write(img_data)
img_text = getCodeDate('bobo328410948', 'bobo328410948', './img.jpg', 1004)

# Simulated login
url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
data = {
    "__VIEWSTATE": "9AsGvh3Je/0pfxId7DYRUi258ayuEG4rrQ1Z3abBgLoDSOeAUatOZOrAIxudqiOauXpR9Zq+dmKJ28+AGjXYHaCZJTTtGgrEemBWI1ed7oS7kpB7Rm/4yma/+9Q=",
    "__VIEWSTATEGENERATOR": "C93BE1AE",
    "from": "http://so.gushiwen.org/user/collect.aspx",
    "email": "www.zhangbowudi@qq.com",
    "pwd": "bobo328410948",
    "code": img_text,
    "denglu": "登陸",
}
page_text = s.post(url=url, headers=headers, data=data).text
with open('./gushiwen.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import time

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
time.sleep(3)
driver.find_element_by_id('kw').send_keys(u'我愛你')
time.sleep(3)
# Ctrl+A: select all
driver.find_element_by_id('kw').send_keys(Keys.CONTROL, 'a')
time.sleep(3)
# Ctrl+X: cut
driver.find_element_by_id('kw').send_keys(Keys.CONTROL, 'x')
time.sleep(3)
driver.find_element_by_id('kw').send_keys(u'我愛愛你')
driver.find_element_by_id('su').click()
time.sleep(3)
# Quit the browser
driver.quit()
2 Waiting for elements to load
Selenium drives a real browser through the page, and slowness is one of its defining traits: the script often finishes executing before the page content has loaded, so the target elements have not been rendered yet. If you try to operate on those elements at that point, a NoSuchElementException is raised.
Workaround: sleep for a fixed amount of time. Regardless of whether the page content has finished loading, the script always sleeps for the specified number of seconds before touching the page.
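A minimal sketch of the fixed-sleep workaround (the URL and element id are placeholders, not part of the original example):

import time
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
# Sleep unconditionally: even if the page finished loading long ago,
# the script still waits the full 5 seconds before touching the element
time.sleep(5)
driver.find_element_by_id('kw').send_keys('selenium')
driver.quit()

The WebDriverWait example below avoids wasting that fixed delay by returning as soon as the element is ready.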
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')
driver.find_element_by_id('kw').send_keys(u'我愛你')
button = driver.find_element_by_id('su')

# WebDriverWait: wait on the page
# arg 1: the object to wait on
# arg 2: how long to wait at most
# WebDriverWait is usually used together with until and until_not; until: wait until the condition holds
# Wait until the target element is visible
is_visible = WebDriverWait(button, 10).until(lambda x: button.is_displayed())
print(is_visible)   # prints True
button.click()
Similarities and differences between WebDriverWait and time.sleep():
1 Both make the program wait for a specified amount of time.
2 With time.sleep the wait is fixed: it does not shrink or grow with how quickly the element loads.
With WebDriverWait the wait is not fixed: the actual wait depends on how quickly the element loads, up to the specified maximum.
3 If the element still has not loaded within the specified time, both approaches end in an exception: WebDriverWait raises a TimeoutException, while after a fixed sleep the subsequent lookup raises a NoSuchElementException (see the sketch below).
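A small sketch of point 3, assuming the selenium-3 style API used elsewhere in these notes (the element ids are placeholders): WebDriverWait raises TimeoutException when the deadline passes, while after a fixed time.sleep the failing lookup raises NoSuchElementException.

import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.support.ui import WebDriverWait

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')

# WebDriverWait: returns as soon as the lookup succeeds, so a fast page costs almost no time;
# if the element never appears within 10 seconds, a TimeoutException is raised
try:
    btn = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_id('su'))
    btn.click()
except TimeoutException:
    print('element did not appear within 10 seconds')

# time.sleep: always burns the full 10 seconds, and the lookup afterwards can still fail
time.sleep(10)
try:
    driver.find_element_by_id('no-such-id').click()
except NoSuchElementException:
    print('element still missing after the fixed sleep')

driver.quit()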
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')

# The following three lookups all return the same element
logo = driver.find_element_by_xpath('//div[@id="lg"]/img')
logo2 = driver.find_element_by_class_name('index-logo-src')
logo3 = driver.find_element_by_css_selector('#lg > img')

WebDriverWait(driver, 10).until(lambda x: logo.is_displayed())
ActionChains(driver).double_click(logo).perform()

# context_click means right click
action = ActionChains(driver).context_click(logo)
# Queued actions are executed when perform() is called
action.perform()

# Mouse movement
more = driver.find_element_by_class_name('bri')
WebDriverWait(driver, 10).until(lambda x: more.is_displayed())
ActionChains(driver).move_to_element(more).perform()
This is the page that the following examples operate on:
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
</head>
<body>
    <button id="red" class="red" onclick="fun1()">Button 1</button>
    <button type="button" name="username" onclick="fun2()">Button 2</button>
    <button type="button" onclick="fun3()">Button 3</button>
    <button id="yellow" onclick="fun4()">Button 4</button>
    <script>
        function fun1(){ document.body.style.backgroundColor = 'black' }
        function fun2(){ document.body.style.backgroundColor = 'purple' }
        function fun3(){ document.body.style.backgroundColor = 'pink' }
        function fun4(){ document.body.style.backgroundColor = 'yellow' }
    </script>
</body>
</html>
Concrete ways of locating elements:
from selenium import webdriver
from selenium.webdriver.common.by import By
import os

driver = webdriver.Chrome()
driver.get('file:///' + os.path.abspath('4 index.html'))

# Find elements by tag name
# Note: elements (plural) here
btns = driver.find_elements_by_tag_name('button')
# 1 Pick the target element by index
btns[1].click()
# 2 Pick the target element by attribute
for btn in btns:
    if btn.get_attribute('name') == 'username':
        btn.click()

# find_element_by_XXX  returns the first element that matches XXX
# find_elements_by_XXX returns all elements that match XXX
# The line below finds the first element whose tag name is button
btn = driver.find_element_by_tag_name('button').click()

# pop() the element at the given index; without an index it defaults to the last one
driver.find_elements_by_css_selector('button').pop(1).click()

# [type=button]: the brackets are a constraint on what gets selected
# find_elements returns a list; [0] is the first of all tags with type=button
driver.find_elements_by_css_selector('button[type=button]')[0].click()

# A generic way of locating elements; works as a catch-all
driver.find_element(by=By.ID, value='yellow').click()
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import time

driver = webdriver.Chrome()
driver.get('http://www.baidu.com')

# Get the current window handle
current_window = driver.current_window_handle
print(current_window, driver.title)   # CDwindow-E5F3FC897FF4B4F7EA29CE1D42CCF738 百度一下,你就知道

time.sleep(3)
driver.find_element_by_name('tj_trnews').click()
news = WebDriverWait(driver, 10).until(
    lambda x: driver.find_element_by_css_selector('.hdline0 .a3'))
news.click()

all_windows = driver.window_handles
for window in all_windows:
    if window != current_window:
        time.sleep(4)
        driver.switch_to_window(window)
        # Grab the h1 headline on the Baidu News page
        title = driver.find_element_by_xpath('//div[@class="cnt_bd"]/h1')
        WebDriverWait(title, 10).until(lambda x: title.is_displayed())
        print(title.text)

# Close the browser
# driver.quit()

driver.switch_to_window(current_window)
print(driver.find_element_by_css_selector('#footer span').text)
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import time

driver = webdriver.Chrome()
driver.get('http://www.taobao.com')
driver.find_element_by_id('q').send_keys(u'辣條')
time.sleep(3)

# Four ways of locating the search button:
# Class names are separated by spaces; this takes only the last one
# driver.find_element_by_css_selector('.J_SearchPanel .tb-bg').click()
# This takes only the first class before the space; it also works
# driver.find_element_by_css_selector('.search-panel .btn-search').click()
# Locating by an attribute value also works
# driver.find_element_by_css_selector('button[data-spm-click="gostr=/tbindex;locaid=d13"]').click()
# Locating by XPath also works: click the search button
driver.find_element_by_xpath('//div[@class="search-button"]/button').click()

for page in range(1, 3):
    print('Scraping page {}'.format(page))
    # On the desktop page one result page shows 13 rows in total, and at most two rows fit on screen
    for row in range(1, 13, 2):
        # x: the page is split into 12 equal parts; scroll down to the row-th part
        x = float(row) / 12
        # document: the page
        # documentElement: the root element of the page
        # scrollTop: how far to scroll down from the top
        # scrollHeight: the total scrollable height of the page
        # JavaScript cannot be run directly as Python, so the snippet is written as a string
        js = 'document.documentElement.scrollTop = document.documentElement.scrollHeight * {}'.format(x)
        # Equivalent: js = 'document.documentElement.scrollTop = document.documentElement.scrollHeight * %f' % x
        driver.execute_script(js)
        time.sleep(3)
    item_list = driver.find_elements_by_class_name('J_MouserOnverReq')
    for item in item_list:
        with open('lianxi.txt', 'a', encoding='utf8') as f:
            f.write(item.text)
            f.write('\n')
    # Jump to the next page
    driver.find_element_by_xpath('//li[@class="item next"]/a').click()
from selenium import webdriver
from time import sleep

bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe')
bro.get(url='https://www.baidu.com/')
sleep(2)
text_input = bro.find_element_by_id('kw')
text_input.send_keys('人民幣')
sleep(2)
bro.find_element_by_id('su').click()
sleep(3)
# Get the source of the current page
print(bro.page_source)
bro.quit()
# Load more movie details from the Douban movie chart
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe')
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source
with open('./douban.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
sleep(1)
bro.quit()
# Headless Chrome
from selenium.webdriver.chrome.options import Options

chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')

# Load more movie details from the Douban movie chart
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe',
                       chrome_options=chrome_options)
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source
with open('./douban.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
print(page_text)
sleep(1)
bro.quit()
# PhantomJS
# Load more movie details from the Douban movie chart
url = 'https://movie.douban.com/typerank?type_name=%E6%83%8A%E6%82%9A&type=19&interval_id=100:90&action='
bro = webdriver.PhantomJS(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\phantomjs-2.1.1-windows\bin\phantomjs.exe')
bro.get(url)
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(3)
bro.execute_script('window.scrollTo(0,document.body.scrollHeight)')
sleep(2)
page_text = bro.page_source
with open('./douban.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
sleep(1)
bro.quit()
# QQ Zone
bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe')
url = 'https://qzone.qq.com/'
bro.get(url=url)
sleep(2)
# Switch into the specific iframe that holds the login form
bro.switch_to.frame('login_frame')
bro.find_element_by_id('switcher_plogin').click()
sleep(2)
bro.find_element_by_id('u').send_keys('460086804')
bro.find_element_by_id('p').send_keys('shuo0127')
bro.find_element_by_id('login_button').click()
sleep(5)
page_text = bro.page_source
with open('qq.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
bro.quit()
# Scrape videos from pearvideo.com
import requests
import re
from lxml import etree
from multiprocessing.dummy import Pool
import random

# Instantiate a thread pool object
pool = Pool(5)

url = 'https://www.pearvideo.com/category_1'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}

# Worker functions used by the pool (defined before they are passed to pool.map)
def getVideoData(url):
    return requests.get(url=url, headers=headers).content

def saveVideo(data):
    fileName = str(random.randint(0, 5000)) + '.mp4'
    with open(fileName, 'wb') as fp:
        fp.write(data)

page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="listvideoList"]/ul/li')
video_url_list = []
for li in li_list:
    detail_url = 'https://www.pearvideo.com/' + li.xpath('./div/a/@href')[0]
    detail_page = requests.get(url=detail_url, headers=headers).text
    video_url = re.findall('srcUrl="(.*?)",vdoUrl', detail_page, re.S)[0]
    video_url_list.append(video_url)

# Download all videos concurrently, then save them
video_data_list = pool.map(getVideoData, video_url_list)
pool.map(saveVideo, video_data_list)