NumPy: efficient operations on multi-dimensional arrays; efficient mathematical functions.
Matplotlib: visualization: 2D and (more recently) 3D plots.
SciPy: a large library implementing a wide range of numerical algorithms.
SymPy: symbolic (analytical) computation.
Pandas: statistics and data analysis (covered tomorrow).
NumPy provides a new data type: the ndarray (n-dimensional array).
np.array([2, 3, 6, 7])
np.array([2, 3, 6, 7 + 1j])
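The element type of the resulting array is inferred from its contents, and a single complex entry promotes the whole array. A quick check (the exact integer dtype depends on the platform):

import numpy as np
print(np.array([2, 3, 6, 7]).dtype)       # an integer dtype, e.g. int64
print(np.array([2, 3, 6, 7 + 1j]).dtype)  # complex128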
import numpy as np
# arange accepts the same three parameters as range(start, stop, step),
# i.e. start value, stop value and step size, plus a dtype parameter for the data type
a = np.arange(5)
b = np.arange(10, 100, 20, dtype=float)
# linspace(start, stop, num) returns num evenly spaced samples over the interval [start, stop]:
c = np.linspace(0., 2.5, 5)
import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6]])
print(a)
print(a.shape)  # number of rows and columns
print(a.ndim)   # number of dimensions
print(a.size)   # number of elements
import numpy as np
a = np.arange(0, 20, 1)  # 1-dimensional
b = a.reshape((4, 5))    # 4 rows, 5 columns
c = a.reshape((20, 1))   # 2-dimensional
d = a.reshape((-1, 4))   # -1: this dimension is inferred automatically
a.shape = (4, 5)         # changes the shape of a in place
import numpy as np
a = np.array([1, 2, 3, 4, 5])
b = a.copy()
c1 = np.dot(np.transpose(a), b)  # matrix multiplication with dot
print(c1)
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(c)
tran = c.transpose()  # transpose of the matrix
print(tran)
ax = np.reshape(a, (5, 1))
bx = np.reshape(b, (1, 5))
c = np.dot(ax, bx)
print(c)
import numpy as np
a = np.zeros(3)                # filled with zeros
b = np.zeros((2, 2), complex)  # complex: complex numbers
c = np.ones((2, 3))
d = np.random.rand(2, 4)   # rand: uniform random numbers between 0 and 1
e = np.random.randn(2, 4)  # randn: standard (Gaussian) normal distribution, mean 0 and standard deviation 1
import numpy as np
a = np.arange(5)
print(a)
b = a[2:]; b[0] = 100  # a slice is a view: modifying b also modifies a
print(b)
print(a)
c = a[2:].copy(); c[0] = 99  # a copy owns its data: a stays unchanged
print(c)
print(a)
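A quick way to confirm whether two arrays share their underlying data is np.may_share_memory; a minimal check of the view/copy behaviour above:

import numpy as np
a = np.arange(5)
print(np.may_share_memory(a, a[2:]))         # True: the slice is a view
print(np.may_share_memory(a, a[2:].copy()))  # False: the copy owns its data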
The * operator performs element-wise multiplication, not matrix multiplication:
Use the dot() function for matrix multiplication:
import numpy as np
A = np.array([[1, 2], [3, 4]])
print(A * A)         # element-wise product
print(np.dot(A, A))  # matrix product
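Since Python 3.5, the @ operator is shorthand for matrix multiplication and agrees with dot() on 2-D arrays; a quick check:

import numpy as np
A = np.array([[1, 2], [3, 4]])
print(A @ A)                             # matrix product, same as np.dot(A, A)
print(np.allclose(A @ A, np.dot(A, A)))  # True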
Other file formats are also available (see the API documentation).
save() stores an array as a binary file in NumPy's '.npy' format; loadtxt() reads plain-text tables such as the data.txt sample below:
# Year Min temp. Max temp.
1990 -1.5 25.3
1991 -3.2 21.2
import numpy as np
a = np.linspace(0, 1, 12)
a.shape = (3, 4)
np.savetxt('myfile.txt', a)  # text format
np.save('myfile', a)         # binary '.npy' format
table = np.loadtxt("data.txt")
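Reading the data back is symmetric: loadtxt parses the text file and np.load reads the binary '.npy' file. A sketch, assuming the two files written above exist:

import numpy as np
a_txt = np.loadtxt('myfile.txt')  # text file written by savetxt
a_bin = np.load('myfile.npy')     # binary file written by save
print(np.allclose(a_txt, a_bin))  # True: both restore the same values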
Everything above is numerical computation. Next comes symbolic computation: Python handles it through the sympy module, which can solve problems such as equations and integrals explicitly (analytically).
import sympy as sy
# declare x and y as symbolic variables
x = sy.Symbol('x')
y = sy.Symbol('y')
a, b = sy.symbols('a b')
# build a new symbolic expression (not a function)
f = x**2 + y**2 - 2*x*y + 5
print(f)
# automatic simplification
g = x**2 + 2 - 2*x + x**2 - 1
print(g)
import sympy as sy
x = sy.Symbol('x')
y = sy.Symbol('y')
# gives [-1, 1]
print(sy.solve(x**2 - 1))
# no guarantee of a closed-form solution
print(sy.solve(x**3 + 0.5*x**2 - 1))
# expresses x in terms of y
print(sy.solve(x**3 + y**2))
# error: no algorithm can be found
print(sy.solve(x**x + 2*x - 1))
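solve also handles systems of equations when given a list of expressions and the symbols to solve for; a small sketch:

import sympy as sy
x, y = sy.symbols('x y')
# x + y = 3 and x - y = 1  =>  {x: 2, y: 1}
print(sy.solve([x + y - 3, x - y - 1], [x, y]))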
import sympy as sy
x = sy.Symbol('x')
y = sy.Symbol('y')
a, b = sy.symbols('a b')
# single variable
f = sy.sin(x) + sy.exp(x)
print(sy.integrate(f, (x, a, b)))      # symbolic limits
print(sy.integrate(f, (x, 1, 2)))      # exact result
print(sy.integrate(f, (x, 1.0, 2.0)))  # numerical result
# multiple variables
g = sy.exp(x) + x * sy.sin(y)
print(sy.integrate(g, (y, a, b)))
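Passing just the symbol instead of a (symbol, lower, upper) tuple yields the antiderivative (sympy omits the integration constant):

import sympy as sy
x = sy.Symbol('x')
f = sy.sin(x) + sy.exp(x)
print(sy.integrate(f, x))  # exp(x) - cos(x)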
import sympy as sy
x = sy.Symbol('x')
y = sy.Symbol('y')
# single variable
f = sy.cos(x) + x**x
print(sy.diff(f, x))
# multiple variables
g = sy.cos(y) * x + sy.log(y)
print(sy.diff(g, y))
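Higher-order derivatives only need an extra argument to diff:

import sympy as sy
x = sy.Symbol('x')
f = sy.cos(x) + x**x
print(sy.diff(f, x, 2))  # second derivative with respect to x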
How a crawler works:
Simulate a browser --> send a request to the target site --> receive the response data --> extract the useful data --> save it locally or to a database.
The full crawling workflow:
1. Send a request (request libraries)
   - the requests module
   - the selenium module
2. Receive the response data (returned by the server)
3. Parse and extract the data (parsing libraries)
   - re (regular expressions)
   - bs4 (BeautifulSoup4)
   - XPath
4. Save the data (storage)
   - MongoDB
import requests
import re  # regular-expression module
# uuid.uuid4() generates a globally unique random string from the timestamp
import uuid

# The three steps of crawling
# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Parse the data
# Parse the home page to get the IDs of the video detail pages
def parse_index(text):
    # re.findall('pattern', 'text to parse', flags)
    res = re.findall('<a href="video_(.*?)"', text, re.S)
    # print(res)
    detail_url_list = []
    for m_id in res:
        # build the detail-page url
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # print(detail_url)
        detail_url_list.append(detail_url)
    # print(detail_url_list)
    return detail_url_list

# Parse a detail page to get the video url
def parse_detail(text):
    '''
    (.*?): captures the content inside the parentheses
    .*?:   matches without capturing
    <video webkit-playsinline="" playsinline="" x-webkit-airplay="" autoplay="autoplay"
        src="https://video.pearvideo.com/mp4/adshort/20190613/cont-1566073-14015522_adpkg-ad_hd.mp4"
        style="width: 100%; height: 100%;"></video>
    pattern: <video.*?src="(.*?)"
    # the analysis above does not need to be written out; this suffices:
    pattern: srcUrl="(.*?)"
    '''
    movie_url = re.findall('srcUrl="(.*?)"', text, re.S)[0]
    return movie_url

# 3. Save the data
def save_movie(movie_url):
    response = requests.get(movie_url)
    # write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()

if __name__ == '__main__':  # in PyCharm: type main + Enter
    # 1. Send a request to the home page
    index_res = get_page(url='https://www.pearvideo.com/')
    # 2. Parse the home page to get the detail-page ids
    detail_url_list = parse_index(index_res.text)
    # print(detail_url_list)
    # 3. Send a request to each detail-page url
    for detail_url in detail_url_list:
        detail_res = get_page(url=detail_url)
        print(detail_res.text)
        # 4. Parse the detail page to get the video url
        movie_url = parse_detail(detail_res.text)
        print(movie_url)
        # 5. Save the video
        save_movie(movie_url)
A multi-threaded version:
import requests
import re  # regular-expression module
from concurrent.futures import ThreadPoolExecutor

# limit the pool to 50 threads
pool = ThreadPoolExecutor(50)

def get_page(url):
    print(f"async task {url}")
    response = requests.get(url)
    return response

def parse_index(res):
    response = res.result()
    res = re.findall('<a href="video_(.*?)"', response.text, re.S)
    for m_id in res:
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        pool.submit(get_page, detail_url).add_done_callback(parse_detail)

def parse_detail(res):
    response = res.result()
    movie_url = re.findall('srcUrl="(.*?)"', response.text, re.S)[0]
    movie_name = re.findall('<title>(.*?)<', response.text, re.S)[0]
    pool.submit(save_movie, movie_url, movie_name)

def save_movie(movie_url, movie_name):
    response = requests.get(movie_url)
    with open(f'{movie_name}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()

if __name__ == '__main__':
    url = 'https://www.pearvideo.com/'
    pool.submit(get_page, url).add_done_callback(parse_index)
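Note that add_done_callback hands each callback the finished Future rather than the return value, which is why every callback above starts with res.result(); result() also re-raises any exception the task threw. A minimal standalone sketch of the pattern:

from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(2)

def task(n):
    return n * n

def on_done(future):
    # the callback receives the Future; result() returns the task's
    # return value (or re-raises its exception)
    print(future.result())

pool.submit(task, 7).add_done_callback(on_done)
pool.shutdown(wait=True)  # safe here: the callback submits no new tasks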
GET requests explained
User-Agent
# Visiting Zhihu Explore
Request URL:
https://www.zhihu.com/explore
Request method:
GET
Request headers:
user-agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36
cookies
import requests

# request-header dictionary
# headers = {
#     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
# }
# add the user-agent inside the get request
# response = requests.get(url='https://www.zhihu.com/explore', headers=headers)
# print(response.status_code)  # 200
# # print(response.text)
# with open('zhihu.html', 'w', encoding='utf-8') as f:
#     f.write(response.text)

'''
params: request parameters
urls for a Baidu search for 安徽工程大學:
https://www.baidu.com/s?wd=安徽工程大學&pn=10   second page
https://www.baidu.com/s?wd=安徽工程大學&pn=20   third page
'''
from urllib.parse import urlencode
# url = 'https://www.baidu.com/s?wd=%E8%94%A1%E5%BE%90%E5%9D%A4'
# url = 'https://www.baidu.com/s?' + urlencode({"wd": "蔡徐坤"})
url = 'https://www.baidu.com/s?'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# print(url)
# pass the query string via the params argument of the get method
# response = requests.get(url, headers=headers, params={"wd": "安徽工程大學"})
response = requests.get(url, headers=headers, params={"wd": "安徽工程大學", "pn": "20"})
# print(response.text)
with open('gongcheng2.html', 'w', encoding='utf-8') as f:
    f.write(response.text)
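requests percent-encodes the params dict for you; response.url shows the URL that was actually requested, matching what urlencode would build by hand. A small sketch to verify:

import requests
from urllib.parse import urlencode

response = requests.get('https://www.baidu.com/s',
                        params={'wd': '安徽工程大學', 'pn': '20'})
print(response.url)  # the fully encoded URL (after any redirects)
print(urlencode({'wd': '安徽工程大學', 'pn': '20'}))  # the same percent-encoded query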
Carrying login cookies to get past GitHub's login check
Request URL: https://github.com/settings/emails
Request method:
GET
Request headers:
User-Agent
Cookie:
import requests

# request url
url = 'https://github.com/settings/emails'
# request headers
headers = {
    'user-agent': '',
    # the cookies could also be spliced directly into the request headers
    # 'Cookie': ''
}
# github_res = requests.get(url, headers=headers)

cookies = {
    'Cookie': ''
}
github_res = requests.get(url, headers=headers, cookies=cookies)
print('15622792660' in github_res.text)
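The browser's developer tools show the cookie as a single 'k1=v1; k2=v2' string; a minimal, hypothetical sketch (cookie_str is a placeholder) for turning that raw string into the dict requests expects:

# cookie_str stands in for the raw Cookie header copied from the browser
cookie_str = 'sessionid=abc123; logged_in=yes'
cookies = dict(pair.split('=', 1) for pair in cookie_str.split('; '))
print(cookies)  # {'sessionid': 'abc123', 'logged_in': 'yes'}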
Crawling Douban movies
import requests
import re

url = 'https://movie.douban.com/top250'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# 1. Send a request to the Douban TOP250 page and get the response
response = requests.get(url, headers=headers)
# print(response.text)
# 2. Extract the data with a regular expression:
# detail-page url, image link, movie name, rating, number of ratings
movie_content_list = re.findall(
    # the pattern
    '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人評價',
    # the text to parse
    response.text,
    # the matching mode
    re.S)
for movie_content in movie_content_list:
    # unpack each movie
    detail_url, movie_jpg, name, point, num = movie_content
    data = f'movie: {name}, detail page: {detail_url}, image url: {movie_jpg}, rating: {point}, ratings count: {num}\n'
    print(data)
    # 3. Save the data: append the movie information to a file
    with open('douban.txt', 'a', encoding='utf-8') as f:
        f.write(data)
Crawling the full Douban TOP250
import requests
import re

url_1 = 'https://movie.douban.com/top250?start='
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
for i in range(0, 250, 25):  # the TOP250 is paginated 25 movies at a time
    url = f'{url_1}{i}&filter='
    response = requests.get(url, headers=headers)
    movie_content_list = re.findall(
        '<div class="item">.*?href="(.*?)">.*?src="(.*?)".*?<span class="title">(.*?)</span>.*?導演:(.*?)<br>.*?\n(.*?) / (.*?) / (.*?)\n.*?<span class="rating_num".*?>(.*?)</span>.*?<span>(.*?)人評價.*?class="inq">(.*?)</span>',
        response.text,
        re.S)
    for movie_content in movie_content_list:
        detail_url, movie_jpg, name, director, year, country, kind, point, num, profile = movie_content
        director = director.replace('&nbsp;', ' ')  # strip the page's non-breaking-space entities
        data = (f'movie: {name}, director: {director}, {year.strip()}, {country}, {kind}, '
                f'rating: {point}, ratings count: {num}, {profile}, '
                f'detail page: {detail_url}, image url: {movie_jpg}\n')
        with open('douban.txt', 'a', encoding='utf-8') as f:
            f.write(data)
Result screenshot: