轉自:原文連接:http://www.cnblogs.com/ssyfj/p/9222342.html
import asyncio
import aiohttp


async def fetch_async(url):
    """Fetch *url* with a one-off aiohttp request and print the decoded body."""
    print(url)
    async with aiohttp.request("GET", url) as r:
        # Use `await r.read()` instead when the payload is binary (e.g. images)
        # and must not be decoded.
        response = await r.text(encoding="utf-8")
        print(response)


tasks = [fetch_async('http://www.baidu.com/'),
         fetch_async('http://www.chouti.com/')]

event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
import asyncio
import aiohttp


async def fetch_async(url):
    """Fetch *url* through a ClientSession and print status and body text.

    Nested coroutines: only the outermost coroutine (fetch_async) has to be
    scheduled on the loop. Because `await` is used in the body, every
    enclosing function must be declared `async`.
    """
    print(url)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            print(resp.status)
            print(await resp.text())


tasks = [fetch_async('http://www.baidu.com/'),
         fetch_async('http://www.cnblogs.com/ssyfj/')]

event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
session.put('http://httpbin.org/put', data=b'data') session.delete('http://httpbin.org/delete') session.head('http://httpbin.org/get') session.options('http://httpbin.org/get') session.patch('http://httpbin.org/patch', data=b'data')
不要爲每次的鏈接都建立一次session,通常狀況下只須要建立一個session,而後使用這個session執行全部的請求。
每一個session對象,內部包含了一個鏈接池,而且將會保持鏈接和鏈接複用(默認開啓)能夠加快總體的性能。
import asyncio,aiohttp
async def func1(url,params):
    """GET *url* with query-string *params*; print the final URL and raw body."""
    async with aiohttp.ClientSession() as http_session:
        async with http_session.get(url, params=params) as resp:
            print(resp.url)
            print(await resp.read())
# Single demo task: request the forum page with query string ?gid=6.
tasks = [func1('https://www.ckook.com/forum.php',{"gid":6}),]
# Classic (pre-3.7) asyncio bootstrap: gather all tasks, run to completion,
# then close the loop explicitly.
event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
async def func1(url, params):
    """GET *url*; print the URL, the detected charset, and the decoded text."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(r.url)
            print(r.charset)  # default charset is utf-8
            # text() with no argument decodes using the default charset;
            # pass encoding=... to override.
            print(await r.text())
async def func1(url, params):
    """GET *url*; print the final URL and the raw (undecoded) body bytes."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(r.url)
            print(await r.read())
async def func1(url, params):
    """GET *url*; print the URL, charset, and the JSON-decoded body."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(r.url)
            print(r.charset)
            # json() also accepts encoding=... and a custom loads= handler.
            print(await r.json())
async def func1(url, params):
    """GET *url* and print only the first 10 bytes of the body stream."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            print(await r.content.read(10))  # read the first 10 bytes only
async def func1(url, params, filename):
    """Stream the response body of *url* into *filename* in 10-byte chunks."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as r:
            with open(filename, "wb") as fp:
                # Read in small chunks so a large body never sits fully in memory.
                while True:
                    chunk = await r.content.read(10)
                    if not chunk:
                        break
                    fp.write(chunk)


tasks = [func1('https://www.ckook.com/forum.php', {"gid": 6}, "1.html"),]
async with session.get(url,params=params) as r: #異步上下文管理器
with open(filename,"wb") as fp: #普通上下文管理器
在於異步上下文管理器中定義了
__aenter__和__aexit__方法
異步上下文管理器指的是在__aenter__
和__aexit__
方法處可以暫停執行的上下文管理器
爲了實現這樣的功能,須要加入兩個新的方法:__aenter__
和__aexit__
。這兩個方法都要返回一個 awaitable類型的值。
推文:異步上下文管理器async with和異步迭代器async for
async def func1(url, params, filename):
    """Stream *url* into *filename*, sending an explicit Content-Type header."""
    async with aiohttp.ClientSession() as session:
        headers = {'Content-Type': 'text/html; charset=utf-8'}
        async with session.get(url, params=params, headers=headers) as r:
            with open(filename, "wb") as fp:
                while True:
                    chunk = await r.content.read(10)
                    if not chunk:
                        break
                    fp.write(chunk)
class ClientSession: def __init__(self, *, connector=None, loop=None, cookies=None, headers=None, skip_auto_headers=None, auth=None, json_serialize=json.dumps, request_class=ClientRequest, response_class=ClientResponse, ws_response_class=ClientWebSocketResponse, version=http.HttpVersion11, cookie_jar=None, connector_owner=True, raise_for_status=False, read_timeout=sentinel, conn_timeout=None, timeout=sentinel, auto_decompress=True, trust_env=False, trace_configs=None):
使用:
cookies = {'cookies_are': 'working'} async with ClientSession(cookies=cookies) as session:
async with session.get(url) as resp: print(resp.cookies)
async with session.get(url) as resp: print(resp.status)
resp.headers 來查看響應頭,獲得的值類型是一個dict:
resp.raw_headers 查看原生的響應頭,字節類型
resp.history #查看被重定向以前的響應頭
默認的IO操做都有5分鐘的響應時間 咱們能夠經過 timeout 進行重寫:
async with session.get('https://github.com', timeout=60) as r: ...
若是 timeout=None 或者 timeout=0 將不進行超時檢查,也就是不限時長。
async def func1():
    """Show how the session cookie jar accumulates cookies across requests."""
    cookies = {'my_cookie': "my_value"}
    async with aiohttp.ClientSession(cookies=cookies) as session:
        async with session.get("https://segmentfault.com/q/1010000007987098") as r:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
        async with session.get("https://segmentfault.com/hottest") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
Set-Cookie: PHPSESSID=web2~d8grl63pegika2202s8184ct2q Set-Cookie: my_cookie=my_value Set-Cookie: PHPSESSID=web2~d8grl63pegika2202s8184ct2q Set-Cookie: my_cookie=my_value
咱們最好使用session.cookie_jar.filter_cookies()獲取網站cookie,不一樣於requests模塊,雖然咱們可使用rp.cookies有可能獲取到cookie,但彷佛並未獲取到全部的cookies。
async def func1():
    """Contrast rp.cookies (per-response Set-Cookie only) with the session jar."""
    cookies = {'my_cookie': "my_value"}
    async with aiohttp.ClientSession(cookies=cookies) as session:
        async with session.get("https://segmentfault.com/q/1010000007987098") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
            # First visit: the server sets its cookie on this response, e.g.
            # Set-Cookie: PHPSESSID=web2~jh3ouqoabvr4e72f87vtherkp6; Domain=segmentfault.com; Path=/
            print(rp.cookies)
        async with session.get("https://segmentfault.com/hottest") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
            print(rp.cookies)  # empty: the server set no cookie on this response
        async with session.get("https://segmentfault.com/newest") as rp:
            print(session.cookie_jar.filter_cookies("https://segmentfault.com"))
            print(rp.cookies)  # empty: the server set no cookie on this response
當咱們使用rp.cookie時,只會獲取到當前url下設置的cookie,不會維護整站的cookie 而session.cookie_jar.filter_cookies("https://segmentfault.com")會一直保留這個網站的全部設置cookies,含有咱們在會話時設置的cookie,而且會根據響應修改更新cookie。這個纔是咱們須要的 而咱們設置cookie,也是須要在aiohttp.ClientSession(cookies=cookies)中設置
ClientSession 還支持 請求頭,keep-alive鏈接和鏈接池(connection pooling)
默認ClientSession使用的是嚴格模式的 aiohttp.CookieJar. RFC 2109,明確的禁止接受url和ip地址產生的cookie,只能接受 DNS 解析IP產生的cookie。能夠經過設置aiohttp.CookieJar 的 unsafe=True 來配置:
jar = aiohttp.CookieJar(unsafe=True) session = aiohttp.ClientSession(cookie_jar=jar)
TCPConnector維持連接池,限制並行鏈接的總量,當池滿了,有請求退出再加入新請求
async def func1():
    """Fetch many pages through a connector limited to 2 parallel connections."""
    cookies = {'my_cookie': "my_value"}
    # limit defaults to 100; limit=0 means unlimited.
    conn = aiohttp.TCPConnector(limit=2)
    async with aiohttp.ClientSession(cookies=cookies, connector=conn) as session:
        for i in range(7, 35):
            url = "https://www.ckook.com/list-%s-1.html" % i
            async with session.get(url) as rp:
                print('---------------------------------')
                print(rp.status)
限制同時打開限制同時打開鏈接到同一端點的數量((host, port, is_ssl) 三的倍數),能夠經過設置 limit_per_host 參數:
limit_per_host: 同一端點的最大鏈接數量。同一端點即(host, port, is_ssl)徹底相同
conn = aiohttp.TCPConnector(limit_per_host=30)#默認是0
在協程下測試效果不明顯
咱們能夠指定域名服務器的 IP 對咱們提供的get或post的url進行解析:
from aiohttp.resolver import AsyncResolver

# Resolve hostnames through explicit DNS servers (Google public DNS here)
# instead of the system resolver.
resolver = AsyncResolver(nameservers=["8.8.8.8", "8.8.4.4"])
conn = aiohttp.TCPConnector(resolver=resolver)
aiohttp支持使用代理來訪問網頁:
async with aiohttp.ClientSession() as session: async with session.get("http://python.org", proxy="http://some.proxy.com") as resp: print(resp.status)
固然也支持須要受權的頁面:
async with aiohttp.ClientSession() as session: proxy_auth = aiohttp.BasicAuth('user', 'pass') #用戶,密碼 async with session.get("http://python.org", proxy="http://some.proxy.com", proxy_auth=proxy_auth) as resp: print(resp.status)
或者經過這種方式來驗證受權:
session.get("http://python.org", proxy="http://user:pass@some.proxy.com")
payload = {'key1': 'value1', 'key2': 'value2'} async with session.post('http://httpbin.org/post', data=payload) as resp: print(await resp.text())
注意:data=dict的方式post的數據將被轉碼,和form提交數據是同樣的做用,若是你不想被轉碼,能夠直接以字符串的形式 data=str 提交,這樣就不會被轉碼。
payload = {'some': 'data'} async with session.post(url, data=json.dumps(payload)) as resp:
其實json.dumps(payload)返回的也是一個字符串,只不過這個字符串能夠被識別爲json格式
url = 'http://httpbin.org/post' files = {'file': open('report.xls', 'rb')} await session.post(url, data=files)
url = 'http://httpbin.org/post' data = FormData() data.add_field('file', open('report.xls', 'rb'), filename='report.xls', content_type='application/vnd.ms-excel') await session.post(url, data=data)
若是將文件對象設置爲數據參數,aiohttp將自動以字節流的形式發送給服務器。
aiohttp支持多種類型的文件以流媒體的形式上傳,因此咱們能夠在文件未讀入內存的狀況下發送大文件。
@aiohttp.streamer def file_sender(writer, file_name=None): with open(file_name, 'rb') as f: chunk = f.read(2**16) while chunk: yield from writer.write(chunk) chunk = f.read(2**16) # Then you can use `file_sender` as a data provider: async with session.post('http://httpbin.org/post', data=file_sender(file_name='huge_file')) as resp: print(await resp.text())
r = await session.get('http://python.org') await session.post('http://httpbin.org/post',data=r.content)
在經過aiohttp發送前就已經壓縮的數據, 調用壓縮函數的函數名(一般是deflate 或 zlib)做爲content-encoding的值:
async def my_coroutine(session, headers, my_data):
    """POST *my_data* pre-compressed with zlib, advertising Content-Encoding.

    The value of Content-Encoding names the compression function used
    (typically 'deflate' or 'gzip'). Note this rebinds the *headers*
    parameter with a fresh dict, discarding the caller's headers.
    """
    data = zlib.compress(my_data)
    headers = {'Content-Encoding': 'deflate'}
    # Original snippet was missing the ':' on this line — fixed.
    async with session.post('http://httpbin.org/post',
                            data=data, headers=headers):
        pass