二 . 爬蟲 requests模塊使用 urllib模塊 和 請求響應相關參數

一 . requests模塊使用 和 請求響應相關參數

https://www.cnblogs.com/wupeiqi/articles/6283017.html

1. requests  get請求相關參數

import requests

# GET demo: query-string params, headers, cookies, timeout and redirects.
# NOTE(review): `proxies` is built but never passed to requests.get below —
# presumably intentional, since its value is only a placeholder.

url = 'http://httpbin.org/get?name=bob'  # params can also be appended after '?' in the URL

params = {'name': 'nick', 'age': '18'}  # merged with URL params, no precedence; a repeated key becomes a list

headers = {
    # without a real User-Agent the server detects a python client
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
    "Cookie": 'aaa=aaa;bbb=bbb'
}

cookies = {'xxx': '111', 'yyy': '222'}  # ignored when the headers already carry a Cookie

proxies = {'http': 'http://ip:端口'}  # proxy mapping; use an 'https' key for https URLs

timeout = 0.5  # seconds; the request raises if it takes longer

allow_redirects = True  # whether redirects are followed

resp = requests.get(
    url=url,
    headers=headers,
    params=params,
    cookies=cookies,
    timeout=timeout,
    allow_redirects=allow_redirects,
)
print(resp.text)


{
  "args": {
    "age": "18", 
    "name": [
      "bob", 
      "nick"
    ]
  }, 
  "headers": {
    "Accept": "*/*", 
    "Accept-Encoding": "gzip, deflate", 
    "Cookie": "aaa=aaa;bbb=bbb", 
    "Host": "httpbin.org", 
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36"
  }, 
  "origin": "117.172.254.245, 117.172.254.245", 
  "url": "https://httpbin.org/get?name=bob&name=nick&age=18"
}

2. requests  post請求相關參數

import requests

# POST demo: form data, json body, cookies, headers and a file upload.

url = 'http://httpbin.org/post'  # request url
data = {
    'name': 'nick',  # form fields
    'age': '18',
}
json = {"sex": 'man'}  # json body; sent as null when data is also given

cookies = {
    'xxx': 'xxx',
    'yyy': 'yyy'
}
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
    "Cookie": 'aaa=aaa;bbb=bbb'  # a Cookie header overrides the cookies argument
}
timeout = 0.5  # NOTE(review): defined but not passed to requests.post below
allow_redirects = True  # NOTE(review): defined but not passed either

# Fix: open upload files in binary mode ('rb'), as the requests docs advise,
# and close the handle deterministically with a context manager
# (the original used 'rt' and leaked the file object).
with open('aa', 'rb') as f:
    files = {'file': f}  # file upload -> multipart/form-data
    res = requests.post(url=url, headers=headers, data=data, cookies=cookies, json=json, files=files)
print(res.text)



{
  "args": {}, 
  "data": "", 
  "files": {
    "file": "1111111111111111111111111111\u5a03\u5a03\u8ba4\u4e3a\u4eba"
  }, 
  "form": {
    "age": "18", 
    "name": "nick"
  }, 
  "headers": {
    "Accept": "*/*", 
    "Accept-Encoding": "gzip, deflate", 
    "Content-Length": "356", 
    "Content-Type": "multipart/form-data; boundary=e4ee34734e2325fdc6fa1eb84d070882", 
    "Cookie": "aaa=aaa;bbb=bbb", 
    "Host": "httpbin.org", 
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36"
  }, 
  "json": null, 
  "origin": "117.172.254.245, 117.172.254.245", 
  "url": "https://httpbin.org/post"
}

3.  request請求

import requests
requests.request(method,url,**kwargs)  # method is the HTTP verb, url the address; get/post etc. are thin wrappers around request

4. 響應相關參數

import requests
r = requests.get(...)  # placeholder: pass a real URL and arguments here

r.url  # the URL that was actually requested

r.text  # response body decoded as text

r.encoding = 'gbk'  # override the decoding charset to fix mojibake

r.content  # raw body bytes

r.json()  # fixed: json is a method and must be called — equivalent to
          # json.loads(r.text); raises if the body is not valid JSON

r.status_code  # HTTP status code

r.headers  # response headers

r.cookies  # cookies set by the server

r.history  # on redirects: [response object 1, response object 2, ...]

5. 自動保存cookie的請求

session = requests.session()
r = session.get(...)  # cookies are stored on the session and sent automatically on later requests

# Extra: persisting session cookies to a local file
import http.cookiejar as cookiejar
import requests
session = requests.session()
session.cookies = cookiejar.LWPCookieJar()

session.cookies.load(filename='cookie.txt')  # load cookies (the file must exist, i.e. save must have run before)

res = session.get('http://www.baidu.com')
session.cookies.save(filename='cookie.txt')  # save cookies to disk





import requests

# Session demo: visit the site, log in, then vote — the session object
# carries the cookies across all three requests automatically.
session = requests.Session()

home_resp = session.get(url="http://dig.chouti.com/help/service")

login_payload = {
    'phone': "8615131255089",
    'password': "xxooxxoo",
    'oneMonth': ""
}
login_resp = session.post(url="http://dig.chouti.com/login", data=login_payload)

vote_resp = session.post(url="http://dig.chouti.com/link/vote?linksId=8589523")
print(vote_resp.text)

 

 

# 1. 方法
    requests.get
    requests.post 
    requests.put 
    requests.delete 
    ...
    requests.request(method='POST')

# 2. 參數

    2.1  url
    2.2  headers
    2.3  cookies
    2.4  params
    2.5  data,傳請求體
            
            requests.post(
                ...,
                data={'user':'alex','pwd':'123'}
            )
            
POST /index http1.1\r\nhost:c1.com\r\n\r\nuser=alex&pwd=123
            
    2.6  json,傳請求體
            requests.post(
                ...,
                json={'user':'alex','pwd':'123'}
            )
            
POST /index http1.1\r\nhost:c1.com\r\nContent-Type:application/json\r\n\r\n{"user":"alex","pwd":123}
    2.7 代理 proxies
        # 無驗證
            proxie_dict = {
                "http": "61.172.249.96:80",
                "https": "http://61.185.219.126:3128",
            }
            ret = requests.get("https://www.proxy360.cn/Proxy", proxies=proxie_dict)
            
        
        # 驗證代理
            from requests.auth import HTTPProxyAuth
            
            proxyDict = {
                'http': '77.75.105.165',
                'https': '77.75.106.165'
            }
            auth = HTTPProxyAuth('用戶名', '密碼')
            
            r = requests.get("http://www.google.com", data={'xxx':'ffff'}, proxies=proxyDict, auth=auth)
            print(r.text)
    -----------------------------------------------------------------------------------------
    2.8 文件上傳 files
        # 發送文件
            file_dict = {
                'f1': open('xxxx.log', 'rb')
            }
            requests.request(
                method='POST',
                url='http://127.0.0.1:8000/test/',
                files=file_dict
            )
            
    2.9 認證 auth
    
        內部:
            用戶名和密碼,用戶和密碼加密,放在請求頭中傳給後臺。
            
                - "用戶:密碼"
                - base64("用戶:密碼")
                - "Basic base64("用戶|密碼")"
                - 請求頭:
                    Authorization: "basic base64("用戶|密碼")"
            
        from requests.auth import HTTPBasicAuth, HTTPDigestAuth

        ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('wupeiqi', 'sdfasdfasdf'))
        print(ret.text)
        
    2.10 超時 timeout 
        # ret = requests.get('http://google.com/', timeout=1)
        # print(ret)
    
        # ret = requests.get('http://google.com/', timeout=(5, 1))
        # print(ret)
        
    2.11 容許重定向  allow_redirects
        ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
        print(ret.text)
        
    2.12 大文件下載 stream
        from contextlib import closing
        with closing(requests.get('http://httpbin.org/get', stream=True)) as r1:
            # 在此處理響應。
            for i in r1.iter_content():
                print(i)
            
    2.13 證書 cert
        - 百度、騰訊 => 不用攜帶證書(系統幫你作了)
        - 自定義證書
            requests.get('http://127.0.0.1:8000/test/', cert="xxxx/xxx/xxx.pem")
            requests.get('http://127.0.0.1:8000/test/', cert=("xxxx/xxx/xxx.pem","xxx.xxx.xx.key"))
    2.14 確認 verify =False 



requests.get('http://127.0.0.1:8000/test/', verify=False)

 

二 .urllib模塊使用 和 請求響應相關參數

http://www.javashuo.com/article/p-pcwcpteh-c.html

http://www.javashuo.com/article/p-gjvaxqcd-d.html

其主要包括以下模塊:

urllib.request 請求模塊

urllib.error 異常處理模塊

urllib.parse url解析模塊

urllib.robotparser robots.txt解析模塊

 

# urlopen demo: fetch a page, print the body, then inspect response metadata.
import urllib.request

resp = urllib.request.urlopen('https://www.python.org')
body = resp.read().decode('utf-8')
print(body)

print(resp.status)               # HTTP status code
print(resp.getheaders())         # all headers as (name, value) pairs
print(resp.getheader('Server'))  # one header looked up by name




print("#####################################################################################3333")



from urllib import request, parse

# POST with urllib: build the form body by hand and send it via Request.
url = 'http://httpbin.org/post'
headers = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
    'Host': 'httpbin.org'
}
# Fix: renamed from `dict`, which shadowed the builtin dict type.
form = {
    'name': 'Germey'
}
data = bytes(parse.urlencode(form), encoding='utf8')  # body must be bytes
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
相關文章
相關標籤/搜索