python網絡爬蟲(4)結構與基本概念

基本模型

請求與響應

import urllib.request as urllib2

# Build the request object explicitly, then fetch and dump the raw page body.
req = urllib2.Request('http://www.zhihu.com')
resp = urllib2.urlopen(req)
page = resp.read()
print(page)

Cookie處理

呵呵

import urllib.request as urllib2
import http.cookiejar as cookielib

# Attach a CookieJar to the opener so every cookie set by the
# response is captured and can be inspected afterwards.
jar = cookielib.CookieJar()
cookie_opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
resp = cookie_opener.open('http://www.zhihu.com')
for ck in jar:
    print(ck.name + ':' + ck.value)

如下自定義Cookie內容:
# Send a hand-crafted Cookie header instead of relying on a CookieJar.
custom_opener = urllib2.build_opener()
custom_opener.addheaders.append(('Cookie', 'email=' + "xxxxxxx@163.com"))
req = urllib2.Request("http://www.zhihu.com/")
resp = custom_opener.open(req)
print(resp.headers)
body = resp.read()

Timeout處理

設置局部的Timeout

超時會拋出異常

import urllib.request as urllib2
import http.cookiejar as cookielib

# A 0.01 s timeout all but guarantees a timeout exception here,
# demonstrating the per-call `timeout=` argument of urlopen.
req = urllib2.Request('http://www.zhihu.com')
resp = urllib2.urlopen(req, timeout=0.01)
body = resp.read()
print(body)

修改全局的Timeout

# Set a process-wide default socket timeout so every urlopen call
# inherits it without passing `timeout=` explicitly.
# NOTE: the bare `import urllib2` module only exists on Python 2; under
# Python 3 it raises ModuleNotFoundError. Alias urllib.request instead,
# matching the convention used by every other snippet in this file.
import urllib.request as urllib2
import socket

socket.setdefaulttimeout(10)  # time out after 10 seconds
urllib2.socket.setdefaulttimeout(10)  # alternative spelling via the re-exported module

返回響應代碼

正常200,網頁丟失404

import urllib.request as urllib2
import  http.cookiejar as cookielib

# Fetch a page and report its HTTP status code (200 OK, 404 missing, ...).
# Catch URLError — the parent class of HTTPError — so that DNS or
# connection failures are reported too instead of crashing the script.
try:
    response = urllib2.urlopen('http://www.samoy.cn/seoganhuo/1')
    print(response)
    print(response.getcode())
except urllib2.URLError as e:
    if hasattr(e, 'code'):
        # HTTPError: the server answered with an error status.
        print('Error code:', e.code)
    else:
        # Plain URLError: the server could not be reached at all.
        print('Failed to reach server:', e.reason)

檢查重定向問題

當訪問的網址返回後仍然是該網址,則未發生重定向。

import urllib.request as urllib2
import http.cookiejar as cookielib

# If the final URL equals the one requested, no redirect took place.
resp = urllib2.urlopen('http://www.baidu.cn')
not_redirected = resp.geturl() == 'http://www.baidu.cn'
print(not_redirected)

另一種使用類進行的重定向檢查

import urllib.request as urllib2
import  http.cookiejar as cookielib

# Custom redirect handler used to observe redirects rather than have
# urllib follow them fully transparently.
class RedirectHandler(urllib2.HTTPRedirectHandler):
    # 301: returning None means a permanent redirect is NOT handled,
    # so urlopen will raise HTTPError for a 301 response.
    def http_error_301(self, req, fp, code, msg, headers):
        pass
    # 302: delegate to the stock base-class handler (which follows the
    # redirect), then record the status code and the final URL on the
    # returned response object for the caller to inspect.
    def http_error_302(self, req, fp, code, msg, headers):
        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        result.status = code
        result.newurl = result.geturl()
        return result
opener = urllib2.build_opener(RedirectHandler)
result=opener.open('http://www.baidu.cn')
# NOTE(review): .newurl and .status are only attached when a 302 actually
# occurred — if the server does not redirect, these attribute lookups may
# fail (or .status may reflect the stock response field). Verify.
print(result.newurl)
print(result.status)

代理設置

使用install_opener更新全局的Proxy。

import urllib.request as urllib2
import http.cookiejar as cookielib

# install_opener makes the proxy-enabled opener the process-wide default,
# so a plain urlopen call below already goes through the proxy.
proxy_handler = urllib2.ProxyHandler({'http': '127.0.0.1:8087'})
global_opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(global_opener)
resp = urllib2.urlopen('http://www.zhihu.com/')
print(resp.read())

更新局部

import urllib.request as urllib2
import http.cookiejar as cookielib

# Use the proxy only through this specific opener; the global
# default opener used by urlopen is left untouched.
proxy_handler = urllib2.ProxyHandler({'http': '127.0.0.1:8087'})
local_opener = urllib2.build_opener(proxy_handler)
resp = local_opener.open("http://www.zhihu.com/")
print(resp.read())

使用requests實現http請求

參見:包括get post 響應,編碼,請求頭處理,超時斷定

Cookie處理

獲取

import requests

# Fetch the page with a spoofed User-Agent, then print every
# cookie field the server set on the response.
agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
req_headers = {'User-Agent': agent}
resp = requests.get('http://www.baidu.com', headers=req_headers)
for name in resp.cookies.keys():
    print(name + ':' + resp.cookies.get(name))

發送自定義

import requests

# Attach custom cookies to an outgoing request via the cookies= argument.
agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
req_headers = {'User-Agent': agent}
my_cookies = {'name': 'qiye', 'age': '10'}
resp = requests.get('http://www.baidu.com', headers=req_headers, cookies=my_cookies)
print(resp.text)

帶帳號密碼發送cookie用於登陸

import requests

login_url = 'http://www.xxxxxxx.com/login'
session = requests.Session()
# Visit the login page first as a guest so the server assigns a session cookie.
resp = session.get(login_url, allow_redirects=True)
payload = {'name': 'qiye', 'passwd': 'qiye'}
# POST the credentials on the same session; on success the guest
# session is promoted to a logged-in one.
resp = session.post(login_url, data=payload, allow_redirects=True)
print(resp.text)

重定向檢驗

import requests

# r.url is the final URL after redirects; r.history lists the
# intermediate redirect responses (empty when none occurred).
resp = requests.get('http://www.baidu.cn')
print(resp.url)
print(resp.status_code)
print(resp.history)

代理設置

舉例

import requests

# Route http and https traffic through the configured proxy endpoints.
proxy_map = {
    "http": "http://10.10.1.10:3128",
    "https": "http://10.10.1.10:1080",
}
requests.get("https://www.baidu.com", proxies=proxy_map)

或使用 http://user:password@host 方式設置proxies,進行帶認證的代理

相關文章
相關標籤/搜索