import urllib.request

request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
# response is an http.client.HTTPResponse object
print(response.geturl())    # the URL that was actually fetched
print(response.info())      # the response headers
print(response.getcode())   # the HTTP status code
html = response.read()      # the response body, as bytes
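read() returns bytes, not str. A minimal sketch of decoding the body, using the charset advertised in the response headers (the utf-8 fallback is an assumption; adjust it for the target site):

charset = response.headers.get_content_charset() or 'utf-8'  # fallback is an assumption
text = html.decode(charset)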
urllib.request.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False, method=None)
1. data parameter: when data is provided, the HTTP request is a POST instead of a GET. data should be a buffer in the standard application/x-www-form-urlencoded format. The urllib.parse.urlencode() function takes a mapping or sequence of 2-tuples and returns a string in this format; in Python 3 that string must then be encoded to bytes before it is passed to Request. For example, building the form that the Youdao web translator posts:
data = {}
data['type'] = 'AUTO'
data['i'] = content            # content is the text to be translated
data['doctype'] = 'json'
data['xmlVersion'] = 1.8
data['keyfrom'] = 'fanyi.web'
data['ue'] = 'UTF-8'
data['action'] = 'FY_BY_CLICKBUTTON'
data['typoResult'] = 'true'
data = urllib.parse.urlencode(data).encode('utf-8')   # urlencode, then encode to bytes
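Putting it together, a minimal sketch of the full POST round trip. The endpoint URL below is an assumption (verify it in the browser's network panel; the API may have changed); the form fields are the ones built above:

import json
import urllib.parse
import urllib.request

url = 'http://fanyi.youdao.com/translate'    # assumed endpoint, not confirmed by the source
request = urllib.request.Request(url, data)  # presence of data makes this a POST
response = urllib.request.urlopen(request)
result = json.loads(response.read().decode('utf-8'))  # doctype=json, so parse as JSON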
2. headers: a dictionary; its entries can be copied straight from the browser's developer tools.
header = {}
# first way: build a dict and pass it to Request
header['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'
request = urllib.request.Request(url, data, header)
# second way: call add_header() on an existing Request
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0')
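To confirm the header was attached, Request exposes get_header(). Note that urllib normalizes header names, so the stored key becomes 'User-agent':

print(request.get_header('User-agent'))
# -> Mozilla/5.0 (Windows NT 10.0; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0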
The following code checks the response headers to see whether the page was gzip-compressed, and decompresses it if so.
import gzip

for i in response.getheaders():          # getheaders() returns (name, value) tuples
    if i[0] == "Content-Encoding":
        if i[-1] == "gzip":
            html = gzip.decompress(html)
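The loop can be replaced with a direct lookup, since HTTPResponse.getheader() fetches a single header by name:

if response.getheader('Content-Encoding') == 'gzip':
    html = gzip.decompress(html)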
To route requests through a proxy, urllib.request.ProxyHandler is used in three steps.
1. The argument is a dict that maps a URL scheme to 'proxy-ip:port':
proxy_support = urllib.request.ProxyHandler({})
2. Build a custom opener from the handler:
opener = urllib.request.build_opener(proxy_support)
3a. Install the opener globally, so that plain urlopen() goes through it:
urllib.request.install_opener(opener)
3b. Or skip installation and call the opener directly:
opener.open(url)
Combined, with a random proxy drawn from a list and a custom User-Agent:
proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0')]
urllib.request.install_opener(opener)
req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
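ProxyHandler takes one entry per scheme, so https traffic needs its own key; a sketch with placeholder proxy addresses:

proxy_support = urllib.request.ProxyHandler({
    'http':  '203.0.113.7:8080',   # placeholder address, not a real proxy
    'https': '203.0.113.7:8080',
})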
A complete example that downloads the images from a Zhihu question page:

import urllib.request
import os
import random

# open a page through a randomly chosen proxy
def url_open(url):
    iplist = ['49.77.22.1:8118',
              '58.134.102.3:12696',
              '120.26.213.55:9999']   # ... extend with more proxies as needed
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
    opener = urllib.request.build_opener(proxy_support)
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0')]
    urllib.request.install_opener(opener)
    req = urllib.request.Request(url)
    response = urllib.request.urlopen(req)
    html = response.read()
    return html
# scan the page for image addresses; return a list of image URLs
def get_imgs(url):
    html = url_open(url).decode('utf-8')
    img_address = []
    a = html.find('data-original')
    while a != -1:
        # look for the '.jpg' suffix within 300 characters of the attribute
        b = html.find('.jpg', a, a + 300)
        if b != -1:
            # skip past 'data-original="' (15 characters) and keep through '.jpg'
            img_address.append(html[a + 15:b + 4])
        else:
            b = a + 9
        a = html.find('data-original=', b)
    for i in img_address:
        print(i)
    return img_address
# download each image and save it locally
def save_imgs(img_address):
    for i in img_address:
        filename = i.split('/')[-1]    # use the last path segment as the file name
        with open(filename, 'wb') as f:
            img = url_open(i)
            f.write(img)
def zhihuPic(url, folder="zhihu"):
    # create the target folder if needed, then work inside it
    if os.path.exists(folder):
        os.chdir(folder)
    else:
        os.mkdir(folder)
        os.chdir(folder)
    img_address = get_imgs(url)
    save_imgs(img_address)

if __name__ == '__main__':
    zhihuPic("https://www.zhihu.com/question/22070147")