# Python 2.7: scrape usable HTTP proxy IPs and test each one.

# Standard-library imports. NOTE(review): the original scrape fused page text
# onto the module names ('urllib2html', 'randomapp', 'timedom', 'reide');
# restored to the modules the code actually uses.
import random
import re
import time
import urllib2

# from lxml import etree  # third-party module, kept commented for local testing only



def get_proxy(page):ui

headers = {url

'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'spa

}代理

req = urllib2.Request('http://www.xicidaili.com/nn/{}'.format(page),headers=headers) #構造一個Request對象orm

response = urllib2.urlopen(req) #發送請求

html = response.read()

proxy_list = []

ip_port_list = re.findall(r'<tr class=.*?>(.*?)</tr>',html,re.S)

#ip_list = re.findall(r'\d+\.\d+\.\d+\.\d+\',html)

print len(ip_port_list)

for i in ip_port_list:

ip = re.findall('\d+\.\d+\.\d+\.\d+\.',i)[0]

port = re.findall(r'<td>(\d+)</td>',i)[0]

#print ip,port  #打印測試

proxy = '{}:{}'.format(ip,port)

proxy_list.append(proxy)

return proxy_list

def proxy_read(proxy_list,i):

proxy = proxy_list[i]

print u'當前代理IP:{}'.format(proxy)

sleep_time = random.randint(1,3)

print '等待{}秒'.format(sleep_time)

time.sleep(sleep_time)

#urllib2 裏面的方法

proxt_suport = urllib2.ProxyHandler({'http':proxy}) #構建代理Handler

opener = urllib2.build_opener(proxt_suport) #經過build_opener方法來使用Handler對象,而後建立opener對象

urllib2.install_opener(opener) #把opener對象變成全局的,以後使用的urlopen對象都是全局的

req = urllib2.Request('http://httpbin.org/ip')

try:

html = urllib2.urlopen(req).read()

print html

except Exception as e:

print e

print u'***打開失敗***'

print u'當前ip不可用'


if __name__ == '__name__':

    proxy_list = get_proxy(1)

print '開始測試'

for i in range(100):

proxy.read(proxt_list,i)

# (Blog-page footer text from the scrape — "related articles / related tags & search" — not code.)