Python Crawler Log (9): Scraping Proxies

Without much preamble, here is the code. It is simple, easy-to-follow Python.

import requests
from bs4 import BeautifulSoup
import random


def get_ip_list():
    """Scrape the free proxy list from xicidaili and return it as a list of proxy URLs."""
    print("Fetching proxy list...")
    ip_url = 'http://www.xicidaili.com/nn/'
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
    wb_data = requests.get(ip_url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    raw_list = soup.select('#ip_list tr')
    mature_list = []
    for i in raw_list[1:]:  # skip the header row of the table
        td_list = i.find_all('td')
        ip_info = 'http://' + td_list[1].text + ':' + td_list[2].text  # proxy format: http://ip:port
        mature_list.append(ip_info)
    print("Proxy list fetched successfully")
    return mature_list


def get_random_ip(ip_list):
    """Pick a random proxy from the list and wrap it in the dict format that requests expects."""
    print("Setting a random proxy...")
    proxy_ip = random.choice(ip_list)  # pick one proxy at random
    proxies = {'http': proxy_ip}  # only HTTP traffic is proxied here; add an 'https' key if you need HTTPS
    print("Proxy set successfully.")
    return proxies
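
As a quick illustration of how the two functions fit together, here is a minimal usage sketch. The test URL http://httpbin.org/ip, the 5-second timeout, and the error handling are my own additions, not part of the original code; free proxies fail often, so wrapping the request in try/except is a practical precaution.

if __name__ == '__main__':
    # Fetch the proxy list, pick one at random, and route a test request through it.
    ip_list = get_ip_list()
    proxies = get_random_ip(ip_list)
    try:
        # httpbin echoes back the IP the request arrived from,
        # which lets you confirm the proxy is actually in use.
        resp = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5)
        print(resp.text)
    except requests.exceptions.RequestException as e:
        print("Request through proxy failed:", e)

If the request fails, simply call get_random_ip again to try a different proxy from the list.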