Web Crawling

Crawler: write a program that sends requests to a website, fetches the resource, then parses the HTML and extracts the useful data.
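A minimal sketch of that fetch-parse-extract loop (the URL and the tags extracted are illustrative placeholders):

import requests
from bs4 import BeautifulSoup

# Fetch a page, parse the HTML, and pull out every link on it
ret = requests.get("https://example.com/")      # placeholder URL
ret.encoding = ret.apparent_encoding            # guess the page encoding
soup = BeautifulSoup(ret.text, "html.parser")
for a in soup.find_all("a"):                    # extract the useful data
    print(a.get("href"), a.get_text(strip=True))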

requests

GET requests

# 1. GET request with no parameters

import requests

ret = requests.get('https://github.com/timeline.json')

print(ret.url)
print(ret.text)


# 2. GET request with parameters

import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.get("http://httpbin.org/get", params=payload)

print(ret.url)
print(ret.text)

POST requests

# 1. Basic POST request

import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.post("http://httpbin.org/post", data=payload)

print(ret.text)


# 2. POST with custom headers and a JSON body

import requests
import json

url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}

ret = requests.post(url, data=json.dumps(payload), headers=headers)
# or, equivalently, let requests serialize the body and set the header:
ret = requests.post(url, json=payload, headers=headers)

print(ret.text)
print(ret.cookies)

 

Other request methods

requests.get(url, params=None, **kwargs)
requests.post(url, data=None, json=None, **kwargs)
requests.put(url, data=None, **kwargs)
requests.head(url, **kwargs)
requests.delete(url, **kwargs)
requests.patch(url, data=None, **kwargs)
requests.options(url, **kwargs)
  
# All of the above are convenience wrappers built on this method
requests.request(method, url, **kwargs)

Parameters

def param_method_url():
    # requests.request(method='get', url='http://127.0.0.1:8000/test/')
    # requests.request(method='post', url='http://127.0.0.1:8000/test/')
    pass


def param_param():
    # params can be a dict
    # params can be a string
    # params can be bytes (ASCII-encodable content only)

    # requests.request(method='get',
    # url='http://127.0.0.1:8000/test/',
    # params={'k1': 'v1', 'k2': '水電費'})

    # requests.request(method='get',
    # url='http://127.0.0.1:8000/test/',
    # params="k1=v1&k2=水電費&k3=v3&k3=vv3")

    # requests.request(method='get',
    # url='http://127.0.0.1:8000/test/',
    # params=bytes("k1=v1&k2=k2&k3=v3&k3=vv3", encoding='utf8'))

    # Wrong: the bytes value contains non-ASCII characters
    # requests.request(method='get',
    # url='http://127.0.0.1:8000/test/',
    # params=bytes("k1=v1&k2=水電費&k3=v3&k3=vv3", encoding='utf8'))
    pass


def param_data():
    # data can be a dict
    # data can be a string
    # data can be bytes
    # data can be a file object

    # requests.request(method='POST',
    # url='http://127.0.0.1:8000/test/',
    # data={'k1': 'v1', 'k2': '水電費'})

    # requests.request(method='POST',
    # url='http://127.0.0.1:8000/test/',
    # data="k1=v1; k2=v2; k3=v3; k3=v4"
    # )

    # requests.request(method='POST',
    # url='http://127.0.0.1:8000/test/',
    # data="k1=v1;k2=v2;k3=v3;k3=v4",
    # headers={'Content-Type': 'application/x-www-form-urlencoded'}
    # )

    # requests.request(method='POST',
    # url='http://127.0.0.1:8000/test/',
    # data=open('data_file.py', mode='r', encoding='utf-8'), # file contents: k1=v1;k2=v2;k3=v3;k3=v4
    # headers={'Content-Type': 'application/x-www-form-urlencoded'}
    # )
    pass


def param_json():
    # The dict passed as json= is serialized to a string with json.dumps(...)
    # and sent as the request body, with Content-Type set to 'application/json'
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     json={'k1': 'v1', 'k2': '水電費'})


def param_headers():
    # Send custom request headers to the server
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     json={'k1': 'v1', 'k2': '水電費'},
                     headers={'Content-Type': 'application/x-www-form-urlencoded'}
                     )


def param_cookies():
    # Send cookies to the server
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data={'k1': 'v1', 'k2': 'v2'},
                     cookies={'cook1': 'value1'},
                     )
    # A CookieJar can also be used (the dict form is just a wrapper around it)
    from http.cookiejar import CookieJar
    from http.cookiejar import Cookie

    obj = CookieJar()
    obj.set_cookie(Cookie(version=0, name='c1', value='v1', port=None, domain='', path='/', secure=False, expires=None,
                          discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,
                          port_specified=False, domain_specified=False, domain_initial_dot=False, path_specified=False)
                   )
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data={'k1': 'v1', 'k2': 'v2'},
                     cookies=obj)

                     
def param_proxies():
    # Proxies: if your IP gets blocked, route requests through a proxy

    # proxies = {
    # "http": "61.172.249.96:80",
    # "https": "http://61.185.219.126:3128",
    # }

    # proxies = {'http://10.20.1.128': 'http://10.10.1.10:5323'}

    # ret = requests.get("http://www.proxy360.cn/Proxy", proxies=proxies)
    # print(ret.headers)


    # from requests.auth import HTTPProxyAuth
    #
    # proxyDict = {
    # 'http': '77.75.105.165',
    # 'https': '77.75.105.165'
    # }
    # auth = HTTPProxyAuth('username', 'mypassword')
    #
    # r = requests.get("http://www.google.com", proxies=proxyDict, auth=auth)
    # print(r.text)

    pass


def param_files():
    # Upload a file
    # file_dict = {
    # 'f1': open('readme', 'rb')
    # }
    # requests.request(method='POST',
    # url='http://127.0.0.1:8000/test/',
    # files=file_dict)

    # Upload a file with a custom filename
    # file_dict = {
    # 'f1': ('test.txt', open('readme', 'rb'))
    # }
    # requests.request(method='POST',
    # url='http://127.0.0.1:8000/test/',
    # files=file_dict)

    # Upload with a custom filename, passing the content as a string
    # file_dict = {
    # 'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf")
    # }
    # requests.request(method='POST',
    # url='http://127.0.0.1:8000/test/',
    # files=file_dict)

    # Upload with a custom filename, content, content type, and extra headers
    # file_dict = {
    #     'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf", 'application/text', {'k1': '0'})
    # }
    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  files=file_dict)

    pass


def param_auth():
    from requests.auth import HTTPBasicAuth, HTTPDigestAuth

    ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('wupeiqi', 'sdfasdfasdf'))
    print(ret.text)

    # ret = requests.get('http://192.168.1.1',
    # auth=HTTPBasicAuth('admin', 'admin'))
    # ret.encoding = 'gbk'
    # print(ret.text)

    # ret = requests.get('http://httpbin.org/digest-auth/auth/user/pass', auth=HTTPDigestAuth('user', 'pass'))
    # print(ret)
    #


def param_timeout():
    # ret = requests.get('http://google.com/', timeout=1)
    # print(ret)

    # ret = requests.get('http://google.com/', timeout=(5, 1))  # (connect timeout, read timeout)
    # print(ret)
    pass


def param_allow_redirects():
    ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
    print(ret.text)




def param_stream():
    ret = requests.get('http://127.0.0.1:8000/test/', stream=True)
    print(ret.content)
    ret.close()

    # from contextlib import closing
    # with closing(requests.get('http://httpbin.org/get', stream=True)) as r:
    #     # Process the response here, chunk by chunk
    #     for i in r.iter_content():
    #         print(i)


def requests_session():
    import requests

    session = requests.Session()

    ### 1. First request any page to obtain the initial cookies

    i1 = session.get(url="http://dig.chouti.com/help/service")

    ### 2. Log in, carrying the cookies from step 1; the backend authorizes the gpsd value in that cookie
    i2 = session.post(
        url="http://dig.chouti.com/login",
        data={
            'phone': "8615131255089",
            'password': "xxxxxx",
            'oneMonth': ""
        }
    )

    i3 = session.post(
        url="http://dig.chouti.com/link/vote?linksId=8589623",
    )
    print(i3.text)

BeautifulSoup

BeautifulSoup is a module that takes an HTML or XML string, parses it into a structured form, and then provides methods for quickly locating specific elements, which makes finding elements in HTML or XML simple.

Install: pip3 install beautifulsoup4

Usage:

from bs4 import BeautifulSoup

What bs4 is used for:
  - parsing data scraped by a crawler
  - parsing XML/HTML data
  - validating user-submitted content, e.g. rich text from KindEditor or UEditor (see the sketch after the code below)

soup = BeautifulSoup(html, "html.parser")
# Find the first <a> tag
tag1 = soup.find(name='a')
# Find all <a> tags
tag2 = soup.find_all(name='a')
# Find the tag with id="link2"
tag3 = soup.find(attrs={'id': 'link2'})   # or: soup.select('#link2')
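The third use above, validating user-submitted rich text, usually amounts to whitelisting tags and attributes. A minimal sketch (the allowed tag and attribute sets are illustrative, not a complete sanitizer):

from bs4 import BeautifulSoup

ALLOWED_TAGS = {'p', 'a', 'strong', 'em', 'ul', 'li'}   # illustrative whitelist
ALLOWED_ATTRS = {'a': {'href', 'title'}}

def clean_html(html):
    soup = BeautifulSoup(html, "html.parser")
    for tag in soup.find_all(True):
        if tag.name not in ALLOWED_TAGS:
            tag.extract()                                # drop disallowed tags entirely
        else:
            allowed = ALLOWED_ATTRS.get(tag.name, set())
            tag.attrs = {k: v for k, v in tag.attrs.items() if k in allowed}
    return str(soup)

print(clean_html('<p onclick="x()">hi <script>alert(1)</script><a href="/ok">link</a></p>'))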

Examples

import requests
from bs4 import BeautifulSoup

# 1. Download the page
ret = requests.get(
    url="https://www.autohome.com.cn/news/",
)

# Raw bytes of the response
# print(ret.content)

# The encoding detected from the page content
# print(ret.apparent_encoding)
ret.encoding = ret.apparent_encoding
# Decoded text (bytes converted to str)
# print(ret.text)

# 2. Parse
# Extract the content we want
soup = BeautifulSoup(ret.text, "html.parser")  # choose the parser

div = soup.find(name="div", id="auto-channel-lazyload-article")

li_list = div.find_all(name="li")
# print(li_list)
for li in li_list:
    h3 = li.find(name="h3")
    if not h3:
        continue

    a = li.find(name="a")
    href = a.get("href").strip("/")  # drop the leading "//" of the protocol-relative URL

    p = li.find(name="p")

    img = li.find(name="img")
    src = img.get("src")

    file_name = src.split("__")[1]
    # Download the image
    img_res = requests.get(url="https:%s" % src)
    print(img_res)
    # Write it to a file
    with open("img/%s" % file_name, "wb") as f:
        f.write(img_res.content)


    # print(h3.text)
    # print(href)
    # print(p.text)
Autohome news
import requests
from bs4 import BeautifulSoup

# Get the pre-login (unauthorized) cookies
ret1=requests.get(
    url="https://dig.chouti.com/",
    headers={
        "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }
    )
ret1_cookie=ret1.cookies.get_dict()


# Log in
ret=requests.post(
    url='https://dig.chouti.com/login',
    data={
        "phone":'xx',
        "password":"xx",
        "oneMonth":1
    },
    headers={
        "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    },
    cookies=ret1_cookie
)

# Iterate over listing pages and collect item ids
for page in range(1, 2):

    ret2=requests.get(
        url="https://dig.chouti.com/all/hot/recent/%s" % page,
        headers={
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        },
        cookies=ret1_cookie

    )

    # print(ret2.text)

    soup=BeautifulSoup(ret2.text,"html.parser")

    div=soup.find(name="div",attrs={"class":"content-list","id":"content-list"})

    items=div.find_all(name="div",attrs={"class":"item"})


    for i in items:
        par2=i.find(name="div",attrs={"class":"part2"})
        nid=par2.get("share-linkid")

        # Upvote
        ret3=requests.post(
            url="https://dig.chouti.com/link/vote?linksId=%s"%nid,
            headers={
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
         },
            cookies=ret1_cookie
        )
        print(ret3.text)
Chouti login and upvote

Simulating WeChat web login and fetching information

1. Display the QR code

  Request the QR-code URL with the current timestamp (time * 1000) appended, and save the returned uuid (a pseudo-uuid).

  Long polling: the browser sends a request to the WeChat server and the server holds the connection open (within a time limit); if the user scans the code during that window, the server responds immediately.

  Polling: the browser keeps sending requests to the server at short intervals.

2. Scan the QR code

  The server returns 201 together with an img (the user's avatar).

3. Confirm login

  Returns 200 and a URL; append 「xxx」 to that URL and send another request to obtain the logged-in user's credential data (XML); save both the credential data and the cookies.

4. Initialize user info

  Send a POST request carrying the user credential information as JSON data.

5. Fetch the avatar

  - The image is hotlink-protected, so the request needs:
    - a Referer header
    - the cookies
  (a sketch follows this list)

6. Fetch the contact list

  Send the request carrying the saved cookies.
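A minimal sketch of step 5, downloading a hotlink-protected image; the URL and Referer value are placeholders, and the cookies are assumed to come from the earlier login requests:

import requests

# Cookies saved during the earlier login steps (placeholder values)
login_cookies = {"wxuin": "xxx", "wxsid": "xxx"}

ret = requests.get(
    url="https://example.com/avatar.jpg",      # placeholder image URL
    headers={
        # Hotlink protection checks that the request looks like it comes from the site itself
        "Referer": "https://example.com/",
        "User-Agent": "Mozilla/5.0",
    },
    cookies=login_cookies,
)

with open("avatar.jpg", "wb") as f:
    f.write(ret.content)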

 

Example:
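A minimal sketch of the whole QR-code polling login flow described above, assuming hypothetical endpoints, parameter names, and response formats purely for illustration (they are not the real web-WeChat API):

import time
import requests

session = requests.Session()
session.headers["User-Agent"] = "Mozilla/5.0"

BASE = "https://login.example-wx.com"    # placeholder host, NOT the real WeChat server

# 1. Get a QR-code uuid; the timestamp keeps the URL from being cached
ret = session.get("%s/jslogin?_=%s" % (BASE, int(time.time() * 1000)))
uuid = ret.text                          # assume the body carries the pseudo-uuid

# 2/3. Long-poll until the code is scanned (201) and the login is confirmed (200)
while True:
    ret = session.get("%s/check?uuid=%s&_=%s" % (BASE, uuid, int(time.time() * 1000)))
    if "window.code=201" in ret.text:
        print("scanned, waiting for confirmation...")
    elif "window.code=200" in ret.text:
        redirect_url = ret.text.split('redirect_uri="')[1].rstrip('";')
        break

# Follow the redirect URL to get the login credential data (XML) plus cookies
ret = session.get(redirect_url)
credentials = ret.text                   # the XML credential fields would be parsed here

# 4. Initialize user info with a POST carrying the credentials as JSON
init = session.post("%s/webwxinit" % BASE, json={"BaseRequest": credentials})

# 6. Fetch the contact list, reusing the cookies the session has accumulated
contacts = session.get("%s/webwxgetcontact" % BASE)
print(contacts.text)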
