SEO toolbox: extracting the article titles from every page of a listing

import pycurl, StringIO, re, sys
from lxml import etree

# headers = {
#     "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
#     "Accept-Encoding":"gzip, deflate, sdch",
#     "Accept-Language":"zh-CN,zh;q=0.8",
#     "Cache-Control":"max-age=0",
#     "Connection":"keep-alive",
#     "Cookie":"Hm_lvt_fa633287999535c3e5f5a63e82308549=1462868485; Hm_lpvt_fa633287999535c3e5f5a63e82308549=1462868485; CNZZDATA5838747=cnzz_eid%3D1693591872-1459152412-http%253A%252F%252Fwww.1396app.com%252F%26ntime%3D1462865237",
#     "Host":"www.1396app.com",
#     "Upgrade-Insecure-Requests":"1",
#     "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
# }

reload(sys)
sys.setdefaultencoding('utf-8')

def gethtml(url, headers=None):
    c = pycurl.Curl()    # construct a Curl object
    #c.setopt(pycurl.REFERER, 'http://qy.m.58.com/')    # set the Referer
    c.setopt(pycurl.FOLLOWLOCATION, True)    # follow redirects automatically
    c.setopt(pycurl.MAXREDIRS, 5)            # cap the number of redirects
    c.setopt(pycurl.CONNECTTIMEOUT, 60)      # connect timeout
    c.setopt(pycurl.TIMEOUT, 120)            # download timeout
    c.setopt(pycurl.ENCODING, 'gzip,deflate')    # handle gzip: some sites return a gzip-compressed page even when the request never asked for one
    # c.setopt(c.PROXY, ip)    # proxy
    c.fp = StringIO.StringIO()
    c.setopt(pycurl.URL, url)    # the URL to fetch
    if headers:
        c.setopt(pycurl.HTTPHEADER, headers)    # request headers as a list of "Name: value" strings
    c.setopt(c.WRITEFUNCTION, c.fp.write)    # callback that writes the response into the string buffer
    c.perform()

    code = c.getinfo(c.HTTP_CODE)    # the HTTP status code
    html = c.fp.getvalue()           # the page source

    return html    # callers below expect the HTML, not the status code

write_key = open('key.txt','a+')

for list_url in range(0, 441):
    url = 'http://www.icaile.com/tag/gl-45-%s.html' % list_url
    for key in re.findall(r'title="(.*?)"', gethtml(url)):
        key = key.decode('utf-8', 'ignore')
        write_key.write(key + '\n')
        print key

Adding the headers is optional either way, though I'd suggest including them. And extracting the title="" contents with re.findall at the end is another sign my technique isn't up to scratch.
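If you do pass headers, note that pycurl's HTTPHEADER option expects a list of "Name: value" strings rather than a dict, so the commented-out block above would need flattening first. A minimal sketch, reusing a couple of the values from that block:

headers = [
    "User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
    "Accept-Language: zh-CN,zh;q=0.8",
    "Connection: keep-alive",
]
html = gethtml('http://www.icaile.com/tag/gl-45-0.html', headers)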

Using XPath would be more precise... this is just jotted down for my own records.

    url_range = etree.HTML(gethtml(url).decode('utf-8', 'ignore'))
    detail_title = url_range.xpath('/html/body/div[3]/div[3]/div[1]/div[1]/div/ul/li[1]/a')[0].text
    print detail_title
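A fuller sketch of the XPath route, walking every <li> on the page instead of just li[1]. Whether each <a> actually carries a title attribute is an assumption, so this falls back to the anchor text:

for list_url in range(0, 441):
    url = 'http://www.icaile.com/tag/gl-45-%s.html' % list_url
    doc = etree.HTML(gethtml(url).decode('utf-8', 'ignore'))
    for a in doc.xpath('/html/body/div[3]/div[3]/div[1]/div[1]/div/ul/li/a'):
        title = a.get('title') or (a.text or '').strip()
        if title:
            write_key.write(title + '\n')    # relies on setdefaultencoding('utf-8') above
            print title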

This is written fairly roughly; there's no de-duplication.
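A set in front of the file write would cover that. A minimal sketch (seen and write_unique are hypothetical names, not from the script above):

seen = set()    # titles written so far this run

def write_unique(title):
    # write each title only the first time it appears
    if title in seen:
        return
    seen.add(title)
    write_key.write(title + '\n')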

As for what use this has for SEO: I find grabbing keywords the most satisfying part... tweak it a bit more and it could stand in for LocoySpider (火车头), and it's already a good deal more efficient, more so if you write a multi-threaded version (I hear pycurl comes with its own multi-transfer support too).
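On that note, pycurl exposes libcurl's multi interface through pycurl.CurlMulti, which drives many transfers concurrently without spawning actual threads. A minimal sketch, assuming the same options as gethtml above (fetch_all is a hypothetical helper):

def fetch_all(urls):
    multi = pycurl.CurlMulti()
    handles = []
    for url in urls:
        c = pycurl.Curl()
        c.fp = StringIO.StringIO()
        c.setopt(pycurl.URL, url)
        c.setopt(pycurl.FOLLOWLOCATION, True)
        c.setopt(pycurl.ENCODING, 'gzip,deflate')
        c.setopt(c.WRITEFUNCTION, c.fp.write)
        multi.add_handle(c)    # register the easy handle with the multi handle
        handles.append(c)

    num_active = len(handles)
    while num_active:
        # pump all transfers until libcurl has nothing more to do right now
        while True:
            ret, num_active = multi.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        multi.select(1.0)    # wait until some socket is ready

    return [c.fp.getvalue() for c in handles]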
