net spider (Python web crawler)

# -*- coding: utf-8 -*-
import urllib2, cookielib
import re                        # needed for the re.compile() lookup below
from bs4 import BeautifulSoup

url = "http://www.baidu.com"


# Method 1: fetch the page directly with urlopen
response1 = urllib2.urlopen(url)
print response1.getcode()        # HTTP status code, e.g. 200
print len(response1.read())      # length of the response body

# Method 2: build a Request object and add a User-Agent header
request = urllib2.Request(url)
request.add_header("user-agent", "Mozilla/5.0")
response2 = urllib2.urlopen(request)
print response2.getcode()
print len(response2.read())


# Method 3: handle cookies with a CookieJar-backed opener
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)   # make it the default opener used by urlopen
response3 = urllib2.urlopen(url)
print response3.getcode()
print cj                         # cookies collected during the request
print response3.read()
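
# Note: urllib2 and cookielib exist only on Python 2. On Python 3 the same
# flow uses the renamed standard-library modules urllib.request and
# http.cookiejar (a minimal equivalent sketch, kept as a comment so the
# Python 2 script above still runs as-is):
#
#   import urllib.request, http.cookiejar
#
#   cj = http.cookiejar.CookieJar()
#   opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
#   urllib.request.install_opener(opener)
#   response = urllib.request.urlopen(url)
#   print(response.getcode(), len(response.read()))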


# BeautifulSoup example
html_doc="""********************************************************************************************************"""
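
# The asterisks above stand in for sample HTML that was elided. The lookups
# below (href='http://example.com/lacie', href matching "ill", <p class="title">)
# correspond to the "Three sisters" document from the BeautifulSoup
# documentation, so that document is assumed here as a stand-in:
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
</body></html>
"""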
soup = BeautifulSoup(html_doc,
                     'html.parser',
                     from_encoding='utf-8')
print "獲取全部的連接"
links=soup.find_all("a")
for link in links:
    print  link.name,link['href'],link.get_text()
print '獲取單個連接'
link_node=soup.find('a',href='http://example.com/lacie')
print  link_node.name, link_node['href'], link_node.get_text()
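
# The same elements can also be grabbed with CSS selectors via soup.select()
# (a small alternative sketch; 'p.title' mirrors the class_="title" lookup
# used further below):
print "CSS selectors"
for a in soup.select('a'):
    print a['href'], a.get_text()
print soup.select('p.title')[0].get_text()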


print "正則表達式"
link_node=soup.find('a',href=re.compile(r"ill"))
print link_node.name,link_node['href'],link_node.get_text()


print "獲取p段落文字"
p_node=soup.find('p',class_="title")
print p_node.name,p_node.get_text()
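

# Putting the two halves together (a minimal sketch, assuming the Baidu
# homepage is reachable and serves <a> tags with href attributes): fetch
# the page with the cookie-aware opener installed above, then let
# BeautifulSoup list every link on it.
page = urllib2.urlopen(url).read()
page_soup = BeautifulSoup(page, 'html.parser')
for a in page_soup.find_all('a', href=True):
    print a['href'], a.get_text().strip()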