An example of scraping web page images with Python (a Python web crawler)
The script below prompts for a page number (1-100), fetches the corresponding listing page of tuchong.com's portrait (人像) tag, follows each photo post URL it finds there, and saves the full-size JPGs into a dated folder under D:\TuChong.

Code (Python 2):
#-*- encoding: utf-8 -*-
'''Created on 2014-4-24

@author: Leon Wong'''

import urllib2
import urllib
import re
import time
import os
import uuid

# Collect the second-level (photo post) page URLs from the listing page
def findUrl2(html):
    re1 = r'http://tuchong.com/\d+/\d+/|http://\w+(?<!photos).tuchong.com/\d+/'
    url2list = re.findall(re1, html)
    # de-duplicate while keeping the original order
    url2lstfltr = list(set(url2list))
    url2lstfltr.sort(key=url2list.index)
    #print url2lstfltr
    return url2lstfltr

# Fetch the HTML text of a page
def getHtml(url):
    html = urllib2.urlopen(url).read().decode('utf-8')  # decode as utf-8
    return html

# Download the images found in a post page to local disk
def download(html_page, pageNo):
    # Build the folder name from today's date
    x = time.localtime(time.time())
    foldername = str(x.__getattribute__("tm_year")) + "-" + str(x.__getattribute__("tm_mon")) + "-" + str(x.__getattribute__("tm_mday"))
    re2 = r'http://photos.tuchong.com/.+/f/.+\.jpg'
    imglist = re.findall(re2, html_page)
    print imglist
    download_img = None
    for imgurl in imglist:
        picpath = 'D:\\TuChong\\%s\\%s' % (foldername, str(pageNo))
        filename = str(uuid.uuid1())
        if not os.path.exists(picpath):
            os.makedirs(picpath)
        target = picpath + "\\%s.jpg" % filename
        print "The photos location is:" + target
        download_img = urllib.urlretrieve(imgurl, target)  # download the image to the target path
        time.sleep(1)
        print(imgurl)
    return download_img

# def callback(blocknum, blocksize, totalsize):
#     '''Progress callback
#     @blocknum:  number of blocks downloaded so far
#     @blocksize: size of each data block
#     @totalsize: total size of the remote file
#     '''
#     print str(blocknum), str(blocksize), str(totalsize)
#     if blocknum * blocksize >= totalsize:
#         print 'Download finished'

def quitit():
    print "Bye!"
    exit(0)

if __name__ == '__main__':
    print '''  *****************************************
  **     Welcome to Spider for TUCHONG   **
  **        Created on 2014-4-24         **
  **        @author: Leon Wong           **
  *****************************************'''
    pageNo = raw_input("Input the page number you want to scrape (1-100), please input 'quit' if you want to quit>")
    while not pageNo.isdigit() or int(pageNo) > 100:
        if pageNo == 'quit':
            quitit()
        print "Param is invalid, please try again."
        pageNo = raw_input("Input the page number you want to scrape >")
    # Crawl the portrait (人像) tag of tuchong
    html = getHtml("http://tuchong.com/tags/%E4%BA%BA%E5%83%8F/?page=" + str(pageNo))
    detllst = findUrl2(html)
    for detail in detllst:
        html2 = getHtml(detail)
        download(html2, pageNo)
    print "Finished."
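The commented-out callback in the script is a reporthook for urllib.urlretrieve: urlretrieve calls it once for every block it receives, which is how download progress would be reported. Below is a minimal sketch of how it could be wired in; the image URL and save path are hypothetical and only for illustration.

#-*- encoding: utf-8 -*-
import urllib

def callback(blocknum, blocksize, totalsize):
    '''Progress hook invoked by urllib.urlretrieve after each block.
    @blocknum:  number of blocks transferred so far
    @blocksize: size of each block in bytes
    @totalsize: total size of the remote file in bytes
    '''
    print str(blocknum), str(blocksize), str(totalsize)
    if blocknum * blocksize >= totalsize:
        print 'Download finished'

# Hypothetical URL and save path, for illustration only
imgurl = 'http://photos.tuchong.com/12345/f/67890.jpg'
urllib.urlretrieve(imgurl, 'D:\\TuChong\\sample.jpg', callback)

In download() above, passing callback as the third argument to urllib.urlretrieve would print progress for each photo instead of downloading silently.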