xmfdsh: I really do have too many interests — I can never settle down and master one area of technology before going off to learn something else. So yes, I've gone off to study web crawlers again. This is a simple version, written in C# with reference to plenty of material online. It specializes in grabbing images and can crawl some sites that require cookies, so it's reasonably complete functionally. xmfdsh only spent three days on it, so there is still a lot to improve, which I'll get to later. The whole project is attached at the end of this post, offered to everyone who enjoys tinkering with C#. Let me walk through it slowly:
#region Request data + Request(int index)
/// <summary>
/// Request the next undownloaded URL on crawler slot <paramref name="index"/>
/// </summary>
private void Request(int index)
{
    try
    {
        int depth;
        string url = "";
        // Lock around the Dictionary: it is not safe for concurrent access
        lock (_locker)
        {
            // If no undownloaded links remain, signal this slot as finished
            if (UnDownLoad.Count <= 0)
            {
                _workingSignals.FinishWorking(index);
                return;
            }
            // Otherwise mark this crawler as working
            _reqsBusy[index] = true;
            depth = UnDownLoad.First().Value;
            url = UnDownLoad.First().Key;
            IsDownLoad.Add(url, depth); // move the URL into the downloaded set
            UnDownLoad.Remove(url);
        }
        // Standard HttpWebRequest networking setup; look it up if it's new to you --
        // it took xmfdsh a little while to absorb too
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url);
        request.Method = "GET";
        request.Accept = "text/html";
        request.UserAgent = "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)";
        request.CookieContainer = cookies; // attach the saved cookies
        RequestState rs = new RequestState(request, url, depth, index);
        // Callback invoked when response data arrives
        var result = request.BeginGetResponse(new AsyncCallback(Received), rs);
        // Callback invoked if the request times out
        ThreadPool.RegisterWaitForSingleObject(result.AsyncWaitHandle, TimeOut, rs, MAXTIME, true);
    }
    catch (Exception ex)
    {
        _reqsBusy[index] = false;
        DispatchWork();
    }
}
#endregion
First up is the method that requests a single link. It has two callback functions, which means the code here is asynchronous. Asynchronous programming takes some time to understand and learn; it doesn't come overnight. In my view, the harder parts of C# are precisely things like interfaces, utility classes, and asynchronous events.
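RequestState is used throughout the code but its definition isn't shown in these excerpts. Below is a minimal sketch reconstructed purely from how it is used here (the member names mirror the call sites; the actual class in the attached project may differ in detail):

using System.IO;
using System.Net;
using System.Text;

// Per-request state handed from Request() to the async callbacks.
public class RequestState
{
    public const int BUFFER_SIZE = 4 * 1024;

    public HttpWebRequest Request { get; private set; } // the request in flight
    public string Url { get; private set; }             // URL being fetched
    public int Depth { get; private set; }              // crawl depth of this URL
    public int Index { get; private set; }              // which crawler slot owns it

    public Stream ResponseStream { get; set; }          // set in Received()
    public byte[] Data = new byte[BUFFER_SIZE];         // scratch buffer for BeginRead
    public int BufferSize { get { return BUFFER_SIZE; } }
    public StringBuilder Html = new StringBuilder();    // accumulates page text
    public MemoryStream memoryStream = new MemoryStream(); // accumulates image bytes

    public RequestState(HttpWebRequest request, string url, int depth, int index)
    {
        Request = request;
        Url = url;
        Depth = depth;
        Index = index;
    }
}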
#region Timeout handler + TimeOut(object state, bool timedOut)
/// <summary>
/// Timeout callback
/// </summary>
/// <param name="state"></param>
/// <param name="timedOut"></param>
private void TimeOut(object state, bool timedOut)
{
    // Only act if the wait actually timed out
    if (timedOut)
    {
        RequestState rs = state as RequestState;
        if (rs != null)
        {
            // Cancel the internet request
            rs.Request.Abort();
        }
        DispatchWork();
    }
}
#endregion
This is the timeout handler. When a request times out, the internet request is aborted and the work is rolled back, so whatever was behind that link is simply dropped; after the timeout, the crawler moves on and requests the next link's resources.
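If the RegisterWaitForSingleObject call looks opaque, this standalone snippet (not part of the crawler) shows the mechanics: a wait handle that is never signalled fires the callback with timedOut == true once the interval elapses:

using System;
using System.Threading;

class TimeoutDemo
{
    static void Main()
    {
        var handle = new ManualResetEvent(false); // never signalled
        ThreadPool.RegisterWaitForSingleObject(
            handle,
            (state, timedOut) => Console.WriteLine("timedOut = " + timedOut), // prints True
            null,
            1000,   // timeout in milliseconds, like MAXTIME in the crawler
            true);  // fire at most once
        Thread.Sleep(2000); // keep the process alive long enough to see the callback
    }
}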
#region Receive data (async) + Received(IAsyncResult ar)
/// <summary>
/// Receive the response asynchronously
/// </summary>
/// <param name="ar"></param>
private void Received(IAsyncResult ar)
{
    try
    {
        // Recover the state that was passed in with the request
        RequestState rs = (RequestState)ar.AsyncState;
        HttpWebRequest request = rs.Request;
        string url = rs.Url;
        // Fetch the response
        HttpWebResponse response = (HttpWebResponse)request.EndGetResponse(ar);
        if (response != null && response.StatusCode == HttpStatusCode.OK) // success
        {
            // Get the response stream
            Stream responseStream = response.GetResponseStream();
            rs.ResponseStream = responseStream;
            // Hand the actual reading off to the async ReceivedData method
            var result = responseStream.BeginRead(rs.Data, 0, rs.BufferSize, new AsyncCallback(ReceivedData), rs);
        }
        // Response failed
        else
        {
            response.Close();
            rs.Request.Abort();
            _reqsBusy[rs.Index] = false;
            DispatchWork();
        }
    }
    catch (Exception ex)
    {
        RequestState rs = (RequestState)ar.AsyncState;
        _reqsBusy[rs.Index] = false;
        DispatchWork();
    }
}
#endregion
This runs when the site responds. Since the request was made asynchronously, the data can only be read asynchronously too, hence the ReceivedData method that receives and processes it. If an error occurs or the response fails, the request is rolled back and the crawler slot's status is reset to ready, preparing it for the next link.
#region 異步操做讀寫 + ReceivedData(IAsyncResult ar) /// <summary> /// 異步操做讀寫 /// </summary> /// <param name="ar">異步操做狀態</param> private void ReceivedData(IAsyncResult ar) { //獲取異步狀態參數 RequestState rs = (RequestState)ar.AsyncState; HttpWebRequest request = rs.Request; System.Net.HttpWebResponse responseImg = request.GetResponse() as System.Net.HttpWebResponse; Stream responseStream = rs.ResponseStream; string url = rs.Url; int depth = rs.Depth; string html = ""; int index = rs.Index; int read = 1; try { //若是改連接爲圖片來的,須要保存此圖片 if (url.Contains(".jpg") || url.Contains(".png")) { read = responseStream.EndRead(ar); if (read > 0) { MemoryStream ms = new System.IO.MemoryStream(rs.Data, 0, read); BinaryReader reader = new BinaryReader(ms); byte[] buffer = new byte[32 * 1024]; while ((read = reader.Read(buffer, 0, buffer.Length)) > 0) { rs.memoryStream.Write(buffer, 0, read); } //遞歸 再次請求數據 var result = responseStream.BeginRead(rs.Data, 0, rs.BufferSize, new AsyncCallback(ReceivedData), rs); return; } } else { read = responseStream.EndRead(ar); if (read > 0) { //建立內存流 MemoryStream ms = new MemoryStream(rs.Data, 0, read); StreamReader reader = new StreamReader(ms, Encoding.GetEncoding("gb2312")); string str = reader.ReadToEnd(); //添加到末尾 rs.Html.Append(str); //遞歸 再次請求數據 var result = responseStream.BeginRead(rs.Data, 0, rs.BufferSize, new AsyncCallback(ReceivedData), rs); return; } } if (url.Contains(".jpg") || url.Contains(".png")) { //images = rs.Images; SaveContents(rs.memoryStream.GetBuffer(), url); } else { html = rs.Html.ToString(); //保存 SaveContents(html, url); //獲取頁面的連接 } } catch (Exception ex) { _reqsBusy[rs.Index] = false; DispatchWork(); } List<string> links = GetLinks(html,url); //獲得過濾後的連接,並保存到未下載集合 AddUrls(links, depth + 1); _reqsBusy[index] = false; DispatchWork(); } #endregion
This is where the data is actually processed — the heart of the crawler, though it really isn't hard. It checks whether the URL is an image; if so, the image is saved, because while a crawler is still at this basic level, grabbing images is the most practical and most fun thing to do (quick, go find sites with lots of nice pictures). If it isn't an image, we treat it as an ordinary HTML page, read its HTML, and whenever an http link or an href is found, add it to the undownloaded set. Links to things like js and css files are filtered out (we don't fetch that kind of content).
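Note that matching ".jpg"/".png" inside the URL is fragile (query strings, extensionless image URLs). A more robust alternative, not used in this project, would be to look at the response's Content-Type header; a hedged sketch (uses System and System.Net, same as the rest of the crawler):

// Sketch: decide image-vs-page from the HTTP Content-Type header instead of
// the URL. `response` is the HttpWebResponse obtained in Received().
private static bool IsImageResponse(HttpWebResponse response)
{
    // ContentType looks like "image/jpeg" or "text/html; charset=gb2312"
    string contentType = response.ContentType ?? "";
    return contentType.StartsWith("image/", StringComparison.OrdinalIgnoreCase);
}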
private void SaveContents(byte[] images, string url)
{
    // Skip anything under 30 KB -- mostly icons and thumbnails
    if (images.Count() < 1024 * 30)
        return;
    if (url.Contains(".jpg"))
    {
        File.WriteAllBytes(@"d:\網絡爬蟲圖片\" + _index++ + ".jpg", images);
        Console.WriteLine("Image saved: " + url);
    }
    else
    {
        File.WriteAllBytes(@"d:\網絡爬蟲圖片\" + _index++ + ".png", images);
        Console.WriteLine("Image saved: " + url);
    }
}
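ReceivedData also calls a string overload of SaveContents for HTML pages, which isn't shown above. A minimal sketch of what it might look like (the numbered-file naming scheme is my assumption; the attached project may store pages differently or not at all):

// Sketch of the HTML overload: write the page text to disk alongside the images.
private void SaveContents(string html, string url)
{
    if (string.IsNullOrEmpty(html))
        return;
    // Hypothetical naming scheme: one numbered .html file per page
    File.WriteAllText(@"d:\網絡爬蟲圖片\" + _index++ + ".html", html, Encoding.GetEncoding("gb2312"));
    Console.WriteLine("Page saved: " + url);
}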
#region Extract page links + List<string> GetLinks(string html, string url)
/// <summary>
/// Extract the links from a page
/// </summary>
/// <param name="html"></param>
/// <param name="url"></param>
/// <returns></returns>
private List<string> GetLinks(string html, string url)
{
    // Match absolute http links
    const string pattern2 = @"http://([\w-]+\.)+[\w-]+(/[\w- ./?%&=]*)?";
    Regex r2 = new Regex(pattern2, RegexOptions.IgnoreCase);
    // Collect the matches
    MatchCollection m2 = r2.Matches(html);
    List<string> links = new List<string>();
    for (int i = 0; i < m2.Count; i++)
    {
        // w3.org URLs appear in every page's doctype, but they are clearly not what we want
        if (m2[i].ToString().Contains("www.w3.org"))
            continue;
        links.Add(m2[i].ToString());
    }
    // Match links inside href attributes and prepend the root URL
    // (if you've built websites, you know why)
    const string pattern = @"href=([""'])?(?<href>[^'""]+)\1[^>]*";
    Regex r = new Regex(pattern, RegexOptions.IgnoreCase);
    // Collect the matches
    MatchCollection m = r.Matches(html);
    for (int i = 0; i < m.Count; i++)
    {
        string href1 = m[i].ToString().Replace("href=", "");
        href1 = href1.Replace("\"", "");
        // Prepend the root URL (the one typed in at the start) to the match
        string href = RootUrl + href1;
        if (m[i].ToString().Contains("www.w3.org"))
            continue;
        links.Add(href);
    }
    return links;
}
#endregion
This is the link-extraction method. When what was read turns out to be HTML code, we keep parsing it to find the URLs inside; that is exactly what makes this a web crawler (only fetching the start page would be boring). Extracting absolute http links is the obvious part; href attributes matter because on the kind of site you'd point this at (those of you who build websites understand, it's hard to explain), they mostly point at images and articles, which is why the href case above gets handled too.
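As a quick illustration of what GetLinks returns (the snippet and the RootUrl value are made up for the example):

// Assuming RootUrl = "http://example.com"
string html = "<a href=\"/pic/1.jpg\">pic</a> see http://example.com/page2";
List<string> links = GetLinks(html, "http://example.com");
// links now contains:
//   "http://example.com/page2"     (matched by the absolute-link pattern)
//   "http://example.com/pic/1.jpg" (matched by the href pattern, RootUrl prepended)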
#region Add urls to the UnDownLoad set + AddUrls(List<string> urls, int depth)
/// <summary>
/// Add urls to the UnDownLoad set
/// </summary>
/// <param name="urls"></param>
/// <param name="depth"></param>
private void AddUrls(List<string> urls, int depth)
{
    lock (_locker)
    {
        if (depth >= MAXDEPTH)
        {
            // Too deep; stop following links from here
            return;
        }
        foreach (string url in urls)
        {
            string cleanUrl = url.Trim();
            // Cut the URL off at the first space
            int end = cleanUrl.IndexOf(' ');
            if (end > 0)
            {
                cleanUrl = cleanUrl.Substring(0, end);
            }
            if (UrlAvailable(cleanUrl))
            {
                UnDownLoad.Add(cleanUrl, depth);
            }
        }
    }
}
#endregion
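UrlAvailable does the filtering mentioned earlier (skip anything already queued or downloaded, skip js/css and the like), but isn't shown above; here is a plausible sketch based on that description (the exact filter list is my assumption):

// Sketch: is this URL worth queueing? Assumes it is called while holding _locker.
private bool UrlAvailable(string url)
{
    // Skip anything already queued or already fetched
    if (UnDownLoad.ContainsKey(url) || IsDownLoad.ContainsKey(url))
        return false;
    // Skip resource types the crawler doesn't want
    if (url.Contains(".js") || url.Contains(".css") || url.Contains(".ico"))
        return false;
    return true;
}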
#region Dispatch the crawlers + DispatchWork()
/// <summary>
/// Start capturing
/// </summary>
private void DispatchWork()
{
    for (int i = 0; i < _reqCount; i++)
    {
        // Hand a new request to every crawler slot that is idle
        if (!_reqsBusy[i])
        {
            Request(i);
            Thread.Sleep(1000);
        }
    }
}
#endregion
This function is what sets the crawlers to work. The value of _reqCount is set at startup; picture it as the number of crawlers you release. The program defaults to 20, and it can be changed at any time. As for sites that need cookies: the trick is to visit the entered start URL first, again using the HttpWebRequest helper class, and save its cookies with cookies = request.CookieContainer; later requests then simply attach them with request.CookieContainer = cookies;. For a site that requires cookies, the root page itself doesn't need them — for example, http://image.baidu.com/ opens fine without any — but bluntly requesting an image inside it does require the cookie to be attached, so that problem is solved as well. That said, xmfdsh has found that this program still can't grab images from some sites, or stops after fetching a certain number; the exact cause is unknown, to be improved on later.
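A minimal sketch of that cookie bootstrap as I understand it from the description above (the method name InitCookies is mine, not from the project):

// Sketch: visit the root page once so the server's cookies end up in `cookies`,
// which every later Request() attaches via request.CookieContainer = cookies;
private void InitCookies(string rootUrl)
{
    cookies = new CookieContainer();
    HttpWebRequest request = (HttpWebRequest)WebRequest.Create(rootUrl);
    request.CookieContainer = cookies; // the server's Set-Cookie headers land here
    using (HttpWebResponse response = (HttpWebResponse)request.GetResponse())
    {
        // Nothing to read; the side effect on `cookies` is all we need
    }
}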
Source code attached: http://files.cnblogs.com/xmfdsh/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB.rar