Previous article: Java Web Crawler in Practice (9)
Hello everyone. The 618 shopping festival is almost here, and your shopping enthusiasm has probably been stirred up a bit. Most likely your most frequent routine is to open a shopping site, type in the keyword of a product you care about, and then check which store has the highest sales, which offers the lowest price, and so on. This article combines the Java crawler framework NetDiscovery with Selenium to automatically fetch the information of the top three products.
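The first step is to initialize the WebDriver pool, pointing the configuration at a local chromedriver binary: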
WebDriverPoolConfig config = new WebDriverPoolConfig("example/chromedriver.exe", Browser.Chrome);
WebDriverPool.init(config);
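Next comes the core of the crawl: a SeleniumAction subclass that maximizes the window, types the keyword, clicks search, sorts the results by sales volume, and then uses Jsoup to pull the store name, product name, and price of the first three results out of the page source.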
package com.cv4j.netdiscovery.example.jd;
import com.cv4j.netdiscovery.selenium.Utils;
import com.cv4j.netdiscovery.selenium.action.SeleniumAction;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
public class JDAction extends SeleniumAction {

    @Override
    public SeleniumAction perform(WebDriver driver) {
        try {
            // maximize the browser window
            driver.manage().window().maximize();
            Thread.sleep(3000);

            // type the product keyword into the search box
            String searchText = "商務筆記本"; // "business laptop"
            String searchInput = "//*[@id=\"keyword\"]";
            WebElement userInput = Utils.getWebElementByXpath(driver, searchInput);
            userInput.sendKeys(searchText);
            Thread.sleep(3000);

            // click the search button
            String searchBtn = "/html/body/div[2]/form/input[4]";
            Utils.clickElement(driver, By.xpath(searchBtn));
            Thread.sleep(3000);

            // click the "sort by sales" tab
            String saleSortBtn = "//*[@id=\"J_filter\"]/div[1]/div[1]/a[2]";
            Utils.clickElement(driver, By.xpath(saleSortBtn));
            Thread.sleep(3000);

            // grab the page source and parse it into a Jsoup Document
            String pageHtml = driver.getPageSource();
            Document document = Jsoup.parse(pageHtml);
            Elements elements = document.select("div[id=J_goodsList] li[class=gl-item]");

            // print store name, product name, and price for the first three items
            if (elements.size() >= 3) {
                for (int i = 0; i < 3; i++) {
                    Element element = elements.get(i);
                    String storeName = element.select("div[class=p-shop] a").first().text();
                    String goodsName = element.select("div[class=p-name p-name-type-2] a em").first().text();
                    String goodsPrice = element.select("div[class=p-price] i").first().text();
                    System.out.println(storeName + " " + goodsName + " ¥" + goodsPrice);
                }
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        return null;
    }
}
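With the action in place, assembling the spider is short: initialize the WebDriver pool, wrap the action in a SeleniumDownloader, and point a Spider at JD's search page.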
package com.cv4j.netdiscovery.example.jd;
import com.cv4j.netdiscovery.core.Spider;
import com.cv4j.netdiscovery.selenium.Browser;
import com.cv4j.netdiscovery.selenium.downloader.SeleniumDownloader;
import com.cv4j.netdiscovery.selenium.pool.WebDriverPool;
import com.cv4j.netdiscovery.selenium.pool.WebDriverPoolConfig;
public class JDSpider {

    public static void main(String[] args) {
        WebDriverPoolConfig config = new WebDriverPoolConfig("example/chromedriver.exe", Browser.Chrome);
        WebDriverPool.init(config);

        JDAction jdAction = new JDAction();
        SeleniumDownloader seleniumDownloader = new SeleniumDownloader(jdAction);

        String url = "https://search.jd.com/";

        Spider.create()
              .name("searchJD")
              .url(url)
              .downloader(seleniumDownloader)
              .run();
    }
}
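If the workflow is more complex, a SeleniumDownloader can also take a list of actions and execute them in order: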
List<SeleniumAction> actionList = new ArrayList<>();
actionList.add(new BrowserAction());
actionList.add(new InitAction());
actionList.add(new WorkAction());
SeleniumDownloader seleniumDownloader = new SeleniumDownloader(actionList);
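And to run several spiders together, register them with a SpiderEngine; runWithRepeat() keeps them running again and again: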
SpiderEngine engine = SpiderEngine.create();
for (...) {
    engine.addSpider(spider);
}
engine.runWithRepeat();
Some readers may ask where the xpath expressions in the action class come from. I use the Chrome browser: in the developer tools' Elements panel, select the element, right-click it, and copy the XPath from there.
There are many ways to turn an HTML string into objects so that the target data can be extracted precisely. Jsoup is one of them.
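As a pointer, here is a minimal, self-contained Jsoup sketch; the HTML literal and the class name JsoupDemo are made up for illustration, but the select/first/text calls are the same ones JDAction uses above:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

public class JsoupDemo {

    public static void main(String[] args) {
        // a made-up fragment mimicking one search-result item
        String html = "<li class=\"gl-item\">"
                + "<div class=\"p-shop\"><a>Some Store</a></div>"
                + "<div class=\"p-price\"><i>5999.00</i></div>"
                + "</li>";

        // parse the string into a Document, then pick out pieces with CSS selectors
        Document doc = Jsoup.parse(html);
        String store = doc.select("div.p-shop a").first().text();
        String price = doc.select("div.p-price i").first().text();

        System.out.println(store + " ¥" + price); // Some Store ¥5999.00
    }
}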
That's it. This article is only meant to get you started; readers with real needs will surely have deeper requirements and ideas of their own. You are welcome to follow the Java crawler framework NetDiscovery: https://github.com/fengzhizi715/NetDiscovery