How do you implement a web crawler in Java?


1. In the open IE browser window, click the gear icon in the upper right corner and choose "Internet Options".

2. In the Internet Options window that opens, switch to the Security tab and click "Custom level".

3. In the "Security Settings - Internet Zone" dialog, find "Scripting of Java applets" and "Active scripting", set both options to "Disable", and then click OK.


A web crawler is a program that automatically retrieves web pages; it downloads pages from the World Wide Web for a search engine and is an essential component of one.
A traditional crawler starts from the URLs of one or more seed pages, collects the URLs found on those pages, and, while fetching, keeps extracting new URLs from the current page and adding them to a queue until some stop condition is met. For vertical search, a focused crawler, i.e. one that selectively fetches pages on a specific topic, is the better fit.

The following is the core code of a simple crawler implemented in Java:
public void crawl() throws Throwable {
    while (continueCrawling()) {
        CrawlerUrl url = getNextUrl(); // take the next URL from the crawl queue
        if (url != null) {
            printCrawlInfo();
            String content = getContent(url); // fetch the text of the URL

            // A focused crawler only keeps pages related to its topic;
            // a simple regular-expression match is used here
            if (isContentRelevant(content, this.regexpSearchPattern)) {
                saveContent(url, content); // save the page locally

                // Extract the links in the page and add them to the crawl queue
                Collection<String> urlStrings = extractUrls(content, url);
                addUrlsToUrlQueue(url, urlStrings);
            } else {
                System.out.println(url + " is not relevant, ignoring ...");
            }

            // Pause between requests to avoid being blocked by the target site
            Thread.sleep(this.delayBetweenUrls);
        }
    }
    closeOutputStream();
}
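The crawl loop relies on a few helper methods that the excerpt does not show. As one example, addUrlsToUrlQueue only needs to enqueue each extracted link one level deeper than the page it was found on (getNextUrl later filters out anything already visited). A minimal sketch, assuming CrawlerUrl exposes the depth it was discovered at:

private void addUrlsToUrlQueue(CrawlerUrl url, Collection<String> urlStrings) {
    int depth = url.getDepth() + 1; // links found on this page sit one level deeper
    for (String urlString : urlStrings) {
        urlQueue.add(new CrawlerUrl(urlString, depth));
    }
}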
private CrawlerUrl getNextUrl() throws Throwable {
    CrawlerUrl nextUrl = null;
    while ((nextUrl == null) && (!urlQueue.isEmpty())) {
        CrawlerUrl crawlerUrl = this.urlQueue.remove();
        // doWeHavePermissionToVisit: whether we are allowed to visit this URL; a polite
        //   crawler follows the rules the site publishes in its "robots.txt"
        // isUrlAlreadyVisited: whether the URL has been seen before; large search engines
        //   usually de-duplicate with a Bloom filter, a HashMap is enough here
        // isDepthAcceptable: whether the depth limit has been reached; crawlers generally
        //   work breadth-first, and the depth limit also guards against crawler traps
        //   (auto-generated links that would otherwise pull the crawler into an endless loop)
        if (doWeHavePermissionToVisit(crawlerUrl)
                && (!isUrlAlreadyVisited(crawlerUrl))
                && isDepthAcceptable(crawlerUrl)) {
            nextUrl = crawlerUrl;
            // System.out.println("Next url to be visited is " + nextUrl);
        }
    }
    return nextUrl;
}
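The three checks referenced in the comments are not part of the excerpt either. Below is a minimal sketch of the HashMap-based de-duplication and the depth limit, using illustrative field names (visitedUrlMap, maxDepth) that are not from the original; a polite doWeHavePermissionToVisit would additionally fetch and parse the site's robots.txt, which is omitted here.

private Map<String, CrawlerUrl> visitedUrlMap = new HashMap<String, CrawlerUrl>();
private int maxDepth = 5;

private boolean isUrlAlreadyVisited(CrawlerUrl crawlerUrl) {
    // HashMap-based de-duplication; a large crawler would use a Bloom filter instead
    return visitedUrlMap.containsKey(crawlerUrl.getUrlString());
}

private boolean isDepthAcceptable(CrawlerUrl crawlerUrl) {
    // The depth limit is what protects the crawler from crawler traps
    return crawlerUrl.getDepth() <= maxDepth;
}

private void markUrlAsVisited(CrawlerUrl url) {
    visitedUrlMap.put(url.getUrlString(), url);
}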
private String getContent(CrawlerUrl url) throws Throwable {
    // HttpClient 4.1 is invoked differently from earlier versions
    HttpClient client = new DefaultHttpClient();
    HttpGet httpGet = new HttpGet(url.getUrlString());
    StringBuffer strBuf = new StringBuffer();
    HttpResponse response = client.execute(httpGet);
    if (HttpStatus.SC_OK == response.getStatusLine().getStatusCode()) {
        HttpEntity entity = response.getEntity();
        if (entity != null) {
            BufferedReader reader = new BufferedReader(
                    new InputStreamReader(entity.getContent(), "UTF-8"));
            String line = null;
            if (entity.getContentLength() > 0) {
                strBuf = new StringBuffer((int) entity.getContentLength());
                while ((line = reader.readLine()) != null) {
                    strBuf.append(line);
                }
            }
        }
        if (entity != null) {
            entity.consumeContent(); // release the underlying connection
        }
    }
    // Mark the URL as visited
    markUrlAsVisited(url);
    return strBuf.toString();
}
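DefaultHttpClient belongs to the old HttpClient 4.x API and has since been deprecated; on Java 11 or newer the same fetch can be done with the built-in java.net.http client. A self-contained sketch of that alternative (not part of the original code):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SimpleFetcher {
    // Returns the response body for a 200 response, or an empty string otherwise
    public static String fetch(String url) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        HttpResponse<String> response =
                client.send(request, HttpResponse.BodyHandlers.ofString());
        return response.statusCode() == 200 ? response.body() : "";
    }
}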
public static boolean isContentRelevant(String content,
        Pattern regexpPattern) {
    boolean retValue = false;
    if (content != null) {
        // Does the page text match the regular expression?
        Matcher m = regexpPattern.matcher(content.toLowerCase());
        retValue = m.find();
    }
    return retValue;
}
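regexpSearchPattern would typically be compiled once from the keyword passed to the constructor. For example, with illustrative values:

Pattern p = Pattern.compile("java"); // the method lower-cases the content before matching
System.out.println(isContentRelevant("<html>Java crawler tutorial</html>", p)); // true
System.out.println(isContentRelevant("<html>nothing relevant here</html>", p)); // false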
public List<String> extractUrls(String text, CrawlerUrl crawlerUrl) {
    Map<String, String> urlMap = new HashMap<String, String>();
    extractHttpUrls(urlMap, text);
    extractRelativeUrls(urlMap, text, crawlerUrl);
    return new ArrayList<String>(urlMap.keySet());
}
private void extractHttpUrls(Map<String, String> urlMap, String text) {
    // httpRegexp is assumed here: the Pattern expression was lost from the original
    // excerpt (see the sketch after extractRelativeUrls)
    Matcher m = httpRegexp.matcher(text);
    while (m.find()) {
        String url = m.group();
        String[] terms = url.split("a href=\"");
        for (String term : terms) {
            // System.out.println("Term = " + term);
            if (term.startsWith("http")) {
                int index = term.indexOf("\"");
                if (index > 0) {
                    term = term.substring(0, index);
                }
                urlMap.put(term, term);
                System.out.println("Hyperlink: " + term);
            }
        }
    }
}
private void extractRelativeUrls(Map<String, String> urlMap, String text,
        CrawlerUrl crawlerUrl) {
    Matcher m = relativeRegexp.matcher(text);
    URL textURL = crawlerUrl.getURL();
    String host = textURL.getHost();
    while (m.find()) {
        String url = m.group();
        String[] terms = url.split("a href=\"");
        for (String term : terms) {
            if (term.startsWith("/")) {
                int index = term.indexOf("\"");
                if (index > 0) {
                    term = term.substring(0, index);
                }
                // Prepend the scheme and host to turn the relative path into an absolute URL
                String s = "http://" + host + term;
                urlMap.put(s, s);
                System.out.println("Relative url: " + s);
            }
        }
    }
}
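Both extractors depend on Pattern fields (httpRegexp, relativeRegexp) whose expressions are not included in the excerpt. A minimal sketch of what they might look like, given how the matched text is split and trimmed above (the exact expressions are an assumption, not from the original); for anything beyond a toy crawler, an HTML parser such as Jsoup is a more robust way to extract links than regular expressions:

// Anchor tags whose href is an absolute http(s) URL (assumed expression)
private static final Pattern httpRegexp = Pattern.compile("<a href=\"http[^\"]*\"");
// Anchor tags whose href is a site-relative path (assumed expression)
private static final Pattern relativeRegexp = Pattern.compile("<a href=\"/[^\"]*\"");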
public static void main(String[] args) {
    try {
        String url = ""; // seed URL (left blank in the original)
        Queue<CrawlerUrl> urlQueue = new LinkedList<CrawlerUrl>();
        String regexp = "java";
        urlQueue.add(new CrawlerUrl(url, 0));
        // Constructor arguments: the URL queue, then (presumably) the maximum number of
        // pages and the maximum depth, the delay between requests in milliseconds,
        // and the keyword used for relevance matching
        NaiveCrawler crawler = new NaiveCrawler(urlQueue, 100, 5, 1000L,
                regexp);
        // boolean allowCrawl = crawler.areWeAllowedToVisit(url);
        // System.out.println("Allowed to crawl: " + url + " " + allowCrawl);
        crawler.crawl();
    } catch (Throwable t) {
        System.out.println(t.toString());
        t.printStackTrace();
    }
}
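Finally, the code assumes a small CrawlerUrl helper class that pairs a URL string with the depth at which it was discovered. A minimal sketch follows; only the constructor, getUrlString and getURL appear in the excerpt above, so getDepth and the rest are illustrative.

import java.net.MalformedURLException;
import java.net.URL;

public class CrawlerUrl {
    private final String urlString;
    private final int depth;

    public CrawlerUrl(String urlString, int depth) {
        this.urlString = urlString;
        this.depth = depth;
    }

    public String getUrlString() {
        return urlString;
    }

    public int getDepth() {
        return depth;
    }

    public URL getURL() {
        try {
            return new URL(urlString); // extractRelativeUrls uses this to read the host
        } catch (MalformedURLException e) {
            throw new IllegalArgumentException("Bad URL: " + urlString, e);
        }
    }

    @Override
    public String toString() {
        return urlString + " [depth=" + depth + "]";
    }
}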
