
Crawler Implementation, Part 3: Connecting the Crawler's Download, Parse, and Store Pipeline

1. Create a storage interface, IStoreService

package com.dajiangtai.djt_spider.service;

import com.dajiangtai.djt_spider.entity.Page;

/**
 * Data storage interface.
 * @author Administrator
 */
public interface IStoreService {

    public void store(Page page);
}
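The point of hiding storage behind an interface is that the entry class (StartDSJCount, refactored below) only ever depends on IStoreService, so the console implementation used in this post can later be swapped for a persistent one without touching the crawl flow. As a purely illustrative sketch, not part of this project, a database-backed implementation would plug into the same interface; the class name, JDBC URL, credentials, and table layout here are all placeholders:

package com.dajiangtai.djt_spider.service.impl;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

import com.dajiangtai.djt_spider.entity.Page;
import com.dajiangtai.djt_spider.service.IStoreService;

// Hypothetical example only: a JDBC-backed IStoreService.
// The connection URL, credentials, and table layout are placeholders.
public class JdbcStoreService implements IStoreService {

    public void store(Page page) {
        String sql = "INSERT INTO t_episode_stat (allnumber, commentnumber, supportnumber) VALUES (?, ?, ?)";
        try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/spider", "user", "password");
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, page.getAllnumber());
            ps.setString(2, page.getCommentnumber());
            ps.setString(3, page.getSupportnumber());
            ps.executeUpdate();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}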

2. Create an implementation of the storage interface, ConsoleStoreService (prints the stored data to the console)

package com.dajiangtai.djt_spider.service.impl;

import com.dajiangtai.djt_spider.entity.Page;
import com.dajiangtai.djt_spider.service.IStoreService;

/**
 * Console storage implementation: prints the parsed fields to standard output.
 */
public class ConsoleStoreService implements IStoreService {

    public void store(Page page) {
        System.out.println("Total play count: " + page.getAllnumber());
        System.out.println("Comment count: " + page.getCommentnumber());
        System.out.println("Likes: " + page.getSupportnumber());
    }
}

3. Refactor StartDSJCount:

1. Add a storeService field with get/set methods.

2. Wire in a concrete implementation with dsj.setStoreService(new ConsoleStoreService());

3. Add a method for storing page information: public void storePageInfo(Page page)

4. Test whether the storePageInfo(Page page) method works.

package com.dajiangtai.djt_spider.start;

import com.dajiangtai.djt_spider.entity.Page;
import com.dajiangtai.djt_spider.service.IDownLoadService;
import com.dajiangtai.djt_spider.service.IProcessService;
import com.dajiangtai.djt_spider.service.IStoreService;
import com.dajiangtai.djt_spider.service.impl.ConsoleStoreService;
import com.dajiangtai.djt_spider.service.impl.HttpClientDownLoadService;
import com.dajiangtai.djt_spider.service.impl.YOUKUProcessService;

/**
 * Entry class for the TV-series crawler.
 * @author Administrator
 */
public class StartDSJCount {

    // Page download interface
    private IDownLoadService downLoadService;
    // Page parsing interface
    private IProcessService processService;
    // Data storage interface
    private IStoreService storeService;

    public static void main(String[] args) {
        StartDSJCount dsj = new StartDSJCount();
        dsj.setDownLoadService(new HttpClientDownLoadService());
        dsj.setProcessService(new YOUKUProcessService());
        dsj.setStoreService(new ConsoleStoreService());
        String url = "http://list.youku.com/show/id_z9cd2277647d311e5b692.html?spm=a2h0j.8191423.sMain.5~5~A!2.iCUyO9";
        // Download the page
        Page page = dsj.downloadPage(url);
        // Parse the page
        dsj.processPage(page);
        // Store the page information
        dsj.storePageInfo(page);
    }

    // Download a page
    public Page downloadPage(String url) {
        return this.downLoadService.download(url);
    }

    // Parse a page
    public void processPage(Page page) {
        this.processService.process(page);
    }

    // Store page information
    public void storePageInfo(Page page) {
        this.storeService.store(page);
    }

    public IDownLoadService getDownLoadService() {
        return downLoadService;
    }

    public void setDownLoadService(IDownLoadService downLoadService) {
        this.downLoadService = downLoadService;
    }

    public IProcessService getProcessService() {
        return processService;
    }

    public void setProcessService(IProcessService processService) {
        this.processService = processService;
    }

    public IStoreService getStoreService() {
        return storeService;
    }

    public void setStoreService(IStoreService storeService) {
        this.storeService = storeService;
    }
}

Console output:

Total play count: null
Comment count: null
Likes: null

Why are all three values null? Because the fields of page were never populated: the process method parses the page but does not write the results back into the page object. We therefore also need to refactor the process method of YOUKUProcessService so that each parsed value is stored in the corresponding property of page. The code is as follows:

package com.dajiangtai.djt_spider.service.impl;

import org.htmlcleaner.HtmlCleaner;
import org.htmlcleaner.TagNode;

import com.dajiangtai.djt_spider.entity.Page;
import com.dajiangtai.djt_spider.service.IProcessService;
import com.dajiangtai.djt_spider.util.HtmlUtil;
import com.dajiangtai.djt_spider.util.LoadPropertyUtil;

public class YOUKUProcessService implements IProcessService {

    // The XPath lookup yields text such as "总播放数:16,931,628,832", so a regular expression is used to extract the number.
    // The expressions below are now loaded from a properties file via LoadPropertyUtil instead of being hard-coded:
    // private String allnumberRegex = "(?<=总播放数:)[\\d,]+";
    // private String commentnumberRegex = "(?<=评论:)[\\d,]+";
    // private String supportnumberRegex = "(?<=顶:)[\\d,]+";
    //
    // private String parseAllNumber = "/body/div/div/div/div/div/ul/li[11]";
    // private String parseCommentNumber = "//div[@class=\"p-base\"]/ul/li[12]";
    // private String parseSupportNumber = "//div[@class=\"p-base\"]/ul/li[13]";

    public void process(Page page) {
        String content = page.getContent();
        HtmlCleaner htmlCleaner = new HtmlCleaner();
        // Parse the HTML with HtmlCleaner to obtain the root node
        TagNode rootNode = htmlCleaner.clean(content);

        try {
            // Browser XPath: /html/body/div[4]/div/div[1]/div[2]/div[2]/ul/li[11]
            // The XPath has to be adjusted before it works with HtmlCleaner; if it is not rewritten,
            // running in debug mode shows that evaluateXPath returns an empty array [].

            // Total play count
            String allnumber = HtmlUtil.getFieldByRegex(rootNode, LoadPropertyUtil.getYOUKY("parseAllNumber"), LoadPropertyUtil.getYOUKY("allnumberRegex"));
            page.setAllnumber(allnumber);

            // Comment count
            String commentnumber = HtmlUtil.getFieldByRegex(rootNode, LoadPropertyUtil.getYOUKY("parseCommentNumber"), LoadPropertyUtil.getYOUKY("commentnumberRegex"));
            page.setCommentnumber(commentnumber);

            // Like count
            String supportnumber = HtmlUtil.getFieldByRegex(rootNode, LoadPropertyUtil.getYOUKY("parseSupportNumber"), LoadPropertyUtil.getYOUKY("supportnumberRegex"));
            page.setSupportnumber(supportnumber);

            // Fields that are not parsed yet are filled with placeholder values
            page.setDaynumber("0");
            page.setAgainstnumber("0");
            page.setCollectnumber("0");

        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
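The XPath and regex expressions are no longer hard-coded in the class; they are looked up through LoadPropertyUtil.getYOUKY(key). The properties file itself is not shown in this post, but based on the keys used above and the values visible in the commented-out fields, it would look roughly like the sketch below. The file name is an assumption; also note that java.util.Properties reads ISO-8859-1 by default, so the Chinese text would either need \uXXXX escapes or the file would have to be loaded through a UTF-8 Reader:

# youku.properties (name assumed): XPath and regex settings read by LoadPropertyUtil
parseAllNumber=/body/div/div/div/div/div/ul/li[11]
parseCommentNumber=//div[@class="p-base"]/ul/li[12]
parseSupportNumber=//div[@class="p-base"]/ul/li[13]
allnumberRegex=(?<=总播放数:)[\\d,]+
commentnumberRegex=(?<=评论:)[\\d,]+
supportnumberRegex=(?<=顶:)[\\d,]+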

Refactor ConsoleStoreService to print all of the fields:

package com.dajiangtai.djt_spider.service.impl;

import com.dajiangtai.djt_spider.entity.Page;
import com.dajiangtai.djt_spider.service.IStoreService;

public class ConsoleStoreService implements IStoreService {

    public void store(Page page) {
        System.out.println("Total play count: " + page.getAllnumber());
        System.out.println("Comment count: " + page.getCommentnumber());
        System.out.println("Likes: " + page.getSupportnumber());
        System.out.println("Dislikes: " + page.getAgainstnumber());
        System.out.println("Favorites: " + page.getCollectnumber());
        System.out.println("Daily play increment: " + page.getDaynumber());
    }
}

Test: run the main method of StartDSJCount. The console output is:

Total play count: 17,015,726,387
Comment count: 1,256,223
Likes: 13,835,376
Dislikes: 0
Favorites: 0
Daily play increment: 0
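One piece this post relies on but does not list is HtmlUtil.getFieldByRegex. Judging only from how it is called above (a TagNode root, an XPath string, and a regex string, returning a String), its internals can be sketched roughly as follows; this is an assumption for readers following along, not the project's actual code:

package com.dajiangtai.djt_spider.util;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.htmlcleaner.TagNode;
import org.htmlcleaner.XPatherException;

public class HtmlUtil {

    // Evaluate the XPath against the root node, take the text of the first matching node,
    // and pull the wanted field out of it with the given regular expression.
    public static String getFieldByRegex(TagNode rootNode, String xpath, String regex) {
        try {
            Object[] nodes = rootNode.evaluateXPath(xpath);
            if (nodes != null && nodes.length > 0) {
                String text = ((TagNode) nodes[0]).getText().toString();
                Matcher matcher = Pattern.compile(regex).matcher(text);
                if (matcher.find()) {
                    return matcher.group();
                }
            }
        } catch (XPatherException e) {
            e.printStackTrace();
        }
        return null;
    }
}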

 
