您现在的位置是:首页 > 文章详情

从爬取的文章 HTML 中提取出中文关键字

日期:2018-05-29 点击:358

分2步。

1.从 HTML 中提取出纯文本(去掉标签)

import org.htmlparser.NodeFilter; import org.htmlparser.Parser; import org.htmlparser.beans.StringBean; import org.htmlparser.filters.CssSelectorNodeFilter; import org.htmlparser.util.NodeList; public class HtmlUtil { public static String getText(String html, String id) { try { Parser parser = new Parser(html); NodeFilter filter = new CssSelectorNodeFilter("#" + id); NodeList nList = parser.extractAllNodesThatMatch(filter); return nList == null || nList.size() == 0 ? null : nList.elementAt( 0).toPlainTextString(); } catch (Exception e) { e.printStackTrace(); return null; } } public static String getTextByClass(String html, String css_class) { try { Parser parser = new Parser(html); NodeFilter filter = new CssSelectorNodeFilter("." + css_class); NodeList nList = parser.extractAllNodesThatMatch(filter); return nList == null || nList.size() == 0 ? null : nList.elementAt( 0).toPlainTextString(); } catch (Exception e) { e.printStackTrace(); return null; } } /** * 获取网页中纯文本信息 * * @param html * @return * @throws Exception * @throws Exception */ public static String getText(String html) throws Exception { StringBean bean = new StringBean(); bean.setLinks(false); bean.setReplaceNonBreakingSpaces(true); bean.setCollapse(true); // 返回解析后的网页纯文本信息 Parser parser = Parser.createParser(html, "utf-8"); parser.visitAllNodesWith(bean); parser.reset(); String text = bean.getStrings(); String reg = "[^\u4e00-\u9fa5]"; text = text.replaceAll(reg, " "); return text; } } 

2.从纯文本中提取出中文关键字(TextRank关键词提取)

import com.hankcs.hanlp.HanLP; import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary; import com.hankcs.hanlp.seg.common.Term; import java.util.*; import java.util.stream.Collectors; /** * TextRank关键词提取 * * @author hankcs */ public class TextRankKeyword { public static final int MAX_KEY_WORDS = 7; /** * 阻尼系数(DampingFactor),一般取值为0.85 */ static final float d = 0.618f; /** * 最大迭代次数 */ static final int max_iter = 2000; static final float min_diff = 0.001f; public TextRankKeyword() { // jdk bug : Exception in thread "main" java.lang.IllegalArgumentException: Comparison method violates its general contract! System.setProperty("java.util.Arrays.useLegacyMergeSort", "true"); } public String getKeyword(String title, String content) { List<Term> termList = HanLP.segment(title + content); List<String> wordList = new ArrayList<String>(); for (Term t : termList) { if (shouldInclude(t)) { wordList.add(t.word); } } Map<String, Set<String>> words = new HashMap<String, Set<String>>(); Queue<String> que = new LinkedList<String>(); for (String w : wordList) { if (!words.containsKey(w)) { words.put(w, new HashSet<String>()); } que.offer(w); if (que.size() > 5) { que.poll(); } for (String w1 : que) { for (String w2 : que) { if (w1.equals(w2)) { continue; } words.get(w1).add(w2); words.get(w2).add(w1); } } } Map<String, Float> score = new HashMap<String, Float>(); for (int i = 0; i < max_iter; ++i) { Map<String, Float> m = new HashMap<String, Float>(); float max_diff = 0; for (Map.Entry<String, Set<String>> entry : words.entrySet()) { String key = entry.getKey(); Set<String> value = entry.getValue(); m.put(key, 1 - d); for (String other : value) { int size = words.get(other).size(); if (key.equals(other) || size == 0) continue; m.put(key, m.get(key) + d / size * (score.get(other) == null ? 0 : score.get(other))); } max_diff = Math.max(max_diff, Math.abs(m.get(key) - (score.get(key) == null ? 
0 : score.get(key)))); } score = m; if (max_diff <= min_diff) break; } List<Map.Entry<String, Float>> entryList = new ArrayList<Map.Entry<String, Float>>(score.entrySet()); Collections.sort(entryList, (o1, o2) -> (o1.getValue() - o2.getValue() > 0 ? -1 : 1)); List<Map.Entry<String, Float>> list = entryList.stream().filter(w -> w.getKey().length() > 1).collect(Collectors.toList()); String result = ""; int nKeyword = MAX_KEY_WORDS > list.size() ? list.size() : MAX_KEY_WORDS; for (int i = 0; i < nKeyword; ++i) { result += list.get(i).getKey() + ';'; } System.out.println(result); return result; } /** * 是否应当将这个term纳入计算,词性属于名词、动词、副词、形容词 * * @param term * @return 是否应当 */ public boolean shouldInclude(Term term) { return CoreStopWordDictionary.shouldInclude(term); } } 

完整工程源代码:

https://github.com/KotlinSpringBoot/saber

附: 完整爬取各大著名技术站点的博客文章的源代码。

原文链接:https://yq.aliyun.com/articles/626913
关注公众号

低调大师中文资讯倾力打造互联网数据资讯、行业资源、电子商务、移动互联网、网络营销平台。

持续更新报道IT业界、互联网、市场资讯、驱动更新,是最及时权威的产业资讯及硬件资讯报道平台。

转载内容版权归作者及来源网站所有,本站原创内容转载请注明来源。

文章评论

共有0条评论,来说两句吧……

文章二维码

扫描即可查看该文章

点击排行

推荐阅读

最新文章