1. The code
1.1 The ANSJTokenizerFactory factory class
package org.ansj.solr;

import java.io.IOException;
import java.io.Reader;
import java.util.Map;

import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.util.AttributeFactory;

public class ANSJTokenizerFactory extends TokenizerFactory {

    // One tokenizer per thread, reused across calls to create().
    private ThreadLocal<ANSJTokenizer> tokenizerLocal = new ThreadLocal<ANSJTokenizer>();

    /** Creates a new ANSJTokenizerFactory */
    public ANSJTokenizerFactory(Map<String, String> args) {
        super(args);
        assureMatchVersion();
        if (!args.isEmpty()) {
            throw new IllegalArgumentException("Unknown parameters: " + args);
        }
    }

    @Override
    public ANSJTokenizer create(AttributeFactory factory, Reader input) {
        ANSJTokenizer tokenizer = tokenizerLocal.get();
        if (tokenizer == null) {
            tokenizer = newTokenizer(factory, input);
        }
        try {
            tokenizer.setReader(input);
        } catch (IOException e) {
            // The cached tokenizer could not accept a new reader; fall back to a fresh one.
            tokenizer = newTokenizer(factory, input);
        }
        return tokenizer;
    }

    private ANSJTokenizer newTokenizer(AttributeFactory factory, Reader input) {
        ANSJTokenizer tokenizer = new ANSJTokenizer(factory, input);
        tokenizerLocal.set(tokenizer);
        return tokenizer;
    }
}
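A note on the design: unlike the stock Solr factories, which return a fresh Tokenizer from every create() call, this factory caches one ANSJTokenizer per thread and replaces it only when setReader() fails. That saves re-instantiating the tokenizer for every field, but it assumes each indexing thread fully consumes one token stream before requesting the next.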
1.2 The ANSJTokenizer class
package org.ansj.solr;

import java.io.IOException;
import java.io.Reader;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeFactory;
import org.ansj.domain.Term;
import org.ansj.splitWord.Analysis;
import org.ansj.splitWord.analysis.ToAnalysis;

public final class ANSJTokenizer extends Tokenizer {

    // The underlying ansj analysis; created lazily from the current reader.
    private Analysis udf = null;
    private int offset = 0;

    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
    private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);

    public ANSJTokenizer(Reader in) {
        super(in);
    }

    public ANSJTokenizer(AttributeFactory factory, Reader in) {
        super(factory, in);
    }

    public Analysis getAnalysis() {
        udf = new ToAnalysis(input);
        return udf;
    }

    @Override
    public boolean incrementToken() throws IOException {
        clearAttributes();
        if (udf == null) {
            udf = getAnalysis();
        }
        Term term = udf.next();
        if (term == null) {
            end();
            return false;
        }
        termAtt.copyBuffer(term.getName().toCharArray(), 0, term.getName().length());
        // When the input contains a newline, ansj restarts offsets on the new line,
        // so a term's end offset can be smaller than its start offset (or negative).
        // The original code broke badly in that case; fall back to start + term length.
        if (term.getTo().getOffe() < term.getOffe() || term.getTo().getOffe() < 0) {
            offsetAtt.setOffset(term.getOffe(), term.getOffe() + term.getName().length());
        } else {
            offsetAtt.setOffset(term.getOffe(), term.getTo().getOffe());
        }
        typeAtt.setType("word");
        return true;
    }

    @Override
    public final void end() throws IOException {
        super.end();
        // Set the final offset.
        final int finalOffset = correctOffset(offset);
        this.offsetAtt.setOffset(finalOffset, finalOffset);
    }

    @Override
    public void reset() throws IOException {
        super.reset();
        offset = 0;
        udf = new ToAnalysis(input);
    }

    @Override
    public void close() throws IOException {
        super.close();
        offset = 0;
    }
}
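To sanity-check the tokenizer outside Solr, it can be driven directly through the standard Lucene TokenStream workflow (reset, incrementToken loop, end, close). A minimal sketch, assuming Lucene 4.x and that the ansj resources directory is reachable from the working directory; the demo class is ours, not part of ansj:

package org.ansj.solr;

import java.io.StringReader;

import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

public class ANSJTokenizerDemo {
    public static void main(String[] args) throws Exception {
        ANSJTokenizer tokenizer = new ANSJTokenizer(new StringReader("這份文件是頗有光澤"));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        OffsetAttribute offsets = tokenizer.addAttribute(OffsetAttribute.class);
        tokenizer.reset(); // mandatory before the first incrementToken() call
        while (tokenizer.incrementToken()) {
            // Print each segmented term with its character offsets into the input.
            System.out.println(term.toString()
                    + " [" + offsets.startOffset() + ", " + offsets.endOffset() + ")");
        }
        tokenizer.end();
        tokenizer.close();
    }
}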
2. Build the ansj project and add the exported jar to the WEB-INF\lib\ directory inside solr.war. Add the nlp-lang.jar that ansj depends on in the same way, otherwise errors will be thrown.
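For example, from a directory containing a local WEB-INF/lib/ folder with the two jars in it, jar uf solr.war WEB-INF/lib/ansj_seg.jar WEB-INF/lib/nlp-lang.jar adds them to the war in place (the jar file names here are illustrative; they vary with the ansj version).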
3. Copy the resources directory under ansj into solr's example directory; otherwise Solr will complain at runtime that it cannot find the part-of-speech dictionary files. There may be other ways to configure this, but I have not found one.
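On Linux this is a plain recursive copy, e.g. cp -r ansj_seg/resources solr/example/ (paths illustrative), which suggests ansj resolves its dictionary files relative to the working directory Solr is started from.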
4. Edit the schema.xml of the example that ships with solr and add the following two entries:
<field name="chinese_ansj_text" type="text_cn" indexed="true" stored="true"/>

<fieldType name="text_cn" class="solr.TextField" positionIncrementGap="100">
  <analyzer type="index">
    <tokenizer class="org.ansj.solr.ANSJTokenizerFactory"/>
    <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true"/>
    <filter class="solr.LowerCaseFilterFactory"/>
  </analyzer>
  <analyzer type="query">
    <tokenizer class="org.ansj.solr.ANSJTokenizerFactory"/>
    <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true"/>
    <filter class="solr.LowerCaseFilterFactory"/>
  </analyzer>
</fieldType>
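Since the index-time and query-time chains are identical, the two <analyzer> elements could be collapsed into a single <analyzer> with no type attribute, which Solr then uses for both. Keeping them separate, as here, leaves room to diverge later (for example, adding a synonym filter only on the query side).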
Then edit gb18030-example in solr's exampledocs directory, adding one line:
<add>
  <doc>
    <field name="id">GB18030TEST</field>
    <field name="name">Test with some GB18030 encoded characters</field>
    <field name="features">No accents here</field>
    <field name="features">這是一個功能</field>
    <field name="features">This is a feature (translated)</field>
    <field name="features">這份文件是頗有光澤</field>
    <field name="features">This document is very shiny (translated)</field>
    <field name="price">0</field>
    <field name="inStock">true</field>
    <field name="chinese_ansj_text">這份文件是頗有光澤</field>
  </doc>
</add>
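After restarting Solr so the schema change takes effect, the file can be indexed with the stock post tool shipped in exampledocs, e.g. java -jar post.jar gb18030-example.xml run from that directory (assuming the Solr 4.x example layout).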
5. Experiment: on Solr's Analysis page, enter 「數據挖掘通常是指從大量的數據中經過算法搜索隱藏於其中信息的過程」 ("data mining generally refers to the process of searching, by algorithm, for information hidden in large amounts of data"), then click analyze; the segmentation result is displayed.
On the Query page of the Solr admin console, enter chinese_ansj_text:光澤 and execute the query; the matching result is displayed.
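Equivalently, the search can be issued directly over HTTP, e.g. http://localhost:8983/solr/collection1/select?q=chinese_ansj_text:光澤 (collection1 being the default core name in the Solr 4.x example; adjust to your setup). The GB18030TEST document indexed above should be returned.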