咱們在D盤下建一個文件夾叫lucene,lucene內再建兩個文件夾,一個叫example,一個叫index01。example文件夾下三個txt文件,a.txt內容爲hello java,b.txt內容爲hello lucene,c.txt內容爲hello hadoop。
package com.amazing;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

/**
 * Minimal Lucene 3.5 example: builds an index under {@code D:\lucene\index01}
 * from the text files found in {@code D:\lucene\example}.
 */
public class HelloLucene {

    /**
     * Creates (or appends to) the index. For every regular file in the
     * example directory one {@link Document} is added with three fields:
     * <ul>
     *   <li>{@code content}  – tokenized from the file's text, not stored</li>
     *   <li>{@code filename} – stored, not analyzed</li>
     *   <li>{@code path}     – stored, not analyzed</li>
     * </ul>
     * I/O errors are reported to stderr; the writer is always closed.
     */
    public void createIndex() {
        IndexWriter writer = null;
        try {
            Directory directory = FSDirectory.open(
                    new File("D:" + File.separator + "lucene" + File.separator + "index01"));
            IndexWriterConfig iwc = new IndexWriterConfig(
                    Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35));
            writer = new IndexWriter(directory, iwc);

            File exampleDir = new File(
                    "D:" + File.separator + "lucene" + File.separator + "example");
            // listFiles() returns null when the path does not exist or is not a
            // directory — the original code would throw an NPE in that case.
            File[] files = exampleDir.listFiles();
            if (files == null) {
                return;
            }
            for (File file : files) {
                // Skip sub-directories: new FileReader(dir) would throw.
                if (!file.isFile()) {
                    continue;
                }
                Document doc = new Document();
                // NOTE(review): FileReader uses the platform default charset;
                // fine for this ASCII demo, confirm before indexing real data.
                doc.add(new Field("content", new FileReader(file)));
                doc.add(new Field("filename", file.getName(),
                        Field.Store.YES, Field.Index.NOT_ANALYZED));
                doc.add(new Field("path", file.getAbsolutePath(),
                        Field.Store.YES, Field.Index.NOT_ANALYZED));
                writer.addDocument(doc);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (writer != null) {
                try {
                    writer.close();
                } catch (CorruptIndexException e) {
                    e.printStackTrace();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
運行測試類:
package com.amazing;

import org.junit.Test;

/**
 * Driver test: triggers index creation via {@link HelloLucene#createIndex()}.
 */
public class TestLucene {

    @Test
    public void testCreateIndex() {
        new HelloLucene().createIndex();
    }
}
文件夾index01下出現了一些文件: