In a Django project, when we use a full-text search framework, good support for Chinese word segmentation does not come out of the box; for that we can use the "jieba" ("结巴") segmenter. Install it first:
pip install jieba
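Before wiring jieba into Whoosh, it helps to see what its two cutting modes produce. Here is a minimal check in a Python shell, using the sample sentence from jieba's own README:

import jieba

# Full mode: emit every word jieba recognizes, including overlapping
# segments -- high recall, which suits index-time tokenization.
print('/'.join(jieba.cut('我来到北京清华大学', cut_all=True)))
# -> 我/来到/北京/清华/清华大学/华大/大学

# Accurate mode (the default): one best segmentation, suited to display.
print('/'.join(jieba.cut('我来到北京清华大学')))
# -> 我/来到/北京/清华大学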
Next, go to the haystack backends directory inside your virtualenv (the path below is from this example's environment; adjust it to your own):

/home/michael/.virtualenvs/Django_test/lib/python3.5/site-packages/haystack/backends/
In that directory, create a file named ChineseAnalyzer.py (the name the import below expects) with the following content:

import jieba
from whoosh.analysis import Tokenizer, Token


class ChineseTokenizer(Tokenizer):
    def __call__(self, value, positions=False, chars=False,
                 keeporiginal=False, removestops=True,
                 start_pos=0, start_char=0, mode='', **kwargs):
        t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)
        # Segment the text with jieba's full mode, which yields every
        # word it recognizes, including overlapping segments.
        seglist = jieba.cut(value, cut_all=True)
        for w in seglist:
            t.original = t.text = w
            t.boost = 1.0
            # Note: value.find(w) locates the first occurrence of w, so
            # positions are approximate when a word repeats in the text.
            if positions:
                t.pos = start_pos + value.find(w)
            if chars:
                t.startchar = start_char + value.find(w)
                t.endchar = start_char + value.find(w) + len(w)
            yield t


def ChineseAnalyzer():
    return ChineseTokenizer()
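A quick way to confirm the analyzer behaves as expected (run it from the backends directory so the import resolves; the sentence is an arbitrary example):

from ChineseAnalyzer import ChineseAnalyzer

analyzer = ChineseAnalyzer()
# A Whoosh analyzer is callable: it yields Token objects, one per segment.
print([token.text for token in analyzer('我来到北京清华大学')])
# Expected with full mode: ['我', '来到', '北京', '清华', '清华大学', '华大', '大学']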
Still in the same directory, make a copy of the stock Whoosh backend:

cp whoosh_backend.py whoosh_cn_backend.py
Open the copied whoosh_cn_backend.py and add this import near the top:

from .ChineseAnalyzer import ChineseAnalyzer

Then find analyzer=StemmingAnalyzer() and change it to analyzer=ChineseAnalyzer().
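For orientation, here is a sketch of how the edited spot reads afterwards. The fragment is adapted from haystack's build_schema(); the exact keyword arguments vary between haystack versions, so treat it as illustrative rather than exact:

# whoosh_cn_backend.py (fragment) -- inside WhooshSearchBackend.build_schema(),
# the document TEXT fields now use the jieba-backed analyzer:
schema_fields[field_class.index_fieldname] = TEXT(
    stored=True,
    analyzer=ChineseAnalyzer(),   # was: StemmingAnalyzer()
    field_boost=field_class.boost,
    sortable=True,
)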
Next, in your project's settings.py, point haystack at the copied backend:

HAYSTACK_CONNECTIONS = {
    'default': {
        # whoosh_cn_backend is the file copied above
        'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
        'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
    }
}
Finally, rebuild the search index so the existing data is re-tokenized with the new analyzer:

python manage.py rebuild_index
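As a final check, you can query through haystack from a Django shell. This is a minimal sketch; the query string is a placeholder, and it assumes you already have a SearchIndex registered for some model:

# Run inside `python manage.py shell`.
from haystack.query import SearchQuerySet

# With the jieba analyzer in place, individual Chinese words now match
# indexed documents instead of requiring an exact whole-field hit.
results = SearchQuerySet().filter(content='北京')
for result in results:
    print(result.object)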