已經生成4個pickle文件,分別爲documents、word_features、originalnaivebayes5k、featuresets
其中featuresets容量最大,有3百多兆;若是擴大5000詞的特徵集,容量會繼續擴大,準確性也會提高
https://www.pythonprogramming.net/sentiment-analysis-module-nltk-tutorial/
# -*- coding: utf-8 -*- """ Created on Sat Jan 14 09:59:09 2017 @author: daxiong """ #File: sentiment_mod.py import nltk import random import pickle from nltk.tokenize import word_tokenize documents_f = open("documents.pickle", "rb") documents = pickle.load(documents_f) documents_f.close() word_features5k_f = open("word_features5k.pickle", "rb") word_features = pickle.load(word_features5k_f) word_features5k_f.close() def find_features(document): words = word_tokenize(document) features = {} for w in word_features: features[w] = (w in words) return features featuresets_f = open("featuresets.pickle", "rb") featuresets = pickle.load(featuresets_f) featuresets_f.close() random.shuffle(featuresets) print(len(featuresets)) testing_set = featuresets[10000:] training_set = featuresets[:10000] open_file = open("originalnaivebayes5k.pickle", "rb") classifier = pickle.load(open_file) open_file.close() def sentiment(text): feats = find_features(text) return classifier.classify(feats)
def sentiment_test(text):
feats = find_features(text)
value=classifier.classify(feats)
if value=="pos":
print("正面評價")
else:
print("負面評價")
def sentiment_inputTest():
text=input("主人請輸入留言:")
feats = find_features(text)
value=classifier.classify(feats)
if value=="pos":
print("正面評價")
else:
print("負面評價") print(sentiment("This movie was awesome! The acting was great, plot was wonderful, and there were pythons...so yea!")) print(sentiment("This movie was utter junk. There were absolutely 0 pythons. I don't see what the point was at all. Horrible movie, 0/10"))
測試效果:
仍是比較準,但 the movie is good 測試不準,看來要改進算法,考慮用頻率分析和過濾垃圾詞來提升準確率