Suitable for news text data
from gensim.models import KeyedVectors
import jieba
import pandas as pd
import gensim

model = KeyedVectors.load_word2vec_format("45000-small.txt")  # load the pre-trained word vectors
print(model.doesnt_match("性价比 好".split(" ")))
print(model.similarity('好', '超好'))
print(model.most_similar(positive=['设施', '好'], negative=['不好'], topn=1))
print(model.doesnt_match("上海 成都 广州 北京".split(" ")))
print(model.most_similar('不错', topn=10))
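These queries raise a KeyError when a word is missing from the 45000-word vocabulary, so it is worth guarding lookups. A minimal sketch, assuming gensim 4.x where KeyedVectors exposes key_to_index (on gensim 3.x the check is against model.vocab instead); the query word here is hypothetical:

word = '前台'  # hypothetical query word, not from the original post
if word in model.key_to_index:  # gensim 4.x vocabulary table
    print(model.most_similar(word, topn=5))
else:
    print(word, "is out of vocabulary")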
Suitable for a domain-specific corpus, such as hotel review text
def divide_word(df, column='评论内容'):  # segment one row's review text with jieba
    seg_list = jieba.cut(df[column], cut_all=False)
    return " ".join(seg_list)
data_hotel = pd.read_csv("data/process_data/酒店评论.csv", encoding='gbk')
data_hotel['评论分词'] = data_hotel.apply(divide_word, axis=1)
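To sanity-check the tokenizer, divide_word can be run on a single hand-made row; the sentence and its segmentation below are illustrative, not taken from the dataset:

sample = pd.Series({'评论内容': '酒店服务很好，房间干净'})  # hypothetical review row
print(divide_word(sample))  # expected roughly: 酒店 服务 很 好 ， 房间 干净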
sentences = []
for item in data_hotel['评论分词']:  # reshape into a list of token lists, the input format Word2Vec expects
    ls = item.split(" ")
    sentences.append(ls)
for item in data_area['评论分词']:  # data_area: a second review DataFrame (e.g. scenic-spot reviews) assumed to be prepared the same way
    ls = item.split(" ")
    sentences.append(ls)
model = gensim.models.Word2Vec(sentences, vector_size=200)  # train Word2Vec on the custom corpus
print(model.wv.most_similar('服务', topn=10))
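Once training finishes, the model can be persisted and reloaded with gensim's standard save/load; the file names below are assumptions, not from the original post:

model.save("hotel_reviews.w2v")  # hypothetical file name; full model, training can be resumed
model = gensim.models.Word2Vec.load("hotel_reviews.w2v")
model.wv.save("hotel_reviews.kv")  # hypothetical file name; vectors only, smaller, lookup-only
wv = KeyedVectors.load("hotel_reviews.kv")
print(wv.most_similar('服务', topn=5))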
Source: https://www.cnblogs.com/linli069/p/14713181.html