# -*- coding: utf-8 -*-
# @Time : 2022/12/29 11:28
# @Author : Clown
# @File : demo_结巴分词拆解菜单.py
# @Software : PyCharm
import re           # regular expressions
import collections  # word-frequency counting
import csv
import json
import numpy as np               # numpy array processing (not used in this script)
import pandas as pd
import jieba                     # jieba Chinese word segmentation
import jieba.posseg as jp        # part-of-speech segmentation (optional)
import wordcloud                 # word-cloud rendering (not used in this script)
from PIL import Image            # image processing (not used in this script)
import matplotlib.pyplot as plt  # plotting (not used in this script)

# Load the custom user dictionary so domain-specific terms are segmented as whole words.
jieba.load_userdict("C:/Users/ClownHe/PycharmProjects/pythonProject/venv/Lib/site-packages/jieba/mydict.txt")

# Read the source workbook; every cell is read as a string.
path = "C:/Users/ClownHe/Desktop/组合.xlsx"
data = pd.read_excel(path, dtype=str)

title = ["平台商品名称", "关键词"]  # output columns: platform product name, keyword
with open("C:/Users/ClownHe/Desktop/平台名称拆解no2.csv", 'a', newline='', encoding="utf-8-sig") as t:
    writer = csv.writer(t)   # create the CSV writer
    writer.writerow(title)   # write the header row

    for goods in data["平台商品名称"]:
        # Text preprocessing: keep only Chinese characters (U+4E00..U+9FA5),
        # which also strips punctuation, digits and whitespace.
        # pattern = re.compile(u'\t|\n|\.|-|:|;|\)|\(|\?|"')  # alternative: remove listed symbols only
        pattern = re.compile(u'[^\u4e00-\u9fa5]')
        string_data = re.sub(pattern, '', goods)  # remove characters matching the pattern

        # Word segmentation. Paddle mode is used when paddle has been enabled;
        # otherwise jieba falls back to the default (precise) mode.
        # seg_list_exact = jieba.cut_for_search(string_data)  # search-engine mode
        # seg_list_exact = jp.cut(string_data)                # POS-tagged mode
        seg_list_exact = jieba.cut(string_data, use_paddle=True)

        # Optional stop-word list (unused; the Chinese-only regex above already drops symbols and digits):
        # remove_words = [u'的', u'+', u'(', u')', u'(', u')', u'[', u']', u'1', u'个', u'4', u'包',
        #                 u'2', u'3', u'4', u'5', u'不要', u'尊享', u'加辣', u'加']

        for word in seg_list_exact:  # iterate over the segmented words
            # if word in remove_words:          # skip words in the stop-word list
            #     continue
            # With jp.cut, each item carries a POS tag: word.word, word.flag
            writer.writerow([goods, word])  # write one (product name, keyword) row
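
# --- Optional follow-up (sketch): keyword frequency count ---
# A minimal sketch, assuming the CSV written above as input; it uses the
# already-imported collections module to tally how often each keyword appears.
# The function name count_keywords and the top_n parameter are illustrative
# additions, not part of the original script.
def count_keywords(csv_path="C:/Users/ClownHe/Desktop/平台名称拆解no2.csv", top_n=20):
    """Print the top_n most frequent keywords from the CSV produced above."""
    counts = collections.Counter()
    with open(csv_path, newline='', encoding="utf-8-sig") as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for row in reader:
            if len(row) == 2 and row != title:  # ignore malformed rows and repeated headers
                counts[row[1]] += 1             # second column holds the keyword
    for keyword, freq in counts.most_common(top_n):
        print(keyword, freq)

# count_keywords()  # uncomment to run after the CSV has been written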