From 77442552cd36bf89b2a24f0101a1f50046b8e909 Mon Sep 17 00:00:00 2001
From: Xinghe <62918118+XingHehy@users.noreply.github.com>
Date: Wed, 13 Dec 2023 19:26:06 +0800
Subject: [PATCH] Update analysis.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

词云分开统计:“我”说的最多和“TA”说的最多,而不是“我们”说的最多
---
 app/analysis/analysis.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/app/analysis/analysis.py b/app/analysis/analysis.py
index 430179d..5eb13fe 100644
--- a/app/analysis/analysis.py
+++ b/app/analysis/analysis.py
@@ -17,7 +17,7 @@
 wordcloud_width = 780
 wordcloud_height = 720
 
-def wordcloud(wxid, is_Annual_report=False, year='2023'):
+def wordcloud(wxid, is_Annual_report=False, year='2023', who='1'):
     import jieba
     txt_messages = msg_db.get_messages_by_type(wxid, MsgType.TEXT, is_Annual_report, year)
     if not txt_messages:
@@ -27,7 +27,9 @@ def wordcloud(wxid, is_Annual_report=False, year='2023'):
             'max_num': "0",
             'dialogs': []
         }
-    text = ''.join(map(lambda x: x[7], txt_messages))
+    # text = ''.join(map(lambda x: x[7], txt_messages))
+    text = ''.join(map(lambda x: x[7] if x[4] == int(who) else '', txt_messages))  # 1“我”说的话,0“Ta”说的话
+    total_msg_len = len(text)
     # 使用jieba进行分词,并加入停用词
     words = jieba.cut(text)