From 920b37d4fed8febe583e4e817ed603265b3e6793 Mon Sep 17 00:00:00 2001 From: jujimeizuo Date: Mon, 29 Jan 2024 16:03:20 +0800 Subject: [PATCH 1/4] fix: modify wendao --- .../agents/metagpt/role_1_streamlit_prompt.py | 239 ++++++++++-------- .../agents/metagpt_agents/wendao/__init__.py | 1 + .../{wendao_agent => wendao}/action.py | 72 +++--- .../{wendao_agent => wendao}/role.py | 27 +- .../metagpt_agents/wendao_agent/__init__.py | 1 - 5 files changed, 184 insertions(+), 156 deletions(-) create mode 100644 tianji/agents/metagpt_agents/wendao/__init__.py rename tianji/agents/metagpt_agents/{wendao_agent => wendao}/action.py (79%) rename tianji/agents/metagpt_agents/{wendao_agent => wendao}/role.py (55%) delete mode 100644 tianji/agents/metagpt_agents/wendao_agent/__init__.py diff --git a/test/agents/metagpt/role_1_streamlit_prompt.py b/test/agents/metagpt/role_1_streamlit_prompt.py index 9e8cc51..cef2a9f 100644 --- a/test/agents/metagpt/role_1_streamlit_prompt.py +++ b/test/agents/metagpt/role_1_streamlit_prompt.py @@ -1,13 +1,16 @@ from dotenv import load_dotenv + load_dotenv() + import asyncio -import sys +import json +from typing import Optional, Any + from metagpt.actions import Action -from metagpt.roles import Role +from metagpt.roles.role import Role, RoleReactMode from metagpt.schema import Message from metagpt.logs import logger -import json -from typing import Optional + from tianji.utils.json_from import SharedDataSingleton from tianji.utils.common_llm_api import LLMApi from tianji.agents.metagpt_agents.ruyi_agent import ruyi @@ -28,8 +31,9 @@ # "wish": "" # } + # 设计思路 给定人设并导入参考聊天话术、历史聊天语料进行聊天。 -class read_and_ana(Action): +class RecvAndAnalyze(Action): PROMPT_TEMPLATE: str = """ 你是一个需求语言分析大师,你需要根据"历史消息记录"中的内容分析出以下要素(注意:没如果没有不要回答): 1.分析对话需求(requirement)。用关键词表示。如:请帮我写一段祝福。->写一段祝福 @@ -67,8 +71,8 @@ class read_and_ana(Action): 请认真结合历史消息记录分析每一个要素的情况。 只需要回复我JSON内容,不需要markdown格式,不需要回复其他任何内容! 
""" - - name: str = "read_and_ana" + + name: str = "RecvAndAnalyze" async def run(self, instruction: str): case = { @@ -83,7 +87,7 @@ async def run(self, instruction: str): "time": "傍晚", "hobby": "广场舞", "wish": "家庭成员平安", - "style": "小红书版" + "style": "小红书版", } case1 = { "requirement": "给爸爸送祝福", @@ -97,106 +101,111 @@ async def run(self, instruction: str): "time": "晚上", "hobby": "摄影", "wish": "希望我能学会欣赏艺术的美", - "style": "老年人版" + "style": "老年人版", } + case = json.dumps(case) - case1 = json.dumps(case1) - sharedData = SharedDataSingleton.get_instance() - print("instruction",instruction) + # case1 = json.dumps(case1) - prompt = self.PROMPT_TEMPLATE.format(instruction=sharedData.first_status_user_history,case = case,case1 = case1) - print("prompt",prompt) - rsp = await LLMApi()._aask(prompt=prompt,top_p=0.1) + sharedData = SharedDataSingleton.get_instance() + + prompt = self.PROMPT_TEMPLATE.format( + instruction=sharedData.first_status_user_history, case=case + ) + + rsp = await LLMApi()._aask(prompt=prompt, top_p=0.1) rsp = rsp.replace("```json", "").replace("```", "") - #rsp = rsp.strip('json\n').rstrip('') + # rsp = rsp.strip('json\n').rstrip('') - print("机器人分析需求:",rsp) + logger.info("机器人分析需求:" + rsp) sharedData.json_from_data = json.loads(rsp) # json_from_data = json.loads(rsp) return rsp + # 设计思路 根据当前状态和聊天与恋爱相关性等综合打分。给出当前回合的打分情况 -class rerask(Action): - sharedData: Optional[SharedDataSingleton] = SharedDataSingleton.get_instance() - json_from_data: Optional[dict] = sharedData.json_from_data +class RaiseQuestion(Action): + sharedData: Optional[Any] = SharedDataSingleton.get_instance() + json_from_data: Optional[dict] = sharedData.json_from_data + + # PROMPT_TEMPLATE: str = """ + # 限定提问的问题``` + # {question_list_str} + # ``` + # 你是一个提问大师,你只能从"限定提问的问题"中随机选择一个对我进行提问,每次提问只能问一个问题。 + # 提问问题的时候,你的语言风格满足: + # 1.友好,活泼 + # 你只需要回复我你的提问内容,不需要任何其他内容! + # """ PROMPT_TEMPLATE: str = """ - 限定提问的问题``` - {question_list_str} - ``` - 你是一个提问大师,你只能从"限定提问的问题"中随机选择一个对我进行提问,每次提问只能问一个问题。 - 提问问题的时候,你的语言风格满足: - 1.友好,活泼 - 你只需要回复我你的提问内容,不需要任何其他内容! - """ - PROMPT_TEMPLATE = """ - 你是一个提问大师,你只能从"限定提问的问题"中随机选择一个对我进行提问,每次提问只能问一个问题。 + 你是一个提问大师,你只能从"限定提问的问题"中随机选择一个对我进行提问。 限定提问的问题``` {question_list_str} ``` 每次提问只能问一个问题。 """ - name: str = "rerask" + name: str = "RaiseQuestion" async def run(self, instruction: str): - sharedData = SharedDataSingleton.get_instance() + sharedData = SharedDataSingleton.get_instance() json_from_data = sharedData.json_from_data - #case = {"requirement": "", "scene": "家庭聚会", "festival": "元旦", "role": "妈妈", "age": "中老年人", "career": "退休中学教师", "state": "", "character": "开朗", "time": "傍晚", "hobby": "园艺", "wish": ""} - #case = json.dumps(json_from_data) - #print("case",case) + # case = {"requirement": "", "scene": "家庭聚会", "festival": "元旦", "role": "妈妈", "age": "中老年人", "career": "退休中学教师", "state": "", "character": "开朗", "time": "傍晚", "hobby": "园艺", "wish": ""} + # case = json.dumps(json_from_data) + # print("case",case) check_after_question_list = { "requirement": "请告诉我你的需求,比如送祝福。", "scene": "你准备在什么场景下进行呢?比如家庭聚会,朋友聚会等等。", "festival": "是在哪个特殊的节日(比如中秋节,春节)吗?", "role": "你送祝福的对象是谁呢?", "age": "你送祝福的对象年龄多大呢?", - #"career": "送祝福的对象是做什么职业呢?", - #"state": "送祝福的对象最近状态如何呢?比如身体状况,精神状况等等。", - #"character": "送祝福的对象他有什么性格特点吗?", + "career": "送祝福的对象是做什么职业呢?", + "state": "送祝福的对象最近状态如何呢?比如身体状况,精神状况等等。", + "character": "送祝福的对象他有什么性格特点吗?", "time": "你准备在什么时间送祝福呢?", "hobby": "送祝福的对象有什么习惯吗?", - #"wish": "送祝福的对象有哪些个人愿望吗?", - "style": "你期望送祝福的语气是老年风格,小红书风格还是带颜文字可爱风格呢?" 
+ "wish": "送祝福的对象有哪些个人愿望吗?", + "style": "你期望送祝福的语气是老年风格,小红书风格还是带颜文字可爱风格呢?", } question_list = [] for key, value in json_from_data.items(): - if key in check_after_question_list: - if json_from_data[key] == "": - question_list.append(check_after_question_list[key]) + if key in check_after_question_list and value == "": + question_list.append(check_after_question_list[key]) question_list_str = "\n".join(question_list) - prompt = self.PROMPT_TEMPLATE.format(question_list_str=question_list_str) - print("rerask prompt",prompt) - rsp = await LLMApi()._aask(prompt=prompt,top_p=0.1) - print("机器人提问:",rsp) + + rsp = await LLMApi()._aask(prompt=prompt, top_p=0.1) if question_list == []: - rsp = "YES|"+str(rsp) + rsp = "YES|" + str(rsp) else: - rsp = "NO|"+str(rsp) - print(rsp) + logger.info("机器人提问:" + rsp) + rsp = "NO|" + str(rsp) + + # print(rsp) + return rsp # 问道 问出来信息 -class wendao(Role): - name: str = "wendao" +class WenDao(Role): + name: str = "WenDao" profile: str = "GetInformation" - + def __init__(self, **kwargs): super().__init__(**kwargs) - self._init_actions([read_and_ana,rerask]) - self._set_react_mode(react_mode="by_order") + self._init_actions([RecvAndAnalyze, RaiseQuestion]) + self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value) async def _act(self) -> Message: logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})") - + todo = self.rc.todo - msg = self.get_memories(k=1)[0] # find the most k recent messages + msg = self.get_memories(k=1)[0] result = await todo.run(msg.content) msg = Message(content=result, role=self.profile, cause_by=type(todo)) @@ -204,28 +213,17 @@ async def _act(self) -> Message: return msg async def _act_by_order(self) -> Message: - """switch action each time by order defined in _init_actions, i.e. _act (Action1) -> _act (Action2) -> ...""" for i in range(len(self.states)): self._set_state(i) rsp = await self._act() - return rsp # return output from the last action - - - - - - - - - - - - + return rsp # 新增代码,便于区分----->Start import streamlit as st import uuid + + # 定义一个执行异步代码的函数 def run_async_code(async_function, *args, **kwargs): # 创建一个新的事件循环 @@ -237,23 +235,28 @@ def run_async_code(async_function, *args, **kwargs): finally: # 关闭事件循环 loop.close() + + # 定义一个异步函数 async def run_async_model(user_input): - role_wendao = wendao() + role_wendao = WenDao() print("user_input", user_input) result = await role_wendao.run(user_input) return result.content + async def run_async_qianbianzhe(user_input): role_wendao = qianbianzhe() result = await role_wendao.run(user_input) return result.content + async def run_async_ruyi(user_input): role_wendao = ruyi() result = await role_wendao.run(user_input) return result.content + def json_to_special_str(data): result = "" for key, value in data.items(): @@ -262,39 +265,43 @@ def json_to_special_str(data): result += f"{key} - {value}
" return result + def show_history_st_messages(): - sharedData = SharedDataSingleton.get_instance() + sharedData = SharedDataSingleton.get_instance() for one_message in sharedData.chat_history: - if one_message['method'] == "json": - st.chat_message( one_message['role'] ).json( one_message['showdata'] ) - if one_message['method'] == "write": - st.chat_message( one_message['role'] ).write( one_message['showdata'] ) + if one_message["method"] == "json": + st.chat_message(one_message["role"]).json(one_message["showdata"]) + if one_message["method"] == "write": + st.chat_message(one_message["role"]).write(one_message["showdata"]) + -def show_one_message( role , method="write", showdata="",is_add=False): - sharedData = SharedDataSingleton.get_instance() +def show_one_message(role, method="write", showdata="", is_add=False): + sharedData = SharedDataSingleton.get_instance() if method == "json": - st.chat_message( role ).json( showdata ) + st.chat_message(role).json(showdata) if method == "write": - st.chat_message( role ).write( showdata ) - if is_add == True: - sharedData.chat_history.append({"role": role , "method": method , "showdata": showdata}) + st.chat_message(role).write(showdata) + if is_add is True: + sharedData.chat_history.append( + {"role": role, "method": method, "showdata": showdata} + ) # 初始化session_state变量 -if 'user_id' not in st.session_state: +if "user_id" not in st.session_state: # 为新用户会话生成一个唯一的UUID - st.session_state['user_id'] = str(uuid.uuid4()) + st.session_state["user_id"] = str(uuid.uuid4()) st.write(f"您的会话ID是: {st.session_state['user_id']}") # 在侧边栏中创建一个标题和一个链接 with st.sidebar: st.markdown("## 友情提示") - "这是为了优化人情世故大模型--搜集用户需求角色(WenDao)功能。" + "这是为了优化人情世故大模型--搜集用户需求角色(WenDao)功能。" # 创建一个滑块,用于选择最大长度,范围在0到1024之间,默认值为512 - #max_length = st.slider("max_length", 0, 1024, 512, step=1) - #templature = st.slider("templature", 0, 1024, 512, step=3) - if st.button('清除历史'): + # max_length = st.slider("max_length", 0, 1024, 512, step=1) + # templature = st.slider("templature", 0, 1024, 512, step=3) + if st.button("清除历史"): st.session_state.messages = [] # 获取新的需求收集对象 status_step = 0 @@ -307,35 +314,53 @@ def show_one_message( role , method="write", showdata="",is_add=False): # 创建一个标题和一个副标题 st.title("💬 人情世故-问道") st.caption("🚀 优化 需求搜集 的模块") -st.chat_message( "assistant" ).write( "你通过不断的跟我沟通,我来收集你的需求。" ) +st.chat_message("assistant").write("你通过不断的跟我沟通,我来收集你的需求。") status_step = 0 # 在Streamlit代码中调用异步函数 if prompt := st.chat_input(): - # 显示历史消息--优化前端效果 + # 显示历史消息--优化前端效果 show_history_st_messages() - sharedData = SharedDataSingleton.get_instance() - #st.chat_message("user").write(prompt) - show_one_message( role="user" , method="write" , showdata=prompt , is_add = True) + sharedData = SharedDataSingleton.get_instance() + # st.chat_message("user").write(prompt) + show_one_message(role="user", method="write", showdata=prompt, is_add=True) - #st.write(f"您的会话ID3是: {st.session_state['user_id']}") + # st.write(f"您的会话ID3是: {st.session_state['user_id']}") # 运行异步代码并获取结果 - sharedData.first_status_user_history = sharedData.first_status_user_history + "\n" + "user:" + str(prompt) + sharedData.first_status_user_history = ( + sharedData.first_status_user_history + "\n" + "user:" + str(prompt) + ) st.chat_message("assistant").write("正在处理,请稍候...") - print("sharedData.first_status_user_history",sharedData.first_status_user_history) + print("sharedData.first_status_user_history", sharedData.first_status_user_history) result = run_async_code(run_async_model, sharedData.first_status_user_history) - 
show_one_message( role="assistant" , method="write" , showdata="目前阶段的需求汇总如下" , is_add = False) - show_one_message( role="assistant" , method="json" , showdata=sharedData.json_from_data , is_add = False) + show_one_message( + role="assistant", method="write", showdata="目前阶段的需求汇总如下", is_add=False + ) + show_one_message( + role="assistant", + method="json", + showdata=sharedData.json_from_data, + is_add=False, + ) first_status_result_list = result.split("|") if first_status_result_list[0] == "NO": - #st.chat_message("assistant").write(first_status_result_list[1]) - show_one_message( role="assistant" , method="write" , showdata=first_status_result_list[1] , is_add = True) - sharedData.first_status_user_history = sharedData.first_status_user_history + "\n" + "assistant:" + str(first_status_result_list[1]) + # st.chat_message("assistant").write(first_status_result_list[1]) + show_one_message( + role="assistant", + method="write", + showdata=first_status_result_list[1], + is_add=True, + ) + sharedData.first_status_user_history = ( + sharedData.first_status_user_history + + "\n" + + "assistant:" + + str(first_status_result_list[1]) + ) else: status_step = 1 - #st.chat_message("assistant").write("需求收集完毕,谢谢你") - show_one_message( role="assistant" , method="write" , showdata="需求收集完毕,谢谢你", is_add = True) - - - + # st.chat_message("assistant").write("需求收集完毕,谢谢你") + show_one_message( + role="assistant", method="write", showdata="需求收集完毕,谢谢你", is_add=True + ) diff --git a/tianji/agents/metagpt_agents/wendao/__init__.py b/tianji/agents/metagpt_agents/wendao/__init__.py new file mode 100644 index 0000000..82e73ab --- /dev/null +++ b/tianji/agents/metagpt_agents/wendao/__init__.py @@ -0,0 +1 @@ +from .role import * diff --git a/tianji/agents/metagpt_agents/wendao_agent/action.py b/tianji/agents/metagpt_agents/wendao/action.py similarity index 79% rename from tianji/agents/metagpt_agents/wendao_agent/action.py rename to tianji/agents/metagpt_agents/wendao/action.py index f2fd595..8197cf0 100644 --- a/tianji/agents/metagpt_agents/wendao_agent/action.py +++ b/tianji/agents/metagpt_agents/wendao/action.py @@ -1,19 +1,19 @@ from dotenv import load_dotenv load_dotenv() -# 项目名称:人情世故大模型 -# 项目描述: -import sys +from typing import Optional, Any +import json from metagpt.actions import Action -import json -from typing import Optional +from metagpt.logs import logger + from tianji.utils.json_from import SharedDataSingleton from tianji.utils.common_llm_api import LLMApi + # 设计思路 给定人设并导入参考聊天话术、历史聊天语料进行聊天。 -class read_and_ana(Action): +class RecvAndAnalyze(Action): PROMPT_TEMPLATE: str = """ 你是一个需求语言分析大师,你需要根据"历史消息记录"中的内容分析出以下要素(注意:没如果没有不要回答): 1.分析对话需求(requirement)。用关键词表示。如:请帮我写一段祝福。->写一段祝福 @@ -52,7 +52,7 @@ class read_and_ana(Action): 只需要回复我JSON内容,不需要markdown格式,不需要回复其他任何内容! 
""" - name: str = "read_and_ana" + name: str = "RecvAndAnalyze" async def run(self, instruction: str): case = { @@ -67,7 +67,7 @@ async def run(self, instruction: str): "time": "傍晚", "hobby": "广场舞", "wish": "家庭成员平安", - "style": "小红书版" + "style": "小红书版", } case1 = { "requirement": "给爸爸送祝福", @@ -81,28 +81,31 @@ async def run(self, instruction: str): "time": "晚上", "hobby": "摄影", "wish": "希望我能学会欣赏艺术的美", - "style": "老年人版" + "style": "老年人版", } + case = json.dumps(case) - case1 = json.dumps(case1) + # case1 = json.dumps(case1) + sharedData = SharedDataSingleton.get_instance() - print("instruction", instruction) - prompt = self.PROMPT_TEMPLATE.format(instruction=sharedData.first_status_user_history, case=case, case1=case1) - print("prompt", prompt) + prompt = self.PROMPT_TEMPLATE.format( + instruction=sharedData.first_status_user_history, case=case + ) + rsp = await LLMApi()._aask(prompt=prompt, top_p=0.1) rsp = rsp.replace("```json", "").replace("```", "") # rsp = rsp.strip('json\n').rstrip('') - print("机器人分析需求:", rsp) + logger.info("机器人分析需求:" + rsp) sharedData.json_from_data = json.loads(rsp) # json_from_data = json.loads(rsp) return rsp # 设计思路 根据当前状态和聊天与恋爱相关性等综合打分。给出当前回合的打分情况 -class rerask(Action): - sharedData: Optional[SharedDataSingleton] = SharedDataSingleton.get_instance() +class RaiseQuestion(Action): + sharedData: Optional[Any] = SharedDataSingleton.get_instance() json_from_data: Optional[dict] = sharedData.json_from_data # PROMPT_TEMPLATE: str = """ @@ -114,53 +117,52 @@ class rerask(Action): # 1.友好,活泼 # 你只需要回复我你的提问内容,不需要任何其他内容! # """ + PROMPT_TEMPLATE: str = """ - 你是一个提问大师,你只能从"限定提问的问题"中随机选择一个对我进行提问,每次提问只能问一个问题。 + 你是一个提问大师,你只能从"限定提问的问题"中随机选择一个对我进行提问。 限定提问的问题``` {question_list_str} ``` 每次提问只能问一个问题。 """ - name: str = "rerask" + name: str = "RaiseQuestion" async def run(self, instruction: str): - sharedData: Optional[SharedDataSingleton] = SharedDataSingleton.get_instance() - json_from_data: Optional[dict] = sharedData.json_from_data + sharedData = SharedDataSingleton.get_instance() + json_from_data = sharedData.json_from_data # case = {"requirement": "", "scene": "家庭聚会", "festival": "元旦", "role": "妈妈", "age": "中老年人", "career": "退休中学教师", "state": "", "character": "开朗", "time": "傍晚", "hobby": "园艺", "wish": ""} # case = json.dumps(json_from_data) # print("case",case) check_after_question_list = { "requirement": "请告诉我你的需求,比如送祝福。", - # "scene": "你准备在什么场景下进行呢?比如家庭聚会,朋友聚会等等。", + "scene": "你准备在什么场景下进行呢?比如家庭聚会,朋友聚会等等。", "festival": "是在哪个特殊的节日(比如中秋节,春节)吗?", "role": "你送祝福的对象是谁呢?", - # "age": "你送祝福的对象年龄多大呢?", - # "career": "送祝福的对象是做什么职业呢?", - # "state": "送祝福的对象最近状态如何呢?比如身体状况,精神状况等等。", - # "character": "送祝福的对象他有什么性格特点吗?", + "age": "你送祝福的对象年龄多大呢?", + "career": "送祝福的对象是做什么职业呢?", + "state": "送祝福的对象最近状态如何呢?比如身体状况,精神状况等等。", + "character": "送祝福的对象他有什么性格特点吗?", "time": "你准备在什么时间送祝福呢?", - # "hobby": "送祝福的对象有什么习惯吗?", - # "wish": "送祝福的对象有哪些个人愿望吗?", - "style": "你期望送祝福的语气是老年风格,小红书风格还是带颜文字可爱风格呢?" 
+ "hobby": "送祝福的对象有什么习惯吗?", + "wish": "送祝福的对象有哪些个人愿望吗?", + "style": "你期望送祝福的语气是老年风格,小红书风格还是带颜文字可爱风格呢?", } question_list = [] for key, value in json_from_data.items(): - if key in check_after_question_list: - if json_from_data[key] == "": - question_list.append(check_after_question_list[key]) + if key in check_after_question_list and value == "": + question_list.append(check_after_question_list[key]) question_list_str = "\n".join(question_list) prompt = self.PROMPT_TEMPLATE.format(question_list_str=question_list_str) - print("rerask prompt", prompt) + rsp = await LLMApi()._aask(prompt=prompt, top_p=0.1) - print("机器人提问:", rsp) if question_list == []: rsp = "YES|" + str(rsp) else: + logger.info("机器人提问:" + rsp) rsp = "NO|" + str(rsp) - print(rsp) - return rsp + return rsp diff --git a/tianji/agents/metagpt_agents/wendao_agent/role.py b/tianji/agents/metagpt_agents/wendao/role.py similarity index 55% rename from tianji/agents/metagpt_agents/wendao_agent/role.py rename to tianji/agents/metagpt_agents/wendao/role.py index 25c403b..fb75f91 100644 --- a/tianji/agents/metagpt_agents/wendao_agent/role.py +++ b/tianji/agents/metagpt_agents/wendao/role.py @@ -1,26 +1,28 @@ from dotenv import load_dotenv + load_dotenv() -import sys + from metagpt.logs import logger -from metagpt.roles import Role +from metagpt.roles.role import Role, RoleReactMode from metagpt.schema import Message -from .action import read_and_ana,rerask +from .action import RecvAndAnalyze, RaiseQuestion + -class wendao(Role): - name: str = "wendao" +class WenDao(Role): + name: str = "WenDao" profile: str = "GetInformation" - - def __init__(self,**kwargs): + + def __init__(self, **kwargs): super().__init__(**kwargs) - self._init_actions([read_and_ana,rerask]) - self._set_react_mode(react_mode="by_order") + self._init_actions([RecvAndAnalyze, RaiseQuestion]) + self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value) async def _act(self) -> Message: logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})") - + todo = self.rc.todo - msg = self.get_memories(k=1)[0] # find the most k recent messagesA + msg = self.get_memories(k=1)[0] result = await todo.run(msg.content) msg = Message(content=result, role=self.profile, cause_by=type(todo)) @@ -28,8 +30,7 @@ async def _act(self) -> Message: return msg async def _act_by_order(self) -> Message: - """switch action each time by order defined in _init_actions, i.e. 
_act (Action1) -> _act (Action2) -> ...""" for i in range(len(self.states)): self._set_state(i) rsp = await self._act() - return rsp # return output from the last action \ No newline at end of file + return rsp diff --git a/tianji/agents/metagpt_agents/wendao_agent/__init__.py b/tianji/agents/metagpt_agents/wendao_agent/__init__.py deleted file mode 100644 index 9e5b2b1..0000000 --- a/tianji/agents/metagpt_agents/wendao_agent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .role import * \ No newline at end of file From e7f75e0d3f4183c47cfd4bbfeb38186e757d81b0 Mon Sep 17 00:00:00 2001 From: jujimeizuo Date: Mon, 29 Jan 2024 16:19:34 +0800 Subject: [PATCH 2/4] fix: modify qianbianzhe --- .../agents/metagpt/role_2_streamlit_prompt.py | 62 ++++++++++--------- .../metagpt_agents/qianbianzhe/__init__.py | 1 + .../action.py | 33 +++++----- .../role.py | 30 +++------ .../qianbianzhe_agent/__init__.py | 1 - 5 files changed, 61 insertions(+), 66 deletions(-) create mode 100644 tianji/agents/metagpt_agents/qianbianzhe/__init__.py rename tianji/agents/metagpt_agents/{qianbianzhe_agent => qianbianzhe}/action.py (86%) rename tianji/agents/metagpt_agents/{qianbianzhe_agent => qianbianzhe}/role.py (51%) delete mode 100644 tianji/agents/metagpt_agents/qianbianzhe_agent/__init__.py diff --git a/test/agents/metagpt/role_2_streamlit_prompt.py b/test/agents/metagpt/role_2_streamlit_prompt.py index b283add..11f7c8d 100644 --- a/test/agents/metagpt/role_2_streamlit_prompt.py +++ b/test/agents/metagpt/role_2_streamlit_prompt.py @@ -1,16 +1,19 @@ from dotenv import load_dotenv load_dotenv() + import asyncio +import json +from typing import Optional, Any + from metagpt.actions import Action -from metagpt.roles import Role +from metagpt.roles.role import Role, RoleReactMode from metagpt.schema import Message from metagpt.logs import logger -import json -from typing import Optional + from tianji.utils.json_from import SharedDataSingleton from tianji.utils.common_llm_api import LLMApi -from tianji.agents.metagpt_agents.wendao_agent import wendao +from tianji.agents.metagpt_agents.wendao import WenDao from tianji.agents.metagpt_agents.ruyi_agent import ruyi # json_from_data = { @@ -29,7 +32,7 @@ # 设计思路 给定人设并导入参考聊天话术、历史聊天语料进行聊天。 -class ansWrite(Action): +class AnsWrite(Action): # 这是对json中每个key的解释: # 语言场景(scene),目前的聊天场合,比如工作聚会。 # 节日(festival),对话目前背景所在的节日,比如生日。 @@ -42,10 +45,10 @@ class ansWrite(Action): # 聊天对象爱好(hobby),和role相关,就是聊天对象的兴趣爱好,例如下象棋。 # 聊天对象愿望(wish),和role相关,就是聊天对象目前的愿望是什么,例如果希望家庭成员平安。 - name: str = "read_and_ana" + name: str = "AnsWrite" async def run(self, instruction: str): - sharedData: Optional[SharedDataSingleton] = SharedDataSingleton.get_instance() + sharedData: Optional[Any] = SharedDataSingleton.get_instance() json_from_data: Optional[dict] = sharedData.json_from_data knowledge: str = "" PROMPT_TEMPLATE: str = f""" @@ -63,52 +66,55 @@ async def run(self, instruction: str): 经过思考后,将这些信息整理成一段完整的{json_from_data["requirement"]}。 """ - print("json_from_data####################################", json_from_data) + # print("json_from_data####################################", json_from_data) # knowledges = "" prompt = PROMPT_TEMPLATE.format(instruction=instruction) - print(prompt) + # print(prompt) rsp = await LLMApi()._aask(prompt) - print("回复生成:", rsp) + + logger.info("回复生成:\n" + rsp) + # print("回复生成:", rsp) + return rsp # 设计思路 根据当前状态和聊天与恋爱相关性等综合打分。给出当前回合的打分情况 -class stylize(Action): +class Stylize(Action): PROMPT_TEMPLATE: str = """ 你是一个萌妹,对任何人说话都很温柔客气。你很聪明礼貌。你喜欢发一些颜文字表情。大家都很喜欢你。 请用自己的语气改写{instruction} """ - 
name: str = "stylize" + name: str = "Stylize" async def run(self, instruction: str): prompt = self.PROMPT_TEMPLATE.format(instruction=instruction) - rsp = await self._aask(prompt) - print("风格化:", rsp) + rsp = await LLMApi()._aask(prompt) + logger.info("风格化:\n" + rsp) + return rsp # 千变者 以自己的身份回答问题 -class qianbianzhe(Role): - name: str = "qianbianzhe" - profile: str = "stylize" +class QianBianZhe(Role): + name: str = "QianBianZhe" + profile: str = "Stylize" def __init__(self, **kwargs): super().__init__(**kwargs) - self._init_actions([ansWrite, stylize]) - self._set_react_mode(react_mode="by_order") + self._init_actions([AnsWrite, Stylize]) + self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value) async def _act(self) -> Message: - # logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})") - + logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})") todo = self.rc.todo - msg = self.get_memories(k=1)[0] # find the most k recent messagesA + msg = self.get_memories(k=1)[0] result = await todo.run(msg.content) - msg = Message(content=result, role=self.profile, cause_by=type(todo)) self.rc.memory.add(msg) + return msg @@ -134,20 +140,20 @@ def run_async_code(async_function, *args, **kwargs): # 定义一个异步函数 async def run_async_model(user_input): - role_wendao = wendao() + role_wendao = WenDao() result = await role_wendao.run(user_input) return result.content async def run_async_qianbianzhe(user_input): - role_wendao = qianbianzhe() - result = await role_wendao.run(user_input) + role_qianbianzhe = QianBianZhe() + result = await role_qianbianzhe.run(user_input) return result.content async def run_async_ruyi(user_input): - role_wendao = ruyi() - result = await role_wendao.run(user_input) + role_ruyi = ruyi() + result = await role_ruyi.run(user_input) return result.content diff --git a/tianji/agents/metagpt_agents/qianbianzhe/__init__.py b/tianji/agents/metagpt_agents/qianbianzhe/__init__.py new file mode 100644 index 0000000..82e73ab --- /dev/null +++ b/tianji/agents/metagpt_agents/qianbianzhe/__init__.py @@ -0,0 +1 @@ +from .role import * diff --git a/tianji/agents/metagpt_agents/qianbianzhe_agent/action.py b/tianji/agents/metagpt_agents/qianbianzhe/action.py similarity index 86% rename from tianji/agents/metagpt_agents/qianbianzhe_agent/action.py rename to tianji/agents/metagpt_agents/qianbianzhe/action.py index d6c24c3..304cb3a 100644 --- a/tianji/agents/metagpt_agents/qianbianzhe_agent/action.py +++ b/tianji/agents/metagpt_agents/qianbianzhe/action.py @@ -1,14 +1,12 @@ from dotenv import load_dotenv load_dotenv() -# 项目名称:人情世故大模型 -# 项目描述: -import sys - -from typing import Optional +from typing import Optional, Any from metagpt.actions import Action +from metagpt.logs import logger + from tianji.utils.json_from import SharedDataSingleton from tianji.utils.common_llm_api import LLMApi @@ -29,7 +27,7 @@ # 设计思路 给定人设并导入参考聊天话术、历史聊天语料进行聊天。 -class ansWrite(Action): +class AnsWrite(Action): # 这是对json中每个key的解释: # 语言场景(scene),目前的聊天场合,比如工作聚会。 # 节日(festival),对话目前背景所在的节日,比如生日。 @@ -42,10 +40,10 @@ class ansWrite(Action): # 聊天对象爱好(hobby),和role相关,就是聊天对象的兴趣爱好,例如下象棋。 # 聊天对象愿望(wish),和role相关,就是聊天对象目前的愿望是什么,例如果希望家庭成员平安。 - name: str = "ansWrite" + name: str = "AnsWrite" async def run(self, instruction: str): - sharedData: Optional[SharedDataSingleton] = SharedDataSingleton.get_instance() + sharedData: Optional[Any] = SharedDataSingleton.get_instance() json_from_data: Optional[dict] = sharedData.json_from_data knowledge: str = "" PROMPT_TEMPLATE: str = f""" @@ -63,27 +61,30 @@ async 
def run(self, instruction: str): 经过思考后,将这些信息整理成一段完整的{json_from_data["requirement"]}。 """ - print("json_from_data####################################", json_from_data) + # print("json_from_data####################################", json_from_data) # knowledges = "" prompt = PROMPT_TEMPLATE.format(instruction=instruction) - print(prompt) - rsp = await LLMApi()._aask(prompt=prompt, top_p=0.1) - print("回复生成:", rsp) + # print(prompt) + rsp = await LLMApi()._aask(prompt) + + logger.info("回复生成:\n" + rsp) + return rsp # 设计思路 根据当前状态和聊天与恋爱相关性等综合打分。给出当前回合的打分情况 -class stylize(Action): +class Stylize(Action): PROMPT_TEMPLATE: str = """ 你是一个萌妹,对任何人说话都很温柔客气。你很聪明礼貌。你喜欢发一些颜文字表情。大家都很喜欢你。 请用自己的语气改写{instruction} """ - name: str = "stylize" + name: str = "Stylize" async def run(self, instruction: str): prompt = self.PROMPT_TEMPLATE.format(instruction=instruction) - rsp = await LLMApi()._aask(prompt=prompt, top_p=0.1) - print("风格化:", rsp) + rsp = await LLMApi()._aask(prompt) + logger.info("风格化:\n" + rsp) + return rsp diff --git a/tianji/agents/metagpt_agents/qianbianzhe_agent/role.py b/tianji/agents/metagpt_agents/qianbianzhe/role.py similarity index 51% rename from tianji/agents/metagpt_agents/qianbianzhe_agent/role.py rename to tianji/agents/metagpt_agents/qianbianzhe/role.py index 330f824..a5a7b4c 100644 --- a/tianji/agents/metagpt_agents/qianbianzhe_agent/role.py +++ b/tianji/agents/metagpt_agents/qianbianzhe/role.py @@ -2,41 +2,29 @@ load_dotenv() -from metagpt.roles import Role +from metagpt.roles.role import Role, RoleReactMode from metagpt.schema import Message from metagpt.logs import logger -from .action import ansWrite, stylize +from .action import AnsWrite, Stylize # 千变者 以自己的身份回答问题 -class qianbianzhe(Role): - name: str = "qianbianzhe" - profile: str = "stylize" +class QianBianZhe(Role): + name: str = "QianBianZhe" + profile: str = "Stylize" def __init__(self, **kwargs): super().__init__(**kwargs) - self._init_actions([ansWrite, stylize]) - self._set_react_mode(react_mode="by_order") + self._init_actions([AnsWrite, Stylize]) + self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value) async def _act(self) -> Message: logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})") - todo = self.rc.todo - msg = self.get_memories(k=1)[0] # find the most k recent messagesA + msg = self.get_memories(k=1)[0] result = await todo.run(msg.content) - msg = Message(content=result, role=self.profile, cause_by=type(todo)) self.rc.memory.add(msg) - return msg - -# async def main(): -# # 对话导入 -# msg = "test" -# role = qianbianzhe() -# logger.info(msg) -# result = await role.run(msg) -# logger.info(result) - -# asyncio.run(main()) + return msg diff --git a/tianji/agents/metagpt_agents/qianbianzhe_agent/__init__.py b/tianji/agents/metagpt_agents/qianbianzhe_agent/__init__.py deleted file mode 100644 index 9e5b2b1..0000000 --- a/tianji/agents/metagpt_agents/qianbianzhe_agent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .role import * \ No newline at end of file From ccb09cca051478284bd6116a7f533cda0949611e Mon Sep 17 00:00:00 2001 From: jujimeizuo Date: Mon, 29 Jan 2024 16:30:42 +0800 Subject: [PATCH 3/4] fix: modify ruyi --- .../agents/metagpt/role_1_streamlit_prompt.py | 4 +- .../agents/metagpt/role_2_streamlit_prompt.py | 5 +- .../agents/metagpt/role_3_streamlit_prompt.py | 47 ++++++++++--------- tianji/agents/metagpt_agents/ruyi/__init__.py | 1 + .../{ruyi_agent => ruyi}/action.py | 26 +++++----- .../{ruyi_agent => ruyi}/role.py | 19 ++++---- .../metagpt_agents/ruyi_agent/__init__.py | 1 - 7 files 
changed, 53 insertions(+), 50 deletions(-) create mode 100644 tianji/agents/metagpt_agents/ruyi/__init__.py rename tianji/agents/metagpt_agents/{ruyi_agent => ruyi}/action.py (88%) rename tianji/agents/metagpt_agents/{ruyi_agent => ruyi}/role.py (60%) delete mode 100644 tianji/agents/metagpt_agents/ruyi_agent/__init__.py diff --git a/test/agents/metagpt/role_1_streamlit_prompt.py b/test/agents/metagpt/role_1_streamlit_prompt.py index cef2a9f..f044137 100644 --- a/test/agents/metagpt/role_1_streamlit_prompt.py +++ b/test/agents/metagpt/role_1_streamlit_prompt.py @@ -13,8 +13,8 @@ from tianji.utils.json_from import SharedDataSingleton from tianji.utils.common_llm_api import LLMApi -from tianji.agents.metagpt_agents.ruyi_agent import ruyi -from tianji.agents.metagpt_agents.qianbianzhe_agent import qianbianzhe +from tianji.agents.metagpt_agents.ruyi import RuYi +from tianji.agents.metagpt_agents.qianbianzhe import QianBianZhe # json_from_data = { diff --git a/test/agents/metagpt/role_2_streamlit_prompt.py b/test/agents/metagpt/role_2_streamlit_prompt.py index 11f7c8d..ac5dad6 100644 --- a/test/agents/metagpt/role_2_streamlit_prompt.py +++ b/test/agents/metagpt/role_2_streamlit_prompt.py @@ -14,7 +14,7 @@ from tianji.utils.json_from import SharedDataSingleton from tianji.utils.common_llm_api import LLMApi from tianji.agents.metagpt_agents.wendao import WenDao -from tianji.agents.metagpt_agents.ruyi_agent import ruyi +from tianji.agents.metagpt_agents.ruyi import RuYi # json_from_data = { # "requirement": "祝福", @@ -74,7 +74,6 @@ async def run(self, instruction: str): rsp = await LLMApi()._aask(prompt) logger.info("回复生成:\n" + rsp) - # print("回复生成:", rsp) return rsp @@ -152,7 +151,7 @@ async def run_async_qianbianzhe(user_input): async def run_async_ruyi(user_input): - role_ruyi = ruyi() + role_ruyi = RuYi() result = await role_ruyi.run(user_input) return result.content diff --git a/test/agents/metagpt/role_3_streamlit_prompt.py b/test/agents/metagpt/role_3_streamlit_prompt.py index 0738361..5280bbe 100644 --- a/test/agents/metagpt/role_3_streamlit_prompt.py +++ b/test/agents/metagpt/role_3_streamlit_prompt.py @@ -1,30 +1,32 @@ from dotenv import load_dotenv load_dotenv() + import asyncio +import json +from typing import Optional, Any + from metagpt.actions import Action -from metagpt.roles import Role +from metagpt.roles.role import Role, RoleReactMode from metagpt.schema import Message from metagpt.logs import logger -import json -from typing import Optional + from tianji.utils.json_from import SharedDataSingleton from tianji.utils.knowledge_tool import ( get_docs_list_query_openai, get_docs_list_query_zhipuai, ) from tianji.utils.common_llm_api import LLMApi -from tianji.agents.metagpt_agents.ruyi_agent import ruyi -from tianji.agents.metagpt_agents.qianbianzhe_agent import qianbianzhe -from tianji.agents.metagpt_agents.wendao_agent import wendao - +from tianji.agents.metagpt_agents.ruyi import RuYi +from tianji.agents.metagpt_agents.qianbianzhe import QianBianZhe +from tianji.agents.metagpt_agents.wendao import WenDao KNOWLEDGE_PATH = r"/Users/fengzetao/Workspace/Github/SocialAI/Tianji/tianji/knowledges/04-Wishes/knowledges.txt" SAVE_PATH = r"/Users/fengzetao/Workspace/Github/SocialAI/Tianji/temp" # 给出针对回答的知识 并用md展示 -class writeMD(Action): +class WriteMarkDown(Action): # 这是对json中每个key的解释: # 语言场景(scene),目前的聊天场合,比如工作聚会。 # 节日(festival),对话目前背景所在的节日,比如生日。 @@ -37,13 +39,12 @@ class writeMD(Action): # 聊天对象爱好(hobby),和role相关,就是聊天对象的兴趣爱好,例如下象棋。 # 聊天对象愿望(wish),和role相关,就是聊天对象目前的愿望是什么,例如果希望家庭成员平安。 - name: 
str = "writeMD" + name: str = "WriteMarkDown" knowledge: str = "" json_from_data: Optional[dict] = SharedDataSingleton.get_instance().json_from_data async def run(self, instruction: str): - # knowledges = "" json_from_data: Optional[ dict ] = SharedDataSingleton.get_instance().json_from_data @@ -54,7 +55,7 @@ async def run(self, instruction: str): persist_directory=SAVE_PATH, k_num=5, ) - print("knowledge:", knowledge) + print("knowledge:\n", knowledge) PROMPT_TEMPLATE: str = f""" 你是一个{json_from_data["festival"]}的祝福大师。 你需要写一段关于如何写{json_from_data["festival"]}{json_from_data["requirement"]}的思路总结。目前了解到这段{json_from_data["festival"]}{json_from_data["requirement"]}是在{json_from_data["scene"]}送给{json_from_data["role"]}的。 @@ -70,30 +71,30 @@ async def run(self, instruction: str): """ prompt = PROMPT_TEMPLATE.format(instruction=instruction) rsp = await LLMApi()._aask(prompt) - print("回复生成:", rsp) + logger.info("回复生成:\n" + rsp) + return rsp # 如何写 如意如意如我心意 -class ruyi(Role): - name: str = "ruyi" - profile: str = "stylize" +class RuYi(Role): + name: str = "RuYi" + profile: str = "Stylize" def __init__(self, **kwargs): super().__init__(**kwargs) - self._init_actions([writeMD]) - self._set_react_mode(react_mode="by_order") + self._init_actions([WriteMarkDown]) + self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value) async def _act(self) -> Message: logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})") - todo = self.rc.todo - msg = self.get_memories(k=1)[0] # find the most k recent messagesA + msg = self.get_memories(k=1)[0] result = await todo.run(msg.content) - msg = Message(content=result, role=self.profile, cause_by=type(todo)) self.rc.memory.add(msg) + return msg @@ -119,19 +120,19 @@ def run_async_code(async_function, *args, **kwargs): # 定义一个异步函数 async def run_async_model(user_input): - role_wendao = wendao() + role_wendao = WenDao() result = await role_wendao.run(user_input) return result.content async def run_async_qianbianzhe(user_input): - role_wendao = qianbianzhe() + role_wendao = QianBianZhe() result = await role_wendao.run(user_input) return result.content async def run_async_ruyi(user_input): - role_wendao = ruyi() + role_wendao = RuYi() result = await role_wendao.run(user_input) return result.content diff --git a/tianji/agents/metagpt_agents/ruyi/__init__.py b/tianji/agents/metagpt_agents/ruyi/__init__.py new file mode 100644 index 0000000..82e73ab --- /dev/null +++ b/tianji/agents/metagpt_agents/ruyi/__init__.py @@ -0,0 +1 @@ +from .role import * diff --git a/tianji/agents/metagpt_agents/ruyi_agent/action.py b/tianji/agents/metagpt_agents/ruyi/action.py similarity index 88% rename from tianji/agents/metagpt_agents/ruyi_agent/action.py rename to tianji/agents/metagpt_agents/ruyi/action.py index 99ef5e9..1807c76 100644 --- a/tianji/agents/metagpt_agents/ruyi_agent/action.py +++ b/tianji/agents/metagpt_agents/ruyi/action.py @@ -1,13 +1,13 @@ from dotenv import load_dotenv load_dotenv() -# 项目名称:人情世故大模型 -# 项目描述: -from tianji.utils.common_llm_api import LLMApi -import sys from typing import Optional + from metagpt.actions import Action +from metagpt.logs import logger + +from tianji.utils.common_llm_api import LLMApi from tianji.utils.json_from import SharedDataSingleton from tianji.utils.knowledge_tool import ( get_docs_list_query_openai, @@ -19,7 +19,7 @@ # 给出针对回答的知识 并用md展示 -class writeMD(Action): +class WriteMarkDown(Action): # 这是对json中每个key的解释: # 语言场景(scene),目前的聊天场合,比如工作聚会。 # 节日(festival),对话目前背景所在的节日,比如生日。 @@ -32,14 +32,15 @@ class writeMD(Action): # 
聊天对象爱好(hobby),和role相关,就是聊天对象的兴趣爱好,例如下象棋。 # 聊天对象愿望(wish),和role相关,就是聊天对象目前的愿望是什么,例如果希望家庭成员平安。 - name: str = "writeMD" + name: str = "WriteMarkDown" knowledge: str = "" - json_from_data: str = SharedDataSingleton.get_instance().json_from_data + json_from_data: Optional[dict] = SharedDataSingleton.get_instance().json_from_data async def run(self, instruction: str): - # knowledges = "" - json_from_data = SharedDataSingleton.get_instance().json_from_data + json_from_data: Optional[ + dict + ] = SharedDataSingleton.get_instance().json_from_data knowledge_key = json_from_data["festival"] + json_from_data["requirement"] knowledge = get_docs_list_query_zhipuai( query_str=knowledge_key, @@ -47,6 +48,8 @@ async def run(self, instruction: str): persist_directory=SAVE_PATH, k_num=5, ) + print("knowledge:\n", knowledge) + PROMPT_TEMPLATE: str = f""" 你是一个{json_from_data["festival"]}的祝福大师。 你需要写一段关于如何写{json_from_data["festival"]}{json_from_data["requirement"]}的思路总结。目前了解到这段{json_from_data["festival"]}{json_from_data["requirement"]}是在{json_from_data["scene"]}送给{json_from_data["role"]}的。 @@ -61,8 +64,9 @@ async def run(self, instruction: str): """ prompt = PROMPT_TEMPLATE.format(instruction=instruction) - rsp = await LLMApi()._aask(prompt=prompt, top_p=0.1) - print("回复生成:", rsp) + rsp = await LLMApi()._aask(prompt) + logger.info("回复生成:\n" + rsp) + return rsp diff --git a/tianji/agents/metagpt_agents/ruyi_agent/role.py b/tianji/agents/metagpt_agents/ruyi/role.py similarity index 60% rename from tianji/agents/metagpt_agents/ruyi_agent/role.py rename to tianji/agents/metagpt_agents/ruyi/role.py index fe42038..52c51a4 100644 --- a/tianji/agents/metagpt_agents/ruyi_agent/role.py +++ b/tianji/agents/metagpt_agents/ruyi/role.py @@ -1,26 +1,25 @@ -from metagpt.roles import Role +from metagpt.roles.role import Role, RoleReactMode from metagpt.schema import Message from metagpt.logs import logger -from .action import writeMD +from .action import WriteMarkDown -class ruyi(Role): - name: str = "ruyi" - profile: str = "stylize" +class RuYi(Role): + name: str = "RuYi" + profile: str = "Stylize" def __init__(self, **kwargs): super().__init__(**kwargs) - self._init_actions([writeMD]) - self._set_react_mode(react_mode="by_order") + self._init_actions([WriteMarkDown]) + self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value) async def _act(self) -> Message: logger.info(f"{self._setting}: to do {self.rc.todo}({self.rc.todo.name})") - todo = self.rc.todo - msg = self.get_memories(k=1)[0] # find the most k recent messagesA + msg = self.get_memories(k=1)[0] result = await todo.run(msg.content) - msg = Message(content=result, role=self.profile, cause_by=type(todo)) self.rc.memory.add(msg) + return msg diff --git a/tianji/agents/metagpt_agents/ruyi_agent/__init__.py b/tianji/agents/metagpt_agents/ruyi_agent/__init__.py deleted file mode 100644 index 9e5b2b1..0000000 --- a/tianji/agents/metagpt_agents/ruyi_agent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .role import * \ No newline at end of file From 62ef51d0be708e034d8872a0bee9ab32c0b01d37 Mon Sep 17 00:00:00 2001 From: jujimeizuo Date: Mon, 29 Jan 2024 19:07:16 +0800 Subject: [PATCH 4/4] feat: add knowledge path config --- run/metagpt_webui.py | 147 +++++++++++++------- test/knowledges/test_get_docs_list_query.py | 10 +- tianji/agents/metagpt_agents/ruyi/action.py | 8 +- tianji/knowledges/__init__.py | 0 tianji/knowledges/config.py | 38 +++++ tianji/utils/knowledge_tool.py | 8 +- 6 files changed, 146 insertions(+), 65 deletions(-) create mode 100644 
tianji/knowledges/__init__.py create mode 100644 tianji/knowledges/config.py diff --git a/run/metagpt_webui.py b/run/metagpt_webui.py index f4296f5..17bf4d4 100644 --- a/run/metagpt_webui.py +++ b/run/metagpt_webui.py @@ -8,25 +8,25 @@ from streamlit_chat import message import asyncio from metagpt.logs import logger -from tianji.agents.metagpt_agents.wendao_agent import wendao -from tianji.agents.metagpt_agents.qianbianzhe_agent import qianbianzhe -from tianji.agents.metagpt_agents.ruyi_agent import ruyi +from tianji.agents.metagpt_agents.wendao import WenDao +from tianji.agents.metagpt_agents.qianbianzhe import QianBianZhe +from tianji.agents.metagpt_agents.ruyi import RuYi from tianji.utils.json_from import SharedDataSingleton from datetime import datetime import uuid # 初始化session_state变量 -if 'user_id' not in st.session_state: +if "user_id" not in st.session_state: # 为新用户会话生成一个唯一的UUID - logger.log(0,"add uuid") - st.session_state['user_id'] = str(uuid.uuid4()) + logger.log(0, "add uuid") + st.session_state["user_id"] = str(uuid.uuid4()) # 设计思路 给定人设并导入参考聊天话术、历史聊天语料进行聊天。 # async def start(msg): # role_wendao = wendao() # logger.info(msg) -# result = await role_wendao.run(msg) +# result = await role_wendao.run(msg) # sharedData = SharedDataSingleton.get_instance() # json_from_data = sharedData.json_from_data @@ -41,86 +41,133 @@ # logger.info(result) # print("final ans :\n",qianjibian_ans,ruyi_ans) + def timestamp_str(): # 获取当前时间 now = datetime.now() # 将当前时间转换为字符串格式 timestamp_str = now.strftime("%Y-%m-%d %H:%M:%S.%f") # 从字符串中去除最后的微秒部分,并转换为时间戳形式的字符串 - timestamp_str = timestamp_str[:-3].replace(" ", "_").replace(":", "").replace("-", "").replace(".", "") + timestamp_str = ( + timestamp_str[:-3] + .replace(" ", "_") + .replace(":", "") + .replace("-", "") + .replace(".", "") + ) return timestamp_str + async def main(): first_status_user_history = "" - # 对话导入 + # 对话导入 # msg = "test" # msg = """元旦节下午,我和哥哥一起去图书馆学习。我像给哥哥一个祝福。我的哥哥,一位医学院的学生,正在为即将到来的考试做准备。他今年24岁,对医学充满热情。图书馆里非常安静,我们专心致志地学习。哥哥的爱好是玩篮球,他经常说运动是放松大脑的最佳方式。他总是希望我也能热爱学习,努力追求知识。""" st.markdown("#### 人情世故大模型_祝福模块") - if 'generated' not in st.session_state: - st.session_state['generated'] = [] - if 'past' not in st.session_state: - st.session_state['past'] = [] - user_input=st.text_input("我是人情世故大模型团队开发的祝福agents。你可以在这里找到一个完整的祝福。我会告诉你怎么写,还会针对你的祝福给你生成专属的知识文档。\n 首先你需要完整的告诉我,你想在什么节日给谁送祝福?这个人是谁呢(是妈妈)?他会有什么愿望呢?你想在什么时候送给他?可以告诉我他的爱好、性格、年龄段、最近的状态。\n 就像这段:【元旦节下午,我和哥哥一起去图书馆学习。我想给哥哥一个祝福。我的哥哥,一位医学院的学生,正在为即将到来的考试做准备。他今年24岁,对医学充满热情。图书馆里非常安静,我们专心致志地学习。哥哥的爱好是玩篮球,他经常说运动是放松大脑的最佳方式。他总是希望我也能热爱学习,努力追求知识。】\n请输入你的问题:",key='input') + if "generated" not in st.session_state: + st.session_state["generated"] = [] + if "past" not in st.session_state: + st.session_state["past"] = [] + user_input = st.text_input( + "我是人情世故大模型团队开发的祝福agents。你可以在这里找到一个完整的祝福。我会告诉你怎么写,还会针对你的祝福给你生成专属的知识文档。\n 首先你需要完整的告诉我,你想在什么节日给谁送祝福?这个人是谁呢(是妈妈)?他会有什么愿望呢?你想在什么时候送给他?可以告诉我他的爱好、性格、年龄段、最近的状态。\n 就像这段:【元旦节下午,我和哥哥一起去图书馆学习。我想给哥哥一个祝福。我的哥哥,一位医学院的学生,正在为即将到来的考试做准备。他今年24岁,对医学充满热情。图书馆里非常安静,我们专心致志地学习。哥哥的爱好是玩篮球,他经常说运动是放松大脑的最佳方式。他总是希望我也能热爱学习,努力追求知识。】\n请输入你的问题:", + key="input", + ) task_status = 0 # 显示用户的UUID(仅供演示) st.write(f"您的会话ID1是: {st.session_state['user_id']}") - + if user_input: # 显示用户的UUID(仅供演示) st.write(f"您的会话ID2是: {st.session_state['user_id']}") - sharedData = SharedDataSingleton.get_instance() - for one_message in sharedData.first_status_message_list : - message(one_message['message'],is_user=one_message['is_user'],key=one_message['keyname']) - - st.session_state['past'].append(user_input) - 
message(st.session_state['past'][-1], is_user=True,key="_user") - sharedData.first_status_message_list.append({"message":st.session_state['past'][-1],"is_user":True,"keyname":"_user"+str(timestamp_str)}) + sharedData = SharedDataSingleton.get_instance() + for one_message in sharedData.first_status_message_list: + message( + one_message["message"], + is_user=one_message["is_user"], + key=one_message["keyname"], + ) + + st.session_state["past"].append(user_input) + message(st.session_state["past"][-1], is_user=True, key="_user") + sharedData.first_status_message_list.append( + { + "message": st.session_state["past"][-1], + "is_user": True, + "keyname": "_user" + str(timestamp_str), + } + ) # 第一阶段-搜集用户的需求 - - if task_status == 0: - sharedData.first_status_user_history = sharedData.first_status_user_history + "\n" + "user:" + str(user_input) + if task_status == 0: + sharedData.first_status_user_history = ( + sharedData.first_status_user_history + "\n" + "user:" + str(user_input) + ) - role_wendao = wendao() - result = await role_wendao.run(sharedData.first_status_user_history) + role_wendao = WenDao() + result = await role_wendao.run(sharedData.first_status_user_history) first_status_result_list = result.content.split("|") if first_status_result_list[0] == "NO": - sharedData.first_status_user_history = sharedData.first_status_user_history + "\n" + "assistant:" + str(first_status_result_list[1]) - st.session_state['generated'].append(str(first_status_result_list[1])) - message(st.session_state["generated"][-1], key=str(len(st.session_state["generated"]))) - - sharedData.first_status_message_list.append({"message":st.session_state["generated"][-1],"is_user":False,"keyname":str(len(st.session_state["generated"]))+str(timestamp_str)}) + sharedData.first_status_user_history = ( + sharedData.first_status_user_history + + "\n" + + "assistant:" + + str(first_status_result_list[1]) + ) + st.session_state["generated"].append(str(first_status_result_list[1])) + message( + st.session_state["generated"][-1], + key=str(len(st.session_state["generated"])), + ) + + sharedData.first_status_message_list.append( + { + "message": st.session_state["generated"][-1], + "is_user": False, + "keyname": str(len(st.session_state["generated"])) + + str(timestamp_str), + } + ) st.text("请继续输入") else: task_status = 1 - sharedData.ask_num +=1 - print("ask_num:",sharedData.ask_num) + sharedData.ask_num += 1 + print("ask_num:", sharedData.ask_num) - if task_status==1 or sharedData.ask_num>3: - sharedData = SharedDataSingleton.get_instance() + if task_status == 1 or sharedData.ask_num > 3: + sharedData = SharedDataSingleton.get_instance() json_from_data = sharedData.json_from_data - knowledge_key= json_from_data["festival"] + json_from_data["requirement"] - st.session_state['generated'].append("知识库数据:"+str(knowledge_key)) - message(st.session_state["generated"][-1], key=str(len(st.session_state["generated"]))) - - role_qianbianzhe = qianbianzhe() + knowledge_key = json_from_data["festival"] + json_from_data["requirement"] + st.session_state["generated"].append("知识库数据:" + str(knowledge_key)) + message( + st.session_state["generated"][-1], + key=str(len(st.session_state["generated"])), + ) + + role_qianbianzhe = QianBianZhe() qianjibian_ans = await role_qianbianzhe.run(" ") # st.session_state['past'].append("等待中!!!") - st.session_state['generated'].append("agent 千机变回答:"+str(qianjibian_ans)) - message(st.session_state["generated"][-1], key=str(len(st.session_state["generated"]))) - role_ruyi = ruyi() + 
st.session_state["generated"].append("agent 千机变回答:" + str(qianjibian_ans)) + message( + st.session_state["generated"][-1], + key=str(len(st.session_state["generated"])), + ) + role_ruyi = RuYi() ruyi_ans = await role_ruyi.run(" ") - - st.session_state['generated'].append("agent 如意回答:"+str(ruyi_ans)) - message(st.session_state["generated"][-1], key=str(len(st.session_state["generated"]))) + + st.session_state["generated"].append("agent 如意回答:" + str(ruyi_ans)) + message( + st.session_state["generated"][-1], + key=str(len(st.session_state["generated"])), + ) # if st.session_state['generated']: # for i in range(len(st.session_state['generated'])-1, -1, -1): # message(st.session_state["generated"][i], key=str(i)) - # message(st.session_state['past'][i], - # is_user=True, + # message(st.session_state['past'][i], + # is_user=True, # key=str(i)+'_user') - + + asyncio.run(main()) diff --git a/test/knowledges/test_get_docs_list_query.py b/test/knowledges/test_get_docs_list_query.py index c3615ef..83b4f47 100644 --- a/test/knowledges/test_get_docs_list_query.py +++ b/test/knowledges/test_get_docs_list_query.py @@ -1,4 +1,5 @@ import tianji.utils.knowledge_tool as knowledgetool +from tianji.knowledges.config import KNOWLEDGE_PATH, EMBEDDING_PATH from dotenv import load_dotenv load_dotenv() @@ -6,16 +7,13 @@ # KNOWLEDGE_PATH = r"D:\1-wsl\TIANJI\Tianji\tianji\knowledges\04-Wishes\knowledges.txt" # SAVE_PATH = r"D:\1-wsl\TIANJI\Tianji\temp" -KNOWLEDGE_PATH = r"/Users/fengzetao/Workspace/Github/SocialAI/Tianji/tianji/knowledges/04-Wishes/knowledges.txt" -SAVE_PATH = r"/Users/fengzetao/Workspace/Github/SocialAI/Tianji/temp" - # doclist = knowledgetool.get_docs_list_query_openai(query_str="春节",loader_file_path=KNOWLEDGE_PATH, \ # persist_directory = SAVE_PATH,k_num=5) -doclist = knowledgetool.get_docs_list_query_zhipuai( +doclist = knowledgetool.get_docs_list_query_openai( query_str="春节", - loader_file_path=KNOWLEDGE_PATH, - persist_directory=SAVE_PATH, + loader_file_path=KNOWLEDGE_PATH.WISHES.path(), + persist_directory=EMBEDDING_PATH.WISHES.path(filename="openai"), k_num=5, ) diff --git a/tianji/agents/metagpt_agents/ruyi/action.py b/tianji/agents/metagpt_agents/ruyi/action.py index 1807c76..32c42d9 100644 --- a/tianji/agents/metagpt_agents/ruyi/action.py +++ b/tianji/agents/metagpt_agents/ruyi/action.py @@ -9,14 +9,12 @@ from tianji.utils.common_llm_api import LLMApi from tianji.utils.json_from import SharedDataSingleton +from tianji.knowledges.config import KNOWLEDGE_PATH, EMBEDDING_PATH from tianji.utils.knowledge_tool import ( get_docs_list_query_openai, get_docs_list_query_zhipuai, ) -KNOWLEDGE_PATH = r"/Users/fengzetao/Workspace/Github/SocialAI/Tianji/tianji/knowledges/04-Wishes/knowledges.txt" -SAVE_PATH = r"/Users/fengzetao/Workspace/Github/SocialAI/Tianji/temp" - # 给出针对回答的知识 并用md展示 class WriteMarkDown(Action): @@ -44,8 +42,8 @@ async def run(self, instruction: str): knowledge_key = json_from_data["festival"] + json_from_data["requirement"] knowledge = get_docs_list_query_zhipuai( query_str=knowledge_key, - loader_file_path=KNOWLEDGE_PATH, - persist_directory=SAVE_PATH, + loader_file_path=KNOWLEDGE_PATH.WISHES.path(), + persist_directory=EMBEDDING_PATH.WISHES.path(filename="zhipuai"), k_num=5, ) print("knowledge:\n", knowledge) diff --git a/tianji/knowledges/__init__.py b/tianji/knowledges/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tianji/knowledges/config.py b/tianji/knowledges/config.py new file mode 100644 index 0000000..c352067 --- /dev/null +++ 
b/tianji/knowledges/config.py @@ -0,0 +1,38 @@ +import os +from enum import Enum +from metagpt.const import METAGPT_ROOT as TIANJI_PATH +from metagpt.logs import logger + +""" +def get_all_knowledge_paths(knowledge_path: str = METAGPT_ROOT, suffix: str = ".txt"): + files = os.listdir(knowledge_path) + all_knowledge_paths = [] + for file in files: + file_path = os.path.join(knowledge_path, file) + if os.path.isdir(file_path): + all_knowledge_paths.extend(get_all_knowledge_paths(file_path)) + else: + if file_path.endswith(suffix): + all_knowledge_paths.append(knowledge_path) + return all_knowledge_paths +""" + + +class KNOWLEDGE_PATH(str, Enum): + WISHES = TIANJI_PATH / "tianji/knowledges/04-Wishes" + + def path(self): + load_path = self.value + logger.info("加载知识库:" + load_path) + + return os.path.join(load_path, "knowledges.txt") + + +class EMBEDDING_PATH(str, Enum): + WISHES = TIANJI_PATH / "temp/embedding/04-Wishes" + + def path(self, filename="other"): + save_path = os.path.join(self.value, filename) + logger.info("Embedding 路径:" + save_path) + + return save_path diff --git a/tianji/utils/knowledge_tool.py b/tianji/utils/knowledge_tool.py index 9fca673..584b8ae 100644 --- a/tianji/utils/knowledge_tool.py +++ b/tianji/utils/knowledge_tool.py @@ -28,8 +28,8 @@ def get_docs_list_query_openai( db = Chroma(embedding_function=embeddings, persist_directory=persist_directory) else: loader = TextLoader(file_path=loader_file_path, encoding="utf-8") - print("loader_file_path", loader) - print(os.path.exists(loader_file_path)) + # print("loader_file_path", loader) + # print(os.path.exists(loader_file_path)) documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0) docs = text_splitter.split_documents(documents) @@ -166,8 +166,8 @@ def get_docs_list_query_zhipuai( db = Chroma(embedding_function=embeddings, persist_directory=persist_directory) else: loader = TextLoader(file_path=loader_file_path, encoding="utf-8") - print("loader_file_path", loader) - print(os.path.exists(loader_file_path)) + # print("loader_file_path", loader) + # print(os.path.exists(loader_file_path)) documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0) docs = text_splitter.split_documents(documents)
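
Note (illustrative, not part of the patch series above): after these four patches, the three renamed agents are chained in run/metagpt_webui.py as WenDao -> QianBianZhe -> RuYi. The sketch below condenses that flow into a standalone script; the class names, the SharedDataSingleton fields, and the "YES|"/"NO|" reply convention are taken directly from the diffs, while the sample user input and the asyncio entry point are assumptions added for demonstration only.

import asyncio

from tianji.agents.metagpt_agents.wendao import WenDao
from tianji.agents.metagpt_agents.qianbianzhe import QianBianZhe
from tianji.agents.metagpt_agents.ruyi import RuYi
from tianji.utils.json_from import SharedDataSingleton


async def demo() -> None:
    shared = SharedDataSingleton.get_instance()

    # WenDao analyses the accumulated chat history and either asks a follow-up
    # question ("NO|...") or signals that all requirement slots are filled ("YES|...").
    shared.first_status_user_history += "\n" + "user:" + "元旦下午我想给哥哥送一段祝福"  # sample input (assumption)
    reply = await WenDao().run(shared.first_status_user_history)
    status, text = reply.content.split("|", 1)
    if status == "NO":
        print("follow-up question:", text)
        return

    # Once the slots in shared.json_from_data are complete, QianBianZhe drafts and
    # stylizes the blessing, and RuYi writes the knowledge-backed markdown summary.
    blessing = await QianBianZhe().run(" ")
    summary = await RuYi().run(" ")
    print(blessing.content)
    print(summary.content)


if __name__ == "__main__":
    asyncio.run(demo())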