diff --git a/dbgpt/rag/embedding/embeddings.py b/dbgpt/rag/embedding/embeddings.py
index 6735765a0..b4c0e64fc 100644
--- a/dbgpt/rag/embedding/embeddings.py
+++ b/dbgpt/rag/embedding/embeddings.py
@@ -803,9 +803,10 @@ def embed_query(self, text: str) -> List[float]:
                 "Please install ollama by command `pip install ollama"
             ) from e
         try:
-            return (
-                Client(self.api_url).embeddings(model=self.model_name, prompt=text)
-            )["embedding"]
+            embedding = Client(self.api_url).embeddings(
+                model=self.model_name, prompt=text
+            )
+            return list(embedding["embedding"])
         except ollama.ResponseError as e:
             raise ValueError(f"**Ollama Response Error, Please CheckErrorInfo.**: {e}")

@@ -839,7 +840,7 @@ async def aembed_query(self, text: str) -> List[float]:
             embedding = await AsyncClient(host=self.api_url).embeddings(
                 model=self.model_name, prompt=text
             )
-            return embedding["embedding"]
+            return list(embedding["embedding"])
         except ollama.ResponseError as e:
             raise ValueError(f"**Ollama Response Error, Please CheckErrorInfo.**: {e}")
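Both hunks above apply the same normalization. A minimal stand-alone sketch of it, assuming a local Ollama server at the default address (the host URL and model name here are placeholders, not part of the patch):

.. code-block:: python

    # Sketch only: the Ollama embeddings endpoint may hand back a non-list
    # sequence, so the vector is coerced with list() before reaching callers.
    from ollama import Client

    client = Client("http://localhost:11434")  # assumed local Ollama server
    response = client.embeddings(model="bge-m3", prompt="hello world")
    vector = list(response["embedding"])  # guarantees a plain List[float]
    print(len(vector))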
diff --git a/examples/__main__.py b/examples/__main__.py
new file mode 100644
index 000000000..d1282645e
--- /dev/null
+++ b/examples/__main__.py
@@ -0,0 +1,32 @@
+# TODO add example run code here
+
+import asyncio
+
+# Agents examples
+from .agents.auto_plan_agent_dialogue_example import main as auto_plan_main
+from .agents.awel_layout_agents_chat_examples import main as awel_layout_main
+from .agents.custom_tool_agent_example import main as custom_tool_main
+from .agents.plugin_agent_dialogue_example import main as plugin_main
+from .agents.retrieve_summary_agent_dialogue_example import (
+    main as retrieve_summary_main,
+)
+from .agents.sandbox_code_agent_example import main as sandbox_code_main
+from .agents.single_agent_dialogue_example import main as single_agent_main
+from .agents.sql_agent_dialogue_example import main as sql_main
+
+if __name__ == "__main__":
+
+    # Run the examples
+
+    ## Agent examples
+    asyncio.run(auto_plan_main())
+    asyncio.run(awel_layout_main())
+    asyncio.run(custom_tool_main())
+    asyncio.run(retrieve_summary_main())
+    asyncio.run(plugin_main())
+    asyncio.run(sandbox_code_main())
+    asyncio.run(single_agent_main())
+    asyncio.run(sql_main())
+
+    ## awel examples
+    print("hello world!")
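Because the new module is `examples/__main__.py`, the whole suite can presumably be launched with `python -m examples` from the repository root. A single example can also be driven on its own; a hypothetical driver, with placeholder env values mirroring the docstrings in this patch:

.. code-block:: python

    import asyncio
    import os

    # Placeholder credentials; real values come from your SiliconFlow account.
    os.environ.setdefault("SILICONFLOW_API_KEY", "sk-xx")
    os.environ.setdefault("SILICONFLOW_API_BASE", "https://xx:80/v1")

    # Works when the repository root is on sys.path.
    from examples.agents.single_agent_dialogue_example import main

    asyncio.run(main())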
diff --git a/examples/agents/auto_plan_agent_dialogue_example.py b/examples/agents/auto_plan_agent_dialogue_example.py
index fa8a8501e..a423eb039 100644
--- a/examples/agents/auto_plan_agent_dialogue_example.py
+++ b/examples/agents/auto_plan_agent_dialogue_example.py
@@ -5,9 +5,9 @@
     Execute the following command in the terminal:
     Set env params.
     .. code-block:: shell
-
-        export OPENAI_API_KEY=sk-xx
-        export OPENAI_API_BASE=https://xx:80/v1
+
+        export SILICONFLOW_API_KEY=sk-xx
+        export SILICONFLOW_API_BASE=https://xx:80/v1

     run example.
     ..code-block:: shell
@@ -33,13 +33,14 @@ async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

     agent_memory = AgentMemory()

-    from dbgpt.model.proxy.llms.tongyi import TongyiLLMClient
-    llm_client = TongyiLLMClient(
-        model_alias="qwen2-72b-instruct",
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
     )

     context: AgentContext = AgentContext(
diff --git a/examples/agents/awel_layout_agents_chat_examples.py b/examples/agents/awel_layout_agents_chat_examples.py
index 579791a9b..c38a2a9a7 100644
--- a/examples/agents/awel_layout_agents_chat_examples.py
+++ b/examples/agents/awel_layout_agents_chat_examples.py
@@ -6,14 +6,13 @@
     Set env params.
     .. code-block:: shell

-        export OPENAI_API_KEY=sk-xx
-        export OPENAI_API_BASE=https://xx:80/v1
+        export SILICONFLOW_API_KEY=sk-xx
+        export SILICONFLOW_API_BASE=https://xx:80/v1

     run example.
     ..code-block:: shell

         python examples/agents/awel_layout_agents_chat_examples.py
 """
-
 import asyncio
 import os
@@ -34,15 +33,16 @@ async def main():
-    from dbgpt.model.proxy import OpenAILLMClient

     agent_memory = AgentMemory()
     agent_memory.gpts_memory.init(conv_id="test456")
     try:
-        from dbgpt.model.proxy.llms.tongyi import TongyiLLMClient
+        from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-        llm_client = TongyiLLMClient(
-            model_alias="qwen2-72b-instruct",
+        llm_client = SiliconFlowLLMClient(
+            model_alias=os.getenv(
+                "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+            ),
         )
         context: AgentContext = AgentContext(conv_id="test456", gpts_app_name="信息析助手")
diff --git a/examples/agents/custom_tool_agent_example.py b/examples/agents/custom_tool_agent_example.py
index c49ef5924..5b8a14e9a 100644
--- a/examples/agents/custom_tool_agent_example.py
+++ b/examples/agents/custom_tool_agent_example.py
@@ -44,12 +44,17 @@ def count_directory_files(path: Annotated[str, Doc("The directory path")]) -> int:


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
-
-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
-    context: AgentContext = AgentContext(conv_id="test456")
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="test456")
+
+    context: AgentContext = AgentContext(conv_id="test456", gpts_app_name="工具助手")

     tools = ToolPack([simple_calculator, count_directory_files])
@@ -77,7 +82,7 @@ async def main():
     )

     # dbgpt-vis message infos
-    print(await agent_memory.gpts_memory.one_chat_completions("test456"))
+    print(await agent_memory.gpts_memory.app_link_chat_message("test456"))


 if __name__ == "__main__":
diff --git a/examples/agents/plugin_agent_dialogue_example.py b/examples/agents/plugin_agent_dialogue_example.py
index 300f6e0fd..189e2fee3 100644
--- a/examples/agents/plugin_agent_dialogue_example.py
+++ b/examples/agents/plugin_agent_dialogue_example.py
@@ -6,8 +6,8 @@
     Set env params.
     .. code-block:: shell

-        export OPENAI_API_KEY=sk-xx
-        export OPENAI_API_BASE=https://xx:80/v1
+        export SILICONFLOW_API_KEY=sk-xx
+        export SILICONFLOW_API_BASE=https://xx:80/v1

     run example.
     ..code-block:: shell
@@ -20,19 +20,24 @@
 from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
 from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
 from dbgpt.agent.resource import AutoGPTPluginToolPack
+from dbgpt.configs.model_config import ROOT_PATH

-current_dir = os.getcwd()
-parent_dir = os.path.dirname(current_dir)
-test_plugin_dir = os.path.join(parent_dir, "test_files/plugins")
+test_plugin_dir = os.path.join(ROOT_PATH, "examples/test_files/plugins")


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
-    context: AgentContext = AgentContext(conv_id="test456")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="test456")
+
+    context: AgentContext = AgentContext(conv_id="test456", gpts_app_name="插件对话助手")

     tools = AutoGPTPluginToolPack(test_plugin_dir)
@@ -54,7 +59,7 @@ async def main():
     )

     # dbgpt-vis message infos
-    print(await agent_memory.gpts_memory.one_chat_completions("test456"))
+    print(await agent_memory.gpts_memory.app_link_chat_message("test456"))


 if __name__ == "__main__":
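The path hunk above (and the similar one in sql_agent_dialogue_example.py later in this patch) shares one motivation: os.getcwd() made the plugin directory depend on where the script was launched, while ROOT_PATH anchors it to the repository root. A short sketch of the resulting resolution, using only names from the patch:

.. code-block:: python

    import os

    from dbgpt.configs.model_config import ROOT_PATH

    # Stable regardless of the current working directory.
    test_plugin_dir = os.path.join(ROOT_PATH, "examples/test_files/plugins")
    print(test_plugin_dir)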
""" - import asyncio import logging +import os from typing import Optional, Tuple from dbgpt.agent import ( @@ -270,11 +270,28 @@ async def correctness_check( async def main(): - from dbgpt.model.proxy import OpenAILLMClient + from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient - llm_client = OpenAILLMClient(model_alias="gpt-4o-mini") + llm_client = SiliconFlowLLMClient( + model_alias=os.getenv( + "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct" + ), + ) context: AgentContext = AgentContext(conv_id="test123") - agent_memory = AgentMemory(HybridMemory[AgentMemoryFragment].from_chroma()) + + # TODO Embedding and Rerank model refactor + from dbgpt.rag.embedding import OpenAPIEmbeddings + + silicon_embeddings = OpenAPIEmbeddings( + api_url=os.getenv("SILICONFLOW_API_BASE") + "/embeddings", + api_key=os.getenv("SILICONFLOW_API_KEY"), + model_name="BAAI/bge-large-zh-v1.5", + ) + agent_memory = AgentMemory( + HybridMemory[AgentMemoryFragment].from_chroma( + embeddings=silicon_embeddings, + ) + ) agent_memory.gpts_memory.init("test123") coder = ( diff --git a/examples/agents/single_agent_dialogue_example.py b/examples/agents/single_agent_dialogue_example.py index d9996dfe6..09576285f 100644 --- a/examples/agents/single_agent_dialogue_example.py +++ b/examples/agents/single_agent_dialogue_example.py @@ -17,28 +17,21 @@ import asyncio import os -from dbgpt.agent import ( - AgentContext, - AgentMemory, - AgentMemoryFragment, - HybridMemory, - LLMConfig, - UserProxyAgent, -) +from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent async def main(): - from dbgpt.model.proxy import OpenAILLMClient + from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient - # llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo") - from dbgpt.model.proxy.llms.tongyi import TongyiLLMClient - - llm_client = TongyiLLMClient( - model_alias="qwen2-72b-instruct", + llm_client = SiliconFlowLLMClient( + model_alias=os.getenv( + "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct" + ), ) - context: AgentContext = AgentContext(conv_id="test123") + context: AgentContext = AgentContext(conv_id="test123", gpts_app_name="代码助手") + agent_memory = AgentMemory() agent_memory.gpts_memory.init(conv_id="test123") try: diff --git a/examples/agents/single_summary_agent_dialogue_example.py b/examples/agents/single_summary_agent_dialogue_example.py index 81991258f..d7dc9539f 100644 --- a/examples/agents/single_summary_agent_dialogue_example.py +++ b/examples/agents/single_summary_agent_dialogue_example.py @@ -15,18 +15,25 @@ """ import asyncio +import os from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent async def summary_example_with_success(): - from dbgpt.model.proxy import OpenAILLMClient + from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient - llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo") + llm_client = SiliconFlowLLMClient( + model_alias=os.getenv( + "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct" + ), + ) context: AgentContext = AgentContext(conv_id="summarize") agent_memory = AgentMemory() + agent_memory.gpts_memory.init(conv_id="summarize") + summarizer = ( await SummaryAssistantAgent() .bind(context) @@ -71,16 +78,22 @@ async def summary_example_with_success(): ) # dbgpt-vis message infos - print(await 
diff --git a/examples/agents/sandbox_code_agent_example.py b/examples/agents/sandbox_code_agent_example.py
index 67cfdde1a..501158916 100644
--- a/examples/agents/sandbox_code_agent_example.py
+++ b/examples/agents/sandbox_code_agent_example.py
@@ -10,9 +10,9 @@
 environment. The code execution environment is isolated from the host system,
 preventing access to the internet and other external resources.
 """
-
 import asyncio
 import logging
+import os
 from typing import Optional, Tuple

 from dbgpt.agent import (
@@ -270,11 +270,28 @@ async def correctness_check(


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-4o-mini")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     context: AgentContext = AgentContext(conv_id="test123")
-    agent_memory = AgentMemory(HybridMemory[AgentMemoryFragment].from_chroma())
+
+    # TODO Embedding and Rerank model refactor
+    from dbgpt.rag.embedding import OpenAPIEmbeddings
+
+    silicon_embeddings = OpenAPIEmbeddings(
+        api_url=os.getenv("SILICONFLOW_API_BASE") + "/embeddings",
+        api_key=os.getenv("SILICONFLOW_API_KEY"),
+        model_name="BAAI/bge-large-zh-v1.5",
+    )
+    agent_memory = AgentMemory(
+        HybridMemory[AgentMemoryFragment].from_chroma(
+            embeddings=silicon_embeddings,
+        )
+    )
     agent_memory.gpts_memory.init("test123")

     coder = (
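The sandbox example now builds its hybrid memory on an OpenAI-compatible embeddings endpoint. A minimal sketch of that client on its own, assuming SILICONFLOW_API_BASE and SILICONFLOW_API_KEY are exported as in the example docstrings (embed_documents is assumed from the dbgpt Embeddings interface):

.. code-block:: python

    import os

    from dbgpt.rag.embedding import OpenAPIEmbeddings

    embeddings = OpenAPIEmbeddings(
        api_url=os.environ["SILICONFLOW_API_BASE"] + "/embeddings",
        api_key=os.environ["SILICONFLOW_API_KEY"],
        model_name="BAAI/bge-large-zh-v1.5",  # same model as the hunk above
    )
    # Batch-embed two short documents and inspect the vector shape.
    vectors = embeddings.embed_documents(["DB-GPT agents", "sandbox example"])
    print(len(vectors), len(vectors[0]))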
diff --git a/examples/agents/single_agent_dialogue_example.py b/examples/agents/single_agent_dialogue_example.py
index d9996dfe6..09576285f 100644
--- a/examples/agents/single_agent_dialogue_example.py
+++ b/examples/agents/single_agent_dialogue_example.py
@@ -17,28 +17,21 @@
 import asyncio
 import os

-from dbgpt.agent import (
-    AgentContext,
-    AgentMemory,
-    AgentMemoryFragment,
-    HybridMemory,
-    LLMConfig,
-    UserProxyAgent,
-)
+from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
 from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    # llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
-    from dbgpt.model.proxy.llms.tongyi import TongyiLLMClient
-
-    llm_client = TongyiLLMClient(
-        model_alias="qwen2-72b-instruct",
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
     )
-    context: AgentContext = AgentContext(conv_id="test123")
+    context: AgentContext = AgentContext(conv_id="test123", gpts_app_name="代码助手")
+
     agent_memory = AgentMemory()
     agent_memory.gpts_memory.init(conv_id="test123")
     try:
diff --git a/examples/agents/single_summary_agent_dialogue_example.py b/examples/agents/single_summary_agent_dialogue_example.py
index 81991258f..d7dc9539f 100644
--- a/examples/agents/single_summary_agent_dialogue_example.py
+++ b/examples/agents/single_summary_agent_dialogue_example.py
@@ -15,18 +15,25 @@
 """

 import asyncio
+import os

 from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
 from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent


 async def summary_example_with_success():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     context: AgentContext = AgentContext(conv_id="summarize")

     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="summarize")
+
     summarizer = (
         await SummaryAssistantAgent()
         .bind(context)
@@ -71,16 +78,22 @@ async def summary_example_with_success():
     )

     # dbgpt-vis message infos
-    print(await agent_memory.gpts_memory.one_chat_completions("summarize"))
+    print(await agent_memory.gpts_memory.app_link_chat_message("summarize"))


 async def summary_example_with_faliure():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     context: AgentContext = AgentContext(conv_id="summarize")

     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="summarize")
+
     summarizer = (
         await SummaryAssistantAgent()
         .bind(context)
@@ -92,7 +105,6 @@ async def summary_example_with_faliure():
     user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()

     # Test the failure example
-
     await user_proxy.initiate_chat(
         recipient=summarizer,
         reviewer=user_proxy,
@@ -110,7 +122,7 @@ async def summary_example_with_faliure():
         """,
     )

-    print(await agent_memory.gpts_memory.one_chat_completions("summarize"))
+    print(await agent_memory.gpts_memory.app_link_chat_message("summarize"))


 if __name__ == "__main__":
diff --git a/examples/agents/sql_agent_dialogue_example.py b/examples/agents/sql_agent_dialogue_example.py
index b6dbfc79a..1684d1f66 100644
--- a/examples/agents/sql_agent_dialogue_example.py
+++ b/examples/agents/sql_agent_dialogue_example.py
@@ -20,24 +20,28 @@
 from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
 from dbgpt.agent.expand.data_scientist_agent import DataScientistAgent
 from dbgpt.agent.resource import SQLiteDBResource
+from dbgpt.configs.model_config import ROOT_PATH
 from dbgpt.util.tracer import initialize_tracer

-current_dir = os.getcwd()
-parent_dir = os.path.dirname(current_dir)
-test_plugin_dir = os.path.join(parent_dir, "test_files")
+test_plugin_dir = os.path.join(ROOT_PATH, "test_files")

 initialize_tracer("/tmp/agent_trace.jsonl", create_system_app=True)


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     context: AgentContext = AgentContext(conv_id="test456")
+
     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="test456")

     sqlite_resource = SQLiteDBResource("SQLite Database", f"{test_plugin_dir}/dbgpt.db")
-
     user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()

     sql_boy = (
@@ -56,7 +60,7 @@ async def main():
     )

     ## dbgpt-vis message infos
-    print(await agent_memory.gpts_memory.one_chat_completions("test456"))
+    print(await agent_memory.gpts_memory.app_link_chat_message("test456"))


 if __name__ == "__main__":
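Taken together, these example hunks apply one recurring pattern: initialize the per-conversation GPTs memory before the chat starts, then render the dbgpt-vis messages with app_link_chat_message, which replaces one_chat_completions. As a stand-alone sketch using only names from the patch (the agent setup itself is elided):

.. code-block:: python

    import asyncio

    from dbgpt.agent import AgentMemory


    async def show_messages(conv_id: str) -> None:
        agent_memory = AgentMemory()
        # Must be called before the conversation runs.
        agent_memory.gpts_memory.init(conv_id=conv_id)
        # ... run agents bound to agent_memory here ...
        print(await agent_memory.gpts_memory.app_link_chat_message(conv_id))


    asyncio.run(show_messages("test456"))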
diff --git a/examples/awel/simple_rag_summary_example.py b/examples/awel/simple_rag_summary_example.py
index 724c71ec7..adccbbf6d 100644
--- a/examples/awel/simple_rag_summary_example.py
+++ b/examples/awel/simple_rag_summary_example.py
@@ -31,7 +31,7 @@
 from dbgpt._private.pydantic import BaseModel, Field
 from dbgpt.core.awel import DAG, HttpTrigger, MapOperator
 from dbgpt.model.proxy import OpenAILLMClient
-from dbgpt.rag.knowledge import KnowledgeType
+from dbgpt.rag.knowledge.base import KnowledgeType
 from dbgpt.rag.operators import KnowledgeOperator, SummaryAssemblerOperator

diff --git a/examples/notebook/agent_auto_plan_dialogue_example.ipynb b/examples/notebook/agent_auto_plan_dialogue_example.ipynb
deleted file mode 100644
index 5c861e558..000000000
--- a/examples/notebook/agent_auto_plan_dialogue_example.ipynb
+++ /dev/null
@@ -1,118 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2024-04-10T04:38:14.228948Z",
-     "start_time": "2024-04-10T04:38:14.224972Z"
-    }
-   },
-   "source": [
-    "import nest_asyncio\n",
-    "from dbgpt.agent import (\n",
-    "    AgentContext,\n",
-    "    GptsMemory,\n",
-    "    LLMConfig,\n",
-    "    ResourceLoader,\n",
-    "    UserProxyAgent,\n",
-    ")\n",
-    "from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent\n",
-    "from dbgpt.agent.plan import AutoPlanChatManager\n",
-    "from dbgpt.model.proxy import OpenAILLMClient\n",
-    "\n",
-    "nest_asyncio.apply()"
-   ],
-   "execution_count": 7,
-   "outputs": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "is_executing": true
-   },
-   "source": [
-    "# Set your api key and api base url\n",
-    "# os.environ[\"OPENAI_API_KEY\"] = \"Your API\"\n",
-    "# os.environ[\"OPENAI_API_BASE\"] = \"https://api.openai.com/v1\""
-   ],
-   "outputs": []
-  },
-  {
-   "cell_type": "code",
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2024-04-10T04:19:47.838081Z",
-     "start_time": "2024-04-10T04:17:54.465616Z"
-    }
-   },
-   "source": [
-    "llm_client = OpenAILLMClient(model_alias=\"gpt-4\")\n",
-    "context: AgentContext = AgentContext(conv_id=\"test456\", gpts_app_name=\"代码分析助手\")\n",
-    "\n",
-    "default_memory = GptsMemory()\n",
-    "\n",
-    "resource_loader = ResourceLoader()\n",
-    "\n",
-    "coder = (\n",
-    "    await CodeAssistantAgent()\n",
-    "    .bind(context)\n",
-    "    .bind(LLMConfig(llm_client=llm_client))\n",
-    "    .bind(default_memory)\n",
-    "    .bind(resource_loader)\n",
-    "    .build()\n",
-    ")\n",
-    "\n",
-    "manager = (\n",
-    "    await AutoPlanChatManager()\n",
-    "    .bind(context)\n",
-    "    .bind(default_memory)\n",
-    "    .bind(LLMConfig(llm_client=llm_client))\n",
-    "    .build()\n",
-    ")\n",
-    "manager.hire([coder])\n",
-    "\n",
-    "user_proxy = await UserProxyAgent().bind(context).bind(default_memory).build()\n",
-    "\n",
-    "\n",
-    "await user_proxy.initiate_chat(\n",
-    "    recipient=manager,\n",
-    "    reviewer=user_proxy,\n",
-    "    message=\"Obtain simple information about issues in the repository 'eosphoros-ai/DB-GPT' in the past three days and analyze the data. Create a Markdown table grouped by day and status.\",\n",
-    "    # message=\"Find papers on gpt-4 in the past three weeks on arxiv, and organize their titles, authors, and links into a markdown table\",\n",
-    "    # message=\"find papers on LLM applications from arxiv in the last month, create a markdown table of different domains.\",\n",
-    ")"
-   ],
-   "execution_count": 4,
-   "outputs": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "dbgpt_env",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.10.13"
-  },
-  "orig_nbformat": 4,
-  "vscode": {
-   "interpreter": {
-    "hash": "f8b6b0e04f284afd2fbb5e4163e7d03bbdc845eaeb6e8c78fae04fce6b51dae6"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/examples/notebook/agent_awel_layout_dialogue_example.ipynb b/examples/notebook/agent_awel_layout_dialogue_example.ipynb
deleted file mode 100644
index d7b583ab6..000000000
--- a/examples/notebook/agent_awel_layout_dialogue_example.ipynb
+++ /dev/null
@@ -1,135 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "id": "6de2e0bb",
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2024-04-10T04:37:21.832993Z",
-     "start_time": "2024-04-10T04:37:21.828221Z"
-    }
-   },
-   "source": [
-    "import os\n",
-    "import nest_asyncio\n",
-    "from dbgpt.agent import (\n",
-    "    AgentContext,\n",
-    "    AgentResource,\n",
-    "    GptsMemory,\n",
-    "    LLMConfig,\n",
-    "    ResourceLoader,\n",
-    "    ResourceType,\n",
-    "    UserProxyAgent,\n",
-    ")\n",
-    "from dbgpt.agent.expand.plugin_assistant_agent import PluginAssistantAgent\n",
-    "from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent\n",
-    "from dbgpt.agent.plan import WrappedAWELLayoutManager\n",
-    "from dbgpt.agent.resource import PluginFileLoadClient\n",
-    "from dbgpt.configs.model_config import ROOT_PATH\n",
-    "from dbgpt.model.proxy import OpenAILLMClient\n",
-    "\n",
-    "nest_asyncio.apply()\n",
-    "test_plugin_dir = os.path.join(ROOT_PATH, \"examples/test_files/plugins\")"
-   ],
-   "execution_count": 11,
-   "outputs": []
-  },
-  {
-   "cell_type": "code",
-   "id": "437b9c40",
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2024-04-10T04:37:27.592117Z",
-     "start_time": "2024-04-10T04:37:23.569538Z"
-    }
-   },
-   "source": [
-    "# os.environ['OPENAI_API_KEY']=\"sk-x\"\n",
-    "# os.environ['OPENAI_API_BASE']=\"https://proxy_url/v1\"\n",
-    "# os.environ['SEARCH_ENGINE']=\"baidu\"\n",
-    "# os.environ['BAIDU_COOKIE']=\"\"\"your baidu cookie\"\"\"\n",
-    "\n",
-    "llm_client = OpenAILLMClient(model_alias=\"gpt-3.5-turbo\")\n",
-    "context: AgentContext = AgentContext(conv_id=\"test456\", gpts_app_name=\"信息析助手\")\n",
-    "\n",
-    "default_memory = GptsMemory()\n",
-    "\n",
-    "resource_loader = ResourceLoader()\n",
-    "plugin_file_loader = PluginFileLoadClient()\n",
-    "resource_loader.register_resource_api(plugin_file_loader)\n",
-    "\n",
-    "plugin_resource = AgentResource(\n",
-    "    type=ResourceType.Plugin,\n",
-    "    name=\"test\",\n",
-    "    value=test_plugin_dir,\n",
-    ")\n",
-    "\n",
-    "tool_engineer = (\n",
-    "    await PluginAssistantAgent()\n",
-    "    .bind(context)\n",
-    "    .bind(LLMConfig(llm_client=llm_client))\n",
-    "    .bind(default_memory)\n",
-    "    .bind([plugin_resource])\n",
-    "    .bind(resource_loader)\n",
-    "    .build()\n",
-    ")\n",
-    "summarizer = (\n",
-    "    await SummaryAssistantAgent()\n",
-    "    .bind(context)\n",
-    "    .bind(default_memory)\n",
-    "    .bind(LLMConfig(llm_client=llm_client))\n",
-    "    .build()\n",
-    ")\n",
-    "\n",
-    "manager = (\n",
-    "    await WrappedAWELLayoutManager()\n",
-    "    .bind(context)\n",
-    "    .bind(default_memory)\n",
-    "    .bind(LLMConfig(llm_client=llm_client))\n",
-    "    .build()\n",
-    ")\n",
-    "manager.hire([tool_engineer, summarizer])\n",
-    "\n",
-    "user_proxy = await UserProxyAgent().bind(context).bind(default_memory).build()\n",
-    "\n",
-    "await user_proxy.initiate_chat(\n",
-    "    recipient=manager,\n",
-    "    reviewer=user_proxy,\n",
-    "    message=\"查询成都今天天气\",\n",
-    "    # message=\"查询今天的最新热点财经新闻\",\n",
-    ")"
-   ],
-   "execution_count": 12,
-   "outputs": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "7ded4107",
-   "metadata": {},
-   "source": [],
-   "outputs": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.10.13"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}