diff --git a/Dockerfile b/Dockerfile
index 2b8582fd..3071d572 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
FROM python:3.11-slim
WORKDIR /app
COPY . .
-RUN pip install flask praisonai==2.0.15 gunicorn markdown
+RUN pip install flask praisonai==2.0.16 gunicorn markdown
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]
diff --git a/README.md b/README.md
index 2b7426a7..960468e2 100644
--- a/README.md
+++ b/README.md
@@ -464,33 +464,7 @@ if __name__ == "__main__":
## Commands to Install Dependencies:
-1. **Install all dependencies, including dev dependencies:**
-
- ```sh
- poetry install
- ```
-
-2. **Install only documentation dependencies:**
-
- ```sh
- poetry install --with docs
- ```
-
-3. **Install only test dependencies:**
-
- ```sh
- poetry install --with test
- ```
-
-4. **Install only dev dependencies:**
-
- ```sh
- poetry install --with dev
- ```
-
-This configuration ensures that your development dependencies are correctly categorized and installed as needed.
-
-### Using uv (Fast Python Package Installer)
+### Using uv
```bash
# Install uv if you haven't already
pip install uv
diff --git a/agents.yaml b/agents.yaml
index 5645e390..5a3f7f3b 100644
--- a/agents.yaml
+++ b/agents.yaml
@@ -9,13 +9,17 @@ roles:
role: Researcher
llm:
model: "gpt-4o"
+ reflect_llm:
+ model: "gpt-4o"
+ min_reflect: 2
+ max_reflect: 4
tasks:
research_task:
description: Research about Mars, its environment, and the feasibility of
a cat being on Mars. Also, research about cat behavior and characteristics.
expected_output: Document with research findings on Mars and cats.
tools:
- - 'search_tool'
+ - search_tool
narrative_designer:
backstory: Skilled in narrative development, with a focus on creating engaging
stories.
diff --git a/agents/example.py b/agents/example.py
index e3b7f733..139bb8ee 100644
--- a/agents/example.py
+++ b/agents/example.py
@@ -40,7 +40,10 @@ def internet_search_tool(query) -> list:
allow_delegation=False,
tools=[internet_search_tool],
llm="gpt-4o",
- markdown=True
+ markdown=True,
+ reflect_llm="gpt-4o",
+ min_reflect=2,
+ max_reflect=4
)
writer = Agent(
name="Writer",
diff --git a/agents/praisonaiagents/agent/agent.py b/agents/praisonaiagents/agent/agent.py
index 726c5a2b..64704c9b 100644
--- a/agents/praisonaiagents/agent/agent.py
+++ b/agents/praisonaiagents/agent/agent.py
@@ -21,27 +21,49 @@ def _generate_tool_definition(self, function_name):
"""
Generate a tool definition from a function name by inspecting the function.
"""
+ logging.debug(f"Attempting to generate tool definition for: {function_name}")
+
# First try to get the tool definition if it exists
tool_def_name = f"{function_name}_definition"
tool_def = globals().get(tool_def_name)
+ logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")
+
if not tool_def:
import __main__
tool_def = getattr(__main__, tool_def_name, None)
+ logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")
if tool_def:
+ logging.debug(f"Found tool definition: {tool_def}")
return tool_def
- # If no definition exists, try to generate one from the function
- func = globals().get(function_name)
+ # Try to find the function in the agent's tools list first
+ func = None
+ for tool in self.tools:
+ if callable(tool) and getattr(tool, '__name__', '') == function_name:
+ func = tool
+ break
+
+ logging.debug(f"Looking for {function_name} in agent tools: {func is not None}")
+
+ # If not found in tools, try globals and main
if not func:
- import __main__
- func = getattr(__main__, function_name, None)
+ func = globals().get(function_name)
+ logging.debug(f"Looking for {function_name} in globals: {func is not None}")
+
+ if not func:
+ import __main__
+ func = getattr(__main__, function_name, None)
+ logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
if not func or not callable(func):
+ logging.debug(f"Function {function_name} not found or not callable")
return None
import inspect
sig = inspect.signature(func)
+ logging.debug(f"Function signature: {sig}")
+
parameters = {
"type": "object",
"properties": {},
@@ -50,10 +72,13 @@ def _generate_tool_definition(self, function_name):
# Parse docstring for parameter descriptions
docstring = inspect.getdoc(func)
+ logging.debug(f"Function docstring: {docstring}")
+
param_descriptions = {}
if docstring:
import re
param_section = re.split(r'\s*Args:\s*', docstring)
+ logging.debug(f"Param section split: {param_section}")
if len(param_section) > 1:
param_lines = param_section[1].split('\n')
for line in param_lines:
@@ -61,6 +86,8 @@ def _generate_tool_definition(self, function_name):
if line and ':' in line:
param_name, param_desc = line.split(':', 1)
param_descriptions[param_name.strip()] = param_desc.strip()
+
+ logging.debug(f"Parameter descriptions: {param_descriptions}")
for name, param in sig.parameters.items():
param_type = "string" # Default type
@@ -83,11 +110,13 @@ def _generate_tool_definition(self, function_name):
parameters["properties"][name] = param_info
if param.default == inspect.Parameter.empty:
parameters["required"].append(name)
+
+ logging.debug(f"Generated parameters: {parameters}")
# Extract description from docstring
description = docstring.split('\n')[0] if docstring else f"Function {function_name}"
-
- return {
+
+ tool_def = {
"type": "function",
"function": {
"name": function_name,
@@ -95,6 +124,8 @@ def _generate_tool_definition(self, function_name):
"parameters": parameters
}
}
+ logging.debug(f"Generated tool definition: {tool_def}")
+ return tool_def
def __init__(
self,
@@ -102,7 +133,7 @@ def __init__(
role: str,
goal: str,
backstory: str,
- llm: Optional[Union[str, Any]] = "gpt-4o-mini",
+ llm: Optional[Union[str, Any]] = "gpt-4o",
tools: Optional[List[Any]] = None,
function_calling_llm: Optional[Any] = None,
max_iter: int = 20,
@@ -125,7 +156,9 @@ def __init__(
use_system_prompt: Optional[bool] = True,
markdown: bool = True,
self_reflect: bool = True,
- max_reflection_iter: int = 3
+ max_reflect: int = 3,
+ min_reflect: int = 1,
+ reflect_llm: Optional[str] = None
):
self.name = name
self.role = role
@@ -155,28 +188,45 @@ def __init__(
self.chat_history = []
self.markdown = markdown
self.self_reflect = self_reflect
- self.max_reflection_iter = max_reflection_iter
-
+ self.max_reflect = max_reflect
+ self.min_reflect = min_reflect
+ self.reflect_llm = reflect_llm
def execute_tool(self, function_name, arguments):
"""
Execute a tool dynamically based on the function name and arguments.
"""
logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")
- # Try to get the function from globals first
- func = globals().get(function_name)
+ # Try to find the function in the agent's tools list first
+ func = None
+ for tool in self.tools:
+ if callable(tool) and getattr(tool, '__name__', '') == function_name:
+ func = tool
+ break
+
+ logging.debug(f"Looking for {function_name} in agent tools: {func is not None}")
+
+ # If not found in tools, try globals and main
if not func:
- # Then try to get from the main module
- import __main__
- func = getattr(__main__, function_name, None)
+ func = globals().get(function_name)
+ logging.debug(f"Looking for {function_name} in globals: {func is not None}")
+
+ if not func:
+ import __main__
+ func = getattr(__main__, function_name, None)
+ logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
if func and callable(func):
try:
return func(**arguments)
except Exception as e:
- return {"error": str(e)}
+ error_msg = str(e)
+ logging.error(f"Error executing tool {function_name}: {error_msg}")
+ return {"error": error_msg}
- return {"error": f"Tool '{function_name}' is not callable"}
+ error_msg = f"Tool '{function_name}' is not callable"
+ logging.error(error_msg)
+ return {"error": error_msg}
def clear_history(self):
self.chat_history = []
@@ -287,8 +337,8 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True):
def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
if self.use_system_prompt:
system_prompt = f"""{self.backstory}\n
- Your Role: {self.role}\n
- Your Goal: {self.goal}
+Your Role: {self.role}\n
+Your Goal: {self.goal}
"""
else:
system_prompt = None
@@ -361,17 +411,17 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
return response_text
reflection_prompt = f"""
- Reflect on your previous response: '{response_text}'.
- Identify any flaws, improvements, or actions.
- Provide a "satisfactory" status ('yes' or 'no').
- Output MUST be JSON with 'reflection' and 'satisfactory'.
+Reflect on your previous response: '{response_text}'.
+Identify any flaws, improvements, or actions.
+Provide a "satisfactory" status ('yes' or 'no').
+Output MUST be JSON with 'reflection' and 'satisfactory'.
"""
logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
messages.append({"role": "user", "content": reflection_prompt})
try:
reflection_response = client.beta.chat.completions.parse(
- model=self.llm,
+ model=self.reflect_llm if self.reflect_llm else self.llm,
messages=messages,
temperature=temperature,
response_format=ReflectionOutput
@@ -380,35 +430,42 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
reflection_output = reflection_response.choices[0].message.parsed
if self.verbose:
- display_self_reflection(f"Agent {self.name} self reflection: reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
+ display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})
- if reflection_output.satisfactory == "yes":
+ # Only consider satisfactory after minimum reflections
+ if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
if self.verbose:
- display_self_reflection("Agent marked the response as satisfactory")
+ display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections")
+ self.chat_history.append({"role": "user", "content": prompt})
self.chat_history.append({"role": "assistant", "content": response_text})
display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
return response_text
- logging.debug(f"{self.name} reflection not satisfactory, requesting regeneration.")
+ # Check if we've hit max reflections
+ if reflection_count >= self.max_reflect - 1:
+ if self.verbose:
+ display_self_reflection("Maximum reflection count reached, returning current response")
+ self.chat_history.append({"role": "user", "content": prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
+ return response_text
+
+ logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
response_text = response.choices[0].message.content.strip()
+ reflection_count += 1
+ continue # Continue the loop for more reflections
+
except Exception as e:
display_error(f"Error in parsing self-reflection json {e}. Retrying")
logging.error("Reflection parsing failed.", exc_info=True)
messages.append({"role": "assistant", "content": f"Self Reflection failed."})
+ reflection_count += 1
+ continue # Continue even after error to try again
- reflection_count += 1
-
- self.chat_history.append({"role": "user", "content": prompt})
- self.chat_history.append({"role": "assistant", "content": response_text})
-
- if self.verbose:
- logging.info(f"Agent {self.name} final response: {response_text}")
- display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
- return response_text
except Exception as e:
display_error(f"Error in chat: {e}")
return None
\ No newline at end of file
diff --git a/agents/praisonaiagents/agents/agents.py b/agents/praisonaiagents/agents/agents.py
index 5665fadd..8554d3e0 100644
--- a/agents/praisonaiagents/agents/agents.py
+++ b/agents/praisonaiagents/agents/agents.py
@@ -64,8 +64,8 @@ def execute_task(self, task_id):
executor_agent = task.agent
task_prompt = f"""
- You need to do the following task: {task.description}.
- Expected Output: {task.expected_output}.
+You need to do the following task: {task.description}.
+Expected Output: {task.expected_output}.
"""
if task.context:
context_results = ""
diff --git a/agents/pyproject.toml b/agents/pyproject.toml
index a6eb8bd2..bfe3348c 100644
--- a/agents/pyproject.toml
+++ b/agents/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "praisonaiagents"
-version = "0.0.7"
+version = "0.0.12"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
authors = [
{ name="Mervin Praison" }
diff --git a/agents/uv.lock b/agents/uv.lock
index 1df2ebea..cd31375d 100644
--- a/agents/uv.lock
+++ b/agents/uv.lock
@@ -186,7 +186,7 @@ wheels = [
[[package]]
name = "praisonaiagents"
-version = "0.0.7"
+version = "0.0.12"
source = { editable = "." }
dependencies = [
{ name = "openai" },
diff --git a/docs/api/praisonai/deploy.html b/docs/api/praisonai/deploy.html
index 3f021832..a7c9865e 100644
--- a/docs/api/praisonai/deploy.html
+++ b/docs/api/praisonai/deploy.html
@@ -110,7 +110,7 @@
Raises
file.write("FROM python:3.11-slim\n")
file.write("WORKDIR /app\n")
file.write("COPY . .\n")
- file.write("RUN pip install flask praisonai==2.0.15 gunicorn markdown\n")
+ file.write("RUN pip install flask praisonai==2.0.16 gunicorn markdown\n")
file.write("EXPOSE 8080\n")
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
diff --git a/poetry.lock b/poetry.lock
index 94ef68d4..91bcff92 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1483,13 +1483,13 @@ files = [
[[package]]
name = "duckduckgo-search"
-version = "7.1.0"
+version = "7.1.1"
description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine."
optional = true
python-versions = ">=3.8"
files = [
- {file = "duckduckgo_search-7.1.0-py3-none-any.whl", hash = "sha256:2e51901992048108ea5c24d8e2c0373078d5088b802fba23eb124ee4d812c38f"},
- {file = "duckduckgo_search-7.1.0.tar.gz", hash = "sha256:2276d60e3a57171058eaa7f0a57f09876f77e14a32dbdc97acc707aad134bbc3"},
+ {file = "duckduckgo_search-7.1.1-py3-none-any.whl", hash = "sha256:753b2237fd8a9a4cbf06c788aaf0cfc7e00f4623e8f854d0525682ff31770093"},
+ {file = "duckduckgo_search-7.1.1.tar.gz", hash = "sha256:1874e55aa6eac3de2d0e9b556b70f35ed0cd7a4544d7417cf38216275b3de009"},
]
[package.dependencies]
@@ -2525,13 +2525,13 @@ test = ["objgraph", "psutil"]
[[package]]
name = "griffe"
-version = "1.5.1"
+version = "1.5.4"
description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API."
optional = false
python-versions = ">=3.9"
files = [
- {file = "griffe-1.5.1-py3-none-any.whl", hash = "sha256:ad6a7980f8c424c9102160aafa3bcdf799df0e75f7829d75af9ee5aef656f860"},
- {file = "griffe-1.5.1.tar.gz", hash = "sha256:72964f93e08c553257706d6cd2c42d1c172213feb48b2be386f243380b405d4b"},
+ {file = "griffe-1.5.4-py3-none-any.whl", hash = "sha256:ed33af890586a5bebc842fcb919fc694b3dc1bc55b7d9e0228de41ce566b4a1d"},
+ {file = "griffe-1.5.4.tar.gz", hash = "sha256:073e78ad3e10c8378c2f798bd4ef87b92d8411e9916e157fd366a17cc4fd4e52"},
]
[package.dependencies]
@@ -4220,13 +4220,13 @@ python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"]
[[package]]
name = "mkdocstrings-python"
-version = "1.12.2"
+version = "1.13.0"
description = "A Python handler for mkdocstrings."
optional = false
python-versions = ">=3.9"
files = [
- {file = "mkdocstrings_python-1.12.2-py3-none-any.whl", hash = "sha256:7f7d40d6db3cb1f5d19dbcd80e3efe4d0ba32b073272c0c0de9de2e604eda62a"},
- {file = "mkdocstrings_python-1.12.2.tar.gz", hash = "sha256:7a1760941c0b52a2cd87b960a9e21112ffe52e7df9d0b9583d04d47ed2e186f3"},
+ {file = "mkdocstrings_python-1.13.0-py3-none-any.whl", hash = "sha256:b88bbb207bab4086434743849f8e796788b373bd32e7bfefbf8560ac45d88f97"},
+ {file = "mkdocstrings_python-1.13.0.tar.gz", hash = "sha256:2dbd5757e8375b9720e81db16f52f1856bf59905428fd7ef88005d1370e2f64c"},
]
[package.dependencies]
@@ -5476,13 +5476,13 @@ selenium = ">=4.18.1,<5.0.0"
[[package]]
name = "praisonaiagents"
-version = "0.0.7"
+version = "0.0.8"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
optional = false
python-versions = "*"
files = [
- {file = "praisonaiagents-0.0.7-py3-none-any.whl", hash = "sha256:10d0a07f2ccae3c9521a896a0c7b7844ea35ef58cb340e8b2eecab02c23cccb1"},
- {file = "praisonaiagents-0.0.7.tar.gz", hash = "sha256:b216ac5193dbaad277c75f16499304cb2d2e0c2e0169d29a6a5d75b4405d69ca"},
+ {file = "praisonaiagents-0.0.8-py3-none-any.whl", hash = "sha256:f290a32da08f329a2b7fbe84039aaea1e49a0dd0060cc8821567f6bf25320657"},
+ {file = "praisonaiagents-0.0.8.tar.gz", hash = "sha256:b442dddb3a45248201006a658d3799699869b045af1bc0b0a073e4c1433db987"},
]
[package.dependencies]
@@ -8815,4 +8815,4 @@ ui = ["chainlit"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
-content-hash = "86b2fd10bb29fe0c92fd56204d608573779fe733056f2a711d139fb8f0407171"
+content-hash = "e92a18d8479733651141cd833ee1f1a53c863eb65b8a1a0b7360e2f483460f36"
diff --git a/praisonai.rb b/praisonai.rb
index c4bd404c..fc1d1700 100644
--- a/praisonai.rb
+++ b/praisonai.rb
@@ -3,7 +3,7 @@ class Praisonai < Formula
desc "AI tools for various AI applications"
homepage "https://github.com/MervinPraison/PraisonAI"
- url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.15.tar.gz"
+ url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/2.0.16.tar.gz"
sha256 "1828fb9227d10f991522c3f24f061943a254b667196b40b1a3e4a54a8d30ce32" # Replace with actual SHA256 checksum
license "MIT"
diff --git a/praisonai/agents_generator.py b/praisonai/agents_generator.py
index 31ddbb04..dedef1e9 100644
--- a/praisonai/agents_generator.py
+++ b/praisonai/agents_generator.py
@@ -202,54 +202,42 @@ def load_tools_from_package(self, package_path):
def load_tools_from_tools_py(self):
"""
- Automatically loads all tools (functions and tool definitions) from tools.py file.
+        Imports all public callable functions from the tools.py file and returns them.
+ Also adds the tools to the global namespace.
Returns:
- dict: A dictionary containing:
- - Function names as keys and function objects as values
- - Tool definition names as keys and tool definition dictionaries as values
-
- Note:
- This function looks for:
- 1. Regular functions
- 2. Tool definition dictionaries (containing 'type' and 'function' keys)
- 3. Variables named as tools or ending with '_tool'
+            list: The callable tool functions loaded from tools.py
"""
- tools_dict = {}
+ tools_list = []
try:
# Try to import tools.py from current directory
spec = importlib.util.spec_from_file_location("tools", "tools.py")
+ self.logger.info(f"Spec: {spec}")
if spec is None:
- self.logger.warning("tools.py not found in current directory")
- return tools_dict
+ self.logger.info("tools.py not found in current directory")
+ return tools_list
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
- # Get all module attributes
+ # Get all module attributes except private ones and classes
for name, obj in inspect.getmembers(module):
- # Skip private attributes
- if name.startswith('_'):
- continue
-
- # Case 1: Regular functions
- if inspect.isfunction(obj) and obj.__module__ == "tools":
- tools_dict[name] = obj
-
- # Case 2: Tool definition dictionaries
- elif isinstance(obj, dict) and obj.get('type') == 'function' and 'function' in obj:
- tools_dict[name] = obj
-
- # Case 3: Variables named as tools
- elif (name.endswith('_tool') or name == 'tools') and not inspect.ismodule(obj):
- tools_dict[name] = obj
-
- self.logger.debug(f"Loaded {len(tools_dict)} tools from tools.py")
+ if (not name.startswith('_') and
+ callable(obj) and
+ not inspect.isclass(obj)):
+ # Add the function to global namespace
+ globals()[name] = obj
+ # Add to tools list
+ tools_list.append(obj)
+ self.logger.info(f"Loaded and globalized tool function: {name}")
+
+ self.logger.info(f"Loaded {len(tools_list)} tool functions from tools.py")
+ self.logger.info(f"Tools list: {tools_list}")
except Exception as e:
self.logger.warning(f"Error loading tools from tools.py: {e}")
- return tools_dict
+ return tools_list
def generate_crew_and_kickoff(self):
"""
@@ -549,7 +537,9 @@ def _run_praisonai(self, config, topic, tools_dict):
tasks = []
tasks_dict = {}
- tools_dict = self.load_tools_from_tools_py()
+ # Load tools once at the beginning
+ tools_list = self.load_tools_from_tools_py()
+ self.logger.info(f"Loaded tools: {tools_list}")
# Create agents from config
for role, details in config['roles'].items():
@@ -557,34 +547,16 @@ def _run_praisonai(self, config, topic, tools_dict):
goal_filled = details['goal'].format(topic=topic)
backstory_filled = details['backstory'].format(topic=topic)
- # Get agent tools
- agent_tools = [tools_dict[tool] for tool in details.get('tools', [])
- if tool in tools_dict]
-
- # Configure LLM
- llm_model = details.get('llm')
- if llm_model:
- llm = llm_model.get("model", os.environ.get("MODEL_NAME", "gpt-4o"))
- else:
- llm = os.environ.get("MODEL_NAME", "gpt-4o")
-
- # Configure function calling LLM
- function_calling_llm_model = details.get('function_calling_llm')
- if function_calling_llm_model:
- function_calling_llm = function_calling_llm_model.get("model", os.environ.get("MODEL_NAME", "openai/gpt-4o"))
- else:
- function_calling_llm = os.environ.get("MODEL_NAME", "gpt-4o")
-
- # Create PraisonAI agent
+ # Pass all loaded tools to the agent
agent = PraisonAgent(
name=role_filled,
role=role_filled,
goal=goal_filled,
backstory=backstory_filled,
- tools=agent_tools,
+ tools=tools_list, # Pass the entire tools list to the agent
allow_delegation=details.get('allow_delegation', False),
- llm=llm,
- function_calling_llm=function_calling_llm,
+ llm=details.get('llm', {}).get("model", os.environ.get("MODEL_NAME", "gpt-4o")),
+ function_calling_llm=details.get('function_calling_llm', {}).get("model", os.environ.get("MODEL_NAME", "gpt-4o")),
max_iter=details.get('max_iter', 15),
max_rpm=details.get('max_rpm'),
max_execution_time=details.get('max_execution_time'),
@@ -593,25 +565,27 @@ def _run_praisonai(self, config, topic, tools_dict):
system_template=details.get('system_template'),
prompt_template=details.get('prompt_template'),
response_template=details.get('response_template'),
+ reflect_llm=details.get('reflect_llm', {}).get("model", os.environ.get("MODEL_NAME", "gpt-4o")),
+ min_reflect=details.get('min_reflect', 1),
+ max_reflect=details.get('max_reflect', 3),
)
- # Set agent callback if provided
if self.agent_callback:
agent.step_callback = self.agent_callback
agents[role] = agent
+ self.logger.info(f"Created agent {role_filled} with tools: {agent.tools}")
# Create tasks for the agent
for task_name, task_details in details.get('tasks', {}).items():
description_filled = task_details['description'].format(topic=topic)
expected_output_filled = task_details['expected_output'].format(topic=topic)
- # Create task using PraisonAI Task class
task = PraisonTask(
description=description_filled,
expected_output=expected_output_filled,
agent=agent,
- tools=task_details.get('tools', []),
+ tools=tools_list, # Pass the same tools list to the task
async_execution=task_details.get('async_execution', False),
context=[],
config=task_details.get('config', {}),
@@ -621,8 +595,9 @@ def _run_praisonai(self, config, topic, tools_dict):
callback=task_details.get('callback'),
create_directory=task_details.get('create_directory', False)
)
+
+ self.logger.info(f"Created task {task_name} with tools: {task.tools}")
- # Set task callback if provided
if self.task_callback:
task.callback = self.task_callback
@@ -634,7 +609,7 @@ def _run_praisonai(self, config, topic, tools_dict):
for task_name, task_details in details.get('tasks', {}).items():
task = tasks_dict[task_name]
context_tasks = [tasks_dict[ctx] for ctx in task_details.get('context', [])
- if ctx in tasks_dict]
+ if ctx in tasks_dict]
task.context = context_tasks
# Create and run the PraisonAI agents
@@ -652,13 +627,12 @@ def _run_praisonai(self, config, topic, tools_dict):
tasks=tasks,
verbose=2
)
-
+
self.logger.debug("Final Configuration:")
self.logger.debug(f"Agents: {agents.agents}")
self.logger.debug(f"Tasks: {agents.tasks}")
response = agents.start()
- # result = f"### Task Output ###\n{response}"
self.logger.debug(f"Result: {response}")
result = ""
diff --git a/praisonai/deploy.py b/praisonai/deploy.py
index 6cb3584d..145574be 100644
--- a/praisonai/deploy.py
+++ b/praisonai/deploy.py
@@ -56,7 +56,7 @@ def create_dockerfile(self):
file.write("FROM python:3.11-slim\n")
file.write("WORKDIR /app\n")
file.write("COPY . .\n")
- file.write("RUN pip install flask praisonai==2.0.15 gunicorn markdown\n")
+ file.write("RUN pip install flask praisonai==2.0.16 gunicorn markdown\n")
file.write("EXPOSE 8080\n")
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
diff --git a/pyproject.toml b/pyproject.toml
index b847edd6..ea9463de 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
[project]
name = "PraisonAI"
-version = "2.0.15"
-description = "PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration."
+version = "2.0.16"
+description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
readme = "README.md"
license = ""
requires-python = ">=3.10,<3.13"
@@ -12,7 +12,7 @@ dependencies = [
"rich>=13.7",
"markdown>=3.5",
"pyparsing>=3.0.0",
- "praisonaiagents>=0.0.7",
+ "praisonaiagents>=0.0.12",
"python-dotenv>=0.19.0",
"instructor>=1.3.3",
"PyYAML>=6.0",
@@ -78,8 +78,8 @@ autogen = ["pyautogen>=0.2.19", "praisonai-tools>=0.0.7", "crewai"]
[tool.poetry]
name = "PraisonAI"
-version = "2.0.15"
-description = "PraisonAI application combines AutoGen and CrewAI or similar frameworks into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customization, and efficient human-agent collaboration."
+version = "2.0.16"
+description = "PraisonAI is an AI Agents Framework with Self Reflection. PraisonAI application combines PraisonAI Agents, AutoGen, and CrewAI into a low-code solution for building and managing multi-agent LLM systems, focusing on simplicity, customisation, and efficient human–agent collaboration."
authors = ["Mervin Praison"]
license = ""
readme = "README.md"
@@ -96,7 +96,7 @@ python = ">=3.10,<3.13"
rich = ">=13.7"
markdown = ">=3.5"
pyparsing = ">=3.0.0"
-praisonaiagents = ">=0.0.7"
+praisonaiagents = ">=0.0.12"
python-dotenv = ">=0.19.0"
instructor = ">=1.3.3"
PyYAML = ">=6.0"
diff --git a/tools.py b/tools.py
index c771364b..eba69de3 100644
--- a/tools.py
+++ b/tools.py
@@ -1,30 +1,34 @@
# tools.py
-# from duckduckgo_search import DDGS
-
-# def search_tool(query):
-# """
-# Perform a search using DuckDuckGo.
-
-# Args:
-# query (str): The search query.
-
-# Returns:
-# list: A list of search result titles and URLs.
-# """
-# try:
-# results = []
-# ddgs = DDGS()
-# for result in ddgs.text(keywords=query, max_results=10):
-# results.append({
-# "title": result.get("title", ""),
-# "url": result.get("href", "")
-# })
-# return results
-
-# except Exception as e:
-# print(f"Error during DuckDuckGo search: {e}")
-# return []
+from duckduckgo_search import DDGS
+
+def search_tool(query: str) -> list:
+ """
+ Perform a web search using DuckDuckGo and return relevant results.
+
+ Args:
+ query (str): The search query string to look up information about.
+
+ Returns:
+ list: A list of dictionaries containing search results with the following keys:
+ - title (str): The title of the search result
+ - url (str): The URL of the search result
+ - snippet (str): A brief excerpt or description of the search result
+ """
+ try:
+ results = []
+ ddgs = DDGS()
+ for result in ddgs.text(keywords=query, max_results=10):
+ results.append({
+ "title": result.get("title", ""),
+ "url": result.get("href", ""),
+ "snippet": result.get("body", ""),
+ })
+ return results
+
+ except Exception as e:
+ print(f"Error during DuckDuckGo search: {e}")
+ return []
# # Define tools
# search_tool = {
diff --git a/uv.lock b/uv.lock
index 63a65188..9eafd59b 100644
--- a/uv.lock
+++ b/uv.lock
@@ -3061,7 +3061,7 @@ wheels = [
[[package]]
name = "praisonai"
-version = "2.0.14"
+version = "2.0.16"
source = { editable = "." }
dependencies = [
{ name = "instructor" },
@@ -3192,7 +3192,7 @@ requires-dist = [
{ name = "plotly", marker = "extra == 'realtime'", specifier = ">=5.24.0" },
{ name = "praisonai-tools", marker = "extra == 'autogen'", specifier = ">=0.0.7" },
{ name = "praisonai-tools", marker = "extra == 'crewai'", specifier = ">=0.0.7" },
- { name = "praisonaiagents", specifier = ">=0.0.7" },
+ { name = "praisonaiagents", specifier = ">=0.0.12" },
{ name = "pyautogen", marker = "extra == 'autogen'", specifier = ">=0.2.19" },
{ name = "pydantic", marker = "extra == 'chat'", specifier = "<=2.10.1" },
{ name = "pydantic", marker = "extra == 'code'", specifier = "<=2.10.1" },
@@ -3243,16 +3243,16 @@ wheels = [
[[package]]
name = "praisonaiagents"
-version = "0.0.7"
+version = "0.0.12"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "openai" },
{ name = "pydantic" },
{ name = "rich" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/9f/f2/b0cbb05a9803fdad0613e4dd6c6e42b50c347c29b95e7b5f2571155944af/praisonaiagents-0.0.7.tar.gz", hash = "sha256:b216ac5193dbaad277c75f16499304cb2d2e0c2e0169d29a6a5d75b4405d69ca", size = 15927 }
+sdist = { url = "https://files.pythonhosted.org/packages/a5/90/8c80ce947eae4887984af45b19dd943dc942f2c6015388f3b770f3690269/praisonaiagents-0.0.12.tar.gz", hash = "sha256:f51da9b0acd1147dcf2b547873b5e7ca089427dce6639c760f992d8008f02eba", size = 16403 }
wheels = [
- { url = "https://files.pythonhosted.org/packages/96/5b/8c85f4d335cce89bc004b9929579ee4f258659a1c76cdde8b64c8bf84b1e/praisonaiagents-0.0.7-py3-none-any.whl", hash = "sha256:10d0a07f2ccae3c9521a896a0c7b7844ea35ef58cb340e8b2eecab02c23cccb1", size = 21971 },
+ { url = "https://files.pythonhosted.org/packages/b3/c1/d12e058fcca1c0667168a5430329b3f7714fa654a0979273d61a8fd20ef3/praisonaiagents-0.0.12-py3-none-any.whl", hash = "sha256:4ee4916300abcac4086bfcf289681fd7a1206aeacfa76f75ff007a1683535896", size = 22433 },
]
[[package]]