feat(agent, forge): Markdown-formatted history -> message history (#7228)

- Implement message based history in `ActionHistoryComponent`
- Make non-summarized message count configurable (`ActionHistoryComponent.full_message_count`)
- Run `ActionHistoryComponent` after `SystemComponent` so that history messages are last in the prompt
- Omit final instruction message if prompt already contains assistant messages
- Filter `raw_message` from `ActionProposal.schema()`

---------

Co-authored-by: Krzysztof Czerwinski <[email protected]>
Pwuts and kcze authored Jul 2, 2024
1 parent 2fa4fd2 commit 97e4cce
Showing 6 changed files with 118 additions and 18 deletions.
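To make the effect of these changes concrete, here is a hedged sketch of the prompt layout after this commit, built with the message types that appear in the diff below; all contents and the `tool_call_id` are illustrative placeholders, not actual agent output:

```python
from forge.llm.providers import ChatMessage
from forge.llm.providers.schema import AssistantChatMessage, ToolResultMessage

# Illustrative only: history is appended to the prompt as real chat messages
# instead of a single Markdown "progress" section.
history_messages = [
    # Older episodes are compressed into one summary system message
    ChatMessage.system(
        "## Progress on your Task so far\n"
        "Here is a summary of the steps that you have executed so far, "
        "use this as your consideration for determining the next action!\n"
        "* Step 1: ...\n\n* Step 2: ..."
    ),
    # The latest `full_message_count` episodes are replayed verbatim
    AssistantChatMessage(content="..."),                      # episode.action.raw_message
    ToolResultMessage(content="...", tool_call_id="call_0"),  # that episode's result
]
# Because the prompt now ends with assistant/tool messages, the final
# instruction message is omitted.
```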
20 changes: 12 additions & 8 deletions autogpt/autogpt/agents/agent.py
@@ -111,14 +111,18 @@ def __init__(

# Components
self.system = SystemComponent()
self.history = ActionHistoryComponent(
settings.history,
lambda x: self.llm_provider.count_tokens(x, self.llm.name),
llm_provider,
ActionHistoryConfiguration(
model_name=app_config.fast_llm, max_tokens=self.send_token_limit
),
).run_after(WatchdogComponent)
self.history = (
ActionHistoryComponent(
settings.history,
lambda x: self.llm_provider.count_tokens(x, self.llm.name),
llm_provider,
ActionHistoryConfiguration(
model_name=app_config.fast_llm, max_tokens=self.send_token_limit
),
)
.run_after(WatchdogComponent)
.run_after(SystemComponent)
)
if not app_config.noninteractive_mode:
self.user_interaction = UserInteractionComponent()
self.file_manager = FileManagerComponent(file_storage, settings)
1 change: 1 addition & 0 deletions autogpt/autogpt/agents/prompt_strategies/one_shot.py
@@ -275,4 +275,5 @@ def parse_response_content(
assistant_reply_dict["use_tool"] = response.tool_calls[0].function

parsed_response = OneShotAgentActionProposal.parse_obj(assistant_reply_dict)
parsed_response.raw_message = response.copy()
return parsed_response
1 change: 1 addition & 0 deletions docs/content/forge/components/built-in-components.md
@@ -81,6 +81,7 @@ Keeps track of agent's actions and their outcomes. Provides their summary to the
| `model_name` | Name of the llm model used to compress the history | `ModelName` | `"gpt-3.5-turbo"` |
| `max_tokens` | Maximum number of tokens to use for the history summary | `int` | `1024` |
| `spacy_language_model` | Language model used for summary chunking using spacy | `str` | `"en_core_web_sm"` |
| `full_message_count` | Number of cycles to include unsummarized in the prompt | `int` | `4` |

**MessageProvider**

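For reference, a minimal configuration sketch using the options from the table above; the values mirror the documented defaults, and the import path is assumed from the file layout in this commit rather than confirmed:

```python
# Sketch: tuning the history component (import path assumed, not confirmed).
from forge.components.action_history.action_history import ActionHistoryConfiguration

config = ActionHistoryConfiguration(
    max_tokens=1024,       # token budget for the summarized portion of the history
    full_message_count=4,  # new in this commit: latest episodes kept as raw messages
)
```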
5 changes: 4 additions & 1 deletion forge/forge/agent/forge_agent.py
@@ -24,7 +24,7 @@
from forge.file_storage.base import FileStorage
from forge.llm.prompting.schema import ChatPrompt
from forge.llm.prompting.utils import dump_prompt
from forge.llm.providers.schema import AssistantFunctionCall
from forge.llm.providers.schema import AssistantChatMessage, AssistantFunctionCall
from forge.llm.providers.utils import function_specs_from_commands
from forge.models.action import (
ActionErrorResult,
@@ -178,6 +178,9 @@ async def propose_action(self) -> ActionProposal:
use_tool=AssistantFunctionCall(
name="finish", arguments={"reason": "Unimplemented logic"}
),
raw_message=AssistantChatMessage(
content="finish(reason='Unimplemented logic')"
),
)

self.config.cycle_count += 1
89 changes: 81 additions & 8 deletions forge/forge/components/action_history/action_history.py
@@ -10,6 +10,7 @@
from forge.llm.providers import ChatMessage, MultiProvider
from forge.llm.providers.multi import ModelName
from forge.llm.providers.openai import OpenAIModelName
from forge.llm.providers.schema import ToolResultMessage

from .model import ActionResult, AnyProposal, Episode, EpisodicActionHistory

@@ -21,6 +22,8 @@ class ActionHistoryConfiguration(BaseModel):
"""Maximum number of tokens to use up with generated history messages"""
spacy_language_model: str = "en_core_web_sm"
"""Language model used for summary chunking using spacy"""
full_message_count: int = 4
"""Number of latest non-summarized messages to include in the history"""


class ActionHistoryComponent(
@@ -46,12 +49,47 @@ def __init__(
self.llm_provider = llm_provider

def get_messages(self) -> Iterator[ChatMessage]:
if progress := self._compile_progress(
self.event_history.episodes,
self.config.max_tokens,
self.count_tokens,
):
yield ChatMessage.system(f"## Progress on your Task so far\n\n{progress}")
messages: list[ChatMessage] = []
step_summaries: list[str] = []
tokens: int = 0
n_episodes = len(self.event_history.episodes)

# Include a summary for all except a few latest steps
for i, episode in enumerate(reversed(self.event_history.episodes)):
# Use full format for a few steps, summary or format for older steps
if i < self.config.full_message_count:
messages.insert(0, episode.action.raw_message)
tokens += self.count_tokens(str(messages[0])) # HACK
if episode.result:
result_message = self._make_result_message(episode, episode.result)
messages.insert(1, result_message)
tokens += self.count_tokens(str(result_message)) # HACK
continue
elif episode.summary is None:
step_content = indent(episode.format(), 2).strip()
else:
step_content = episode.summary

step = f"* Step {n_episodes - i}: {step_content}"

if self.config.max_tokens and self.count_tokens:
step_tokens = self.count_tokens(step)
if tokens + step_tokens > self.config.max_tokens:
break
tokens += step_tokens

step_summaries.insert(0, step)

if step_summaries:
step_summaries_fmt = "\n\n".join(step_summaries)
yield ChatMessage.system(
f"## Progress on your Task so far\n"
"Here is a summary of the steps that you have executed so far, "
"use this as your consideration for determining the next action!\n"
f"{step_summaries_fmt}"
)

yield from messages

def after_parse(self, result: AnyProposal) -> None:
self.event_history.register_action(result)
@@ -62,6 +100,41 @@ async def after_execute(self, result: ActionResult) -> None:
self.llm_provider, self.config.model_name, self.config.spacy_language_model
)

@staticmethod
def _make_result_message(episode: Episode, result: ActionResult) -> ChatMessage:
if result.status == "success":
return (
ToolResultMessage(
content=str(result.outputs),
tool_call_id=episode.action.raw_message.tool_calls[0].id,
)
if episode.action.raw_message.tool_calls
else ChatMessage.user(
f"{episode.action.use_tool.name} returned: "
+ (
f"```\n{result.outputs}\n```"
if "\n" in str(result.outputs)
else f"`{result.outputs}`"
)
)
)
elif result.status == "error":
return (
ToolResultMessage(
content=f"{result.reason}\n\n{result.error or ''}".strip(),
is_error=True,
tool_call_id=episode.action.raw_message.tool_calls[0].id,
)
if episode.action.raw_message.tool_calls
else ChatMessage.user(
f"{episode.action.use_tool.name} raised an error: ```\n"
f"{result.reason}\n"
"```"
)
)
else:
return ChatMessage.user(result.feedback)

def _compile_progress(
self,
episode_history: list[Episode[AnyProposal]],
@@ -76,8 +149,8 @@ def _compile_progress(
n_episodes = len(episode_history)

for i, episode in enumerate(reversed(episode_history)):
# Use full format for the latest 4 steps, summary or format for older steps
if i < 4 or episode.summary is None:
# Use full format for a few latest steps, summary or format for older steps
if i < self.config.full_message_count or episode.summary is None:
step_content = indent(episode.format(), 2).strip()
else:
step_content = episode.summary
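As a quick illustration of `_make_result_message` above: when the proposal's `raw_message` carries tool calls, outcomes are fed back as `ToolResultMessage`s bound to the originating call id; otherwise they fall back to plain user messages. The command names, outputs, and ids below are hypothetical:

```python
from forge.llm.providers import ChatMessage
from forge.llm.providers.schema import ToolResultMessage

# With tool calls in raw_message: results reference the originating call id.
ok = ToolResultMessage(content="['a.txt', 'b.txt']", tool_call_id="call_0")
err = ToolResultMessage(content="File not found", is_error=True, tool_call_id="call_1")

# Without tool calls: results are phrased as user messages.
ok_fallback = ChatMessage.user("list_files returned: `['a.txt', 'b.txt']`")
err_fallback = ChatMessage.user("read_file raised an error: File not found")
```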
20 changes: 19 additions & 1 deletion forge/forge/models/action.py
@@ -3,8 +3,9 @@
from typing import Any, Literal, Optional, TypeVar

from pydantic import BaseModel
from pydantic.schema import default_ref_template

from forge.llm.providers.schema import AssistantFunctionCall
from forge.llm.providers.schema import AssistantChatMessage, AssistantFunctionCall

from .utils import ModelWithSummary

@@ -13,6 +14,23 @@ class ActionProposal(BaseModel):
thoughts: str | ModelWithSummary
use_tool: AssistantFunctionCall

raw_message: AssistantChatMessage = None # type: ignore
"""
The message from which the action proposal was parsed. To be set by the parser.
"""

@classmethod
def schema(
cls, by_alias: bool = True, ref_template: str = default_ref_template, **kwargs
):
"""
The schema for this ActionProposal model, excluding the 'raw_message' property.
"""
schema = super().schema(by_alias=by_alias, ref_template=ref_template, **kwargs)
if "raw_message" in schema["properties"]: # must check because schema is cached
del schema["properties"]["raw_message"]
return schema


AnyProposal = TypeVar("AnyProposal", bound=ActionProposal)
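A small usage sketch of the `schema()` override above, assuming the pydantic v1 `.schema()` API that this module uses: `raw_message` stays available on instances but is dropped from the generated JSON schema.

```python
# Hypothetical check: raw_message is filtered out of the schema exposed to the
# LLM, while the regular fields remain.
from forge.models.action import ActionProposal

schema = ActionProposal.schema()
assert "raw_message" not in schema["properties"]
assert {"thoughts", "use_tool"} <= set(schema["properties"])
```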

Expand Down
