Commit e1ea446: version 0.0.1
robert-lieck committed Oct 10, 2024 (parent: d2ee51a)
Showing 13 changed files with 517 additions and 141 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test_dev.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-latest]
         python-version: ["3.9", "3.10"]
     env:
       OS: ${{ matrix.os }}
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
@@ -11,7 +11,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-latest]
         python-version: ["3.9", "3.10"]
     env:
       OS: ${{ matrix.os }}
101 changes: 101 additions & 0 deletions examples/plot_copy_paste_llm.py
@@ -0,0 +1,101 @@
"""
Copy Paste LLM
==============================
"""

# %%
# The :class:`~metaprompting.CopyPasteLLM` class allows you to simulate an LLM API by copy-pasting prompts and
# responses to/from the standard output/input.
#
# The script must be called with the '--interactive' command line switch to use the
# :class:`~metaprompting.CopyPasteLLM` class; otherwise, we define a :class:`DummyLLM` class that simulates the
# interaction for generating the documentation.

import sys

from metaprompting import State, LlmAction, HistoryAction, Conversation, LLM, CopyPasteLLM


if "--interactive" in sys.argv:
interactive = True
llm = CopyPasteLLM(
auto_copy_paste=True, # automatically: copy LLM prompt to clipboard; paste response back when clipboard changes
instructions=False, # don't print additional instructions
)
else:
class DummyLLM(LLM):

def __call__(self, prompt, *args, **kwargs):
return f"HERE BE THE RESPONSE TO THE FOLLOWING PROMPT\n\n{prompt}"

interactive = False
llm = DummyLLM()
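
# %%
# In the non-interactive case, the :class:`DummyLLM` defined above simply echoes its prompt, so (illustrative only)
# ``llm("Hi")`` returns ``"HERE BE THE RESPONSE TO THE FOLLOWING PROMPT\n\nHi"``.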

# %%
# Create conversation graph with state nodes

graph = Conversation()
input_state, history_state, inner_speech_state, output_state = graph.add_states([State(), State(), State(), State()])
graph.input_state = input_state
graph.output_state = output_state

# %%
# Create and connect action nodes

# remember history
history_action = HistoryAction()
graph.connect_action([input_state, output_state], history_action, history_state, add=True)

# generate inner speech
inner_speech_action = LlmAction(llm=llm, prompt_parts=[
    "Here is the history of a conversation between Person 1 and Person 2:\n\n",
    "What are some general thoughts about this conversation?\n\n" +
    "Keep the output short and to a single paragraph of text-only without formatting, bullet points etc",
])
graph.connect_action(history_state, inner_speech_action, inner_speech_state, add=True)

# construct prompt for response
response_action = LlmAction(llm=llm, prompt_parts=[
    "Here is the history of a conversation between Person 1 and Person 2:\n\n",
    "\n\nSome general thoughts about this conversation are:\n\n",
    "\n\nThe most recent message from Person 1 is:\n\n",
    "\n\nWhat could Person 2 reply? Only print the reply itself, nothing else!",
])
graph.connect_action([history_state, inner_speech_state, input_state], response_action, output_state, add=True)
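
# %%
# Note how each :class:`~metaprompting.LlmAction` above takes one more prompt part than it has input states: the
# prompt is assembled by interleaving them as parts[0] + input_1 + parts[1] + ... + input_N + parts[N], so the
# history, inner speech, and most recent input are inserted between the prompt parts at execution time.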

# %%
# Initialise nodes

inner_speech_state.update("This is the beginning of the conversation...")
inner_speech_action.block(1)  # swallow the one trigger caused by initialising the history below
history_state.update("BEGINNING OF HISTORY\n\n")

# %%
# Run conversation interleaved with inner speech

if interactive:
    # for running in terminal with '--interactive' switch
    def print_inner_speech():
        print(f"Inner speech: {inner_speech_state.value}")
    print("Start a conversation (use Ctrl-C to cancel)!")
    graph.run(post_response_callback=print_inner_speech)
else:
    # for generating example in documentation
    input_state.update("Some user input...")
    print("========================")
    print("User Input")
    print("========================")
    print(input_state.value)
    print("========================")
    print("LLM Response")
    print("========================")
    print(output_state.value)
    print("========================")
    print("Inner Speech")
    print("========================")
    print(inner_speech_state.value)
    print("========================")
    print("History")
    print("========================")
    print(history_state.value)
    print("========================")
78 changes: 62 additions & 16 deletions examples/plot_example.py
@@ -2,53 +2,99 @@
 Default Action and State nodes
 ==============================
-A simple example using the :class:`~metaprompting.base.DefaultAction` and :class:`~metaprompting.base.DefaultState`
+A simple example using the :class:`~metaprompting.DefaultAction` and :class:`~metaprompting.DefaultState`
 classes."""

 # %%
-# Define derived classes to make the call dynamics visible
+# Verbose Nodes
+# -------------

-from metaprompting.base import DefaultAction, DefaultState
+# %%
+# First, we define derived classes that print nicely and make the dynamic calls visible
+
+from metaprompting import State, Action, Graph, connect


-class VerboseAction(DefaultAction):
+class VerboseAction(Action):

     def __repr__(self):
         return f"Action({id(self)})"

     def input_trigger(self, input):
         print(f"{self} was triggered by {input}")
-        super().input_trigger(input)
+        if super().input_trigger(input):
+            print(f"{self} was executed")
+        else:
+            print(f"{self} was NOT executed")

-    def execute(self, *args, **kwargs):
+    def execute(self):
         print(f"executing {self}")
-        super().execute(*args, **kwargs)
+        return super().execute()


-class VerboseState(DefaultState):
+class VerboseState(State):

     def __repr__(self):
         return f"State({id(self)})"

-    def update(self, text):
+    def update(self, text, *args, **kwargs):
         print(f"updating {self}")
         super().update(text)

 # %%
-# Create state nodes
+# Basic Manual Setup
+# ------------------
+
+# %%
+# We can manually create a graph of connected state and action nodes using basic operations.
+#
+# For this, we create the state nodes, create the action nodes, and connect them (note that the order matters)
 root_1 = VerboseState()
 root_2 = VerboseState()
 root_3 = VerboseState()
 leaf_1 = VerboseState()
 leaf_2 = VerboseState()

 # %%
-# Create action nodes, which auto-connects states
-action1 = VerboseAction(input_states=[root_1, root_2, root_3], output_state=leaf_1)
-action2 = VerboseAction(input_states=[root_3, root_2, root_1], output_state=leaf_2)
+action_1 = VerboseAction()
+action_2 = VerboseAction()
+
+connect([root_1, root_2, root_3], action_1)
+connect([root_3, root_2, root_1], action_2)
+connect(action_1, leaf_1)
+connect(action_2, leaf_2)

 # %%
-# Update root state nodes, which triggers a cascade to leaf nodes
+# Updating all root state nodes triggers a cascade of executions and updates (execution of an action only happens
+# after all inputs were updated)
 root_1.update("smoke")
 root_2.update(" and ")
 root_3.update("mirrors")

 # %%
-# Print output of leaf nodes
+# Note how the different input order leads to different values in the two leaf nodes
 print(leaf_1.value)
 print(leaf_2.value)

+# %%
+# Graph Objects
+# -------------
+#
+# The :class:`~metaprompting.Graph` class holds all nodes and provides some convenience functions for working
+# with graphs.
+
+graph = Graph()
+
+(root_1_, root_2_, root_3_,
+ leaf_1_, leaf_2_) = graph.add_states([VerboseState(), VerboseState(), VerboseState(),
+                                       VerboseState(), VerboseState()])
+
+graph.connect_action([root_1_, root_2_, root_3_], VerboseAction(), leaf_1_, add=True)
+graph.connect_action([root_3_, root_2_, root_1_], VerboseAction(), leaf_2_, add=True)
+
+print(list(graph.states))
+print(list(graph.actions))
+
+root_1_.update("smoke")
+root_2_.update(" and ")
+root_3_.update("mirrors")
+
+print(leaf_1_.value)
+print(leaf_2_.value)
5 changes: 5 additions & 0 deletions metaprompting/__init__.py
@@ -0,0 +1,5 @@
from metaprompting.util import *
from metaprompting.state import *
from metaprompting.llm import *
from metaprompting.action import *
from metaprompting.graph import *
113 changes: 113 additions & 0 deletions metaprompting/action.py
@@ -0,0 +1,113 @@
from abc import ABC

from metaprompting import make_iterable, LLM


class Action(ABC):

    def __init__(self):
        """
        An executable node that takes zero or more input :class:`~State`\s, executes an action, and passes its
        output on to an output :class:`~State` (set via :meth:`add_input_states` and :meth:`set_output_state`).
        """
        self._input_states = []
        self._inputs_updated = {}
        self._output_state = None
        self._block = 0
    def block(self, state=True):
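        # state=True blocks until block(False) is called; an integer n ignores the next n pending executions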
        self._block = state

    def add_input_states(self, states):
        states = make_iterable(states)
        for s in states:
            self._input_states.append(s)
            self._inputs_updated[s] = False

    def set_output_state(self, state, force=False):
        if self._output_state is not None and not force:
            raise RuntimeError("Output state is already set (use force=True to override)")
        self._output_state = state

    def input_trigger(self, input):
        """
        Trigger the :class:`~Action` from a specific input, typically when the input has been updated.

        :param input: input :class:`~State`
        :return: True if the action was executed, False otherwise
        """
        # remember updated inputs
        if input not in self._inputs_updated:
            raise KeyError("Given input is not an input of this node")
        self._inputs_updated[input] = True
        # return False if any inputs are not updated
        for is_updated in self._inputs_updated.values():
            if not is_updated:
                return False
        # reset update flags
        for key in self._inputs_updated.keys():
            self._inputs_updated[key] = False
        # ignore if blocking is active
        if self._block is True:
            return False
        if self._block > 0:
            self._block -= 1
            return False
        # execute otherwise
        self._execute()
        # return True to signal execution
        return True

    def _execute(self):
        """
        Execute the :class:`~Action` and pass the output on to the output :class:`~State`.
        """
        out = self.execute()
        # update output
        self._output_state.update(out)
        return out

    def execute(self):
        # default action: concatenate the input values in order
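        # (e.g. input values "smoke", " and ", "mirrors" yield "smoke and mirrors")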
        out = None
        for i in self._input_states:
            if out is None:
                out = i.value
            else:
                out = out + i.value
        return out


class LlmAction(Action):

    def __init__(self, llm: LLM, prompt_parts):
        super().__init__()
        self.llm = llm
        self.prompt_parts = list(prompt_parts)

    def execute(self):
        if len(self.prompt_parts) != len(self._input_states) + 1:
            raise RuntimeError(f"Number of prompt parts ({len(self.prompt_parts)}) must be one more than the "
                               f"number of input states ({len(self._input_states)})")
        prompt = self.prompt_parts[0]
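        # interleave prompt parts and input values: parts[0] + input_1 + parts[1] + ... + input_N + parts[N]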
        for i, p in zip(self._input_states, self.prompt_parts[1:]):
            prompt += i.value + p
        return self.llm(prompt)


class HistoryAction(Action):

    def __init__(self):
        super().__init__()
        self.history = []

    def execute(self):
        self.history.append(tuple(i.value for i in self._input_states))
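        # render the full history as "Person <i>: <message>" blocks separated by blank lines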
out = ""
for msgs in self.history:
for i, msg in enumerate(msgs):
out += f"Person {i + 1}: {msg}\n\n"
return out