diff --git a/.github/workflows/test_dev.yml b/.github/workflows/test_dev.yml index 6b719a0..810fd03 100644 --- a/.github/workflows/test_dev.yml +++ b/.github/workflows/test_dev.yml @@ -13,7 +13,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, macos-latest, windows-latest] + os: [ubuntu-latest] python-version: ["3.9", "3.10"] env: OS: ${{ matrix.os }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 795056b..44ec2c0 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,7 +11,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, macos-latest, windows-latest] + os: [ubuntu-latest] python-version: ["3.9", "3.10"] env: OS: ${{ matrix.os }} diff --git a/examples/plot_copy_paste_llm.py b/examples/plot_copy_paste_llm.py new file mode 100644 index 0000000..0cd2779 --- /dev/null +++ b/examples/plot_copy_paste_llm.py @@ -0,0 +1,102 @@ +""" +Copy Paste LLM +============================== +""" + +# %% +# The :class:`~metaprompting.base.CopyPasteLLM` class allows you to simulate an LLM API by copy-pasting over prompts +# and responses from/to the standard output/input. +# +# If this script is called without command line arguments, we define a :class:`DummyLLM` class to simulate +# interaction. To interact with the :class:`~metaprompting.base.CopyPasteLLM` provide any command line argument (e.g. +# "-"). 
+ +import sys + +from metaprompting import State, LlmAction, HistoryAction, Conversation, LLM, CopyPasteLLM + + +if len(sys.argv) > 1: + print(sys.argv) + interactive = True + llm = CopyPasteLLM( + auto_copy_paste=True, # automatically: copy LLM prompt to clipboard; paste response back when clipboard changes + instructions=False, # don't print additional instructions + ) +else: + class DummyLLM(LLM): + + def __call__(self, prompt, *args, **kwargs): + return f"HERE BE THE RESPONSE TO THE FOLLOWING PROMPT\n\n{prompt}" + + interactive = False + llm = DummyLLM() + +# %% +# Create conversation graph with state nodes + +graph = Conversation() +input_state, history_state, inner_speech_state, output_state = graph.add_states([State(), State(), State(), State()]) +graph.input_state = input_state +graph.output_state = output_state + +# %% +# Create and connect action nodes + +# remember history +history_action = HistoryAction() +graph.connect_action([input_state, output_state], history_action, history_state, add=True) + +# generate inner speech +inner_speech_action = LlmAction(llm=llm, prompt_parts=[ + "Here is the history of a conversation between Person 1 and Person 2:\n\n", + "What are some general thoughts about this conversation?\n\n" + + "Keep the output short and to a single paragraph of text-only without formatting, bullet points etc", +]) +graph.connect_action(history_state, inner_speech_action, inner_speech_state, add=True) + +# construct prompt for response +response_action = LlmAction(llm=llm, prompt_parts=[ + "Here is the history of a conversation between Person 1 and Person 2:\n\n", + "\n\nSome general thoughts about this conversation are:\n\n", + "\n\nThe most recent message from Person 1 is:\n\n", + "\n\nWhat could Person 2 reply? 
Only print the reply itself, nothing else!", +]) +graph.connect_action([history_state, inner_speech_state, input_state], response_action, output_state, add=True) + +# %% +# Initialise nodes + +inner_speech_state.update("This is the beginning of the conversation...") +inner_speech_action.block(1) # block trigger from updating history +history_state.update("BEGINNING OF HISTORY\n\n") + +# %% +# Run conversation interleaved with inner speech + +if interactive: + # for running in terminal (if some argument is provided to indicate interactive mode) + def print_inner_speech(): + print(f"Inner speech: {inner_speech_state.value}") + print("Start a conversation (use Ctrl-C to cancel)!") + graph.run(post_response_callback=print_inner_speech) +else: + # for generating example in documentation + input_state.update("Some user input...") + print("========================") + print("User Input") + print("========================") + print(input_state.value) + print("========================") + print("LLM Response") + print("========================") + print(output_state.value) + print("========================") + print("Inner Speech") + print("========================") + print(inner_speech_state.value) + print("========================") + print("History") + print("========================") + print(history_state.value) + print("========================") diff --git a/examples/plot_example.py b/examples/plot_example.py index 645ae31..502a4fe 100644 --- a/examples/plot_example.py +++ b/examples/plot_example.py @@ -2,53 +2,99 @@ Default Action and State nodes ============================== -A simple example using the :class:`~metaprompting.base.DefaultAction` and :class:`~metaprompting.base.DefaultState` +A simple example using the :class:`~metaprompting.DefaultAction` and :class:`~metaprompting.DefaultState` classes.""" # %% -# Define derived classes to make the call dynamics visible +# Verbose Nodes +# ------------- -from metaprompting.base import DefaultAction, DefaultState 
+# %% +# First, we define derived classes that print nicely and make the dynamic calls visible + +from metaprompting import State, Action, Graph, connect +class VerboseAction(Action): -class VerboseAction(DefaultAction): + def __repr__(self): + return f"Action({id(self)})" def input_trigger(self, input): print(f"{self} was triggered by {input}") - super().input_trigger(input) + if super().input_trigger(input): + print(f"{self} was executed") + else: + print(f"{self} was NOT executed") - def execute(self, *args, **kwargs): + def execute(self): print(f"executing {self}") - super().execute(*args, **kwargs) + return super().execute() +class VerboseState(State): -class VerboseState(DefaultState): + def __repr__(self): + return f"State({id(self)})" - def update(self, text): + def update(self, text, *args, **kwargs): print(f"updating {self}") super().update(text) +# %% +# Basic Manual Setup +# ------------------ # %% -# Create state nodes +# We can manually create a graph of connected state and action nodes using basic operations. 
+# +# For this, we create the state nodes, create the action nodes, and connect them (note that the order matters) root_1 = VerboseState() root_2 = VerboseState() root_3 = VerboseState() leaf_1 = VerboseState() leaf_2 = VerboseState() -# %% -# Create action nodes, which auto-connects states -action1 = VerboseAction(input_states=[root_1, root_2, root_3], output_state=leaf_1) -action2 = VerboseAction(input_states=[root_3, root_2, root_1], output_state=leaf_2) +action_1 = VerboseAction() +action_2 = VerboseAction() + +connect([root_1, root_2, root_3], action_1) +connect([root_3, root_2, root_1], action_2) +connect(action_1, leaf_1) +connect(action_2, leaf_2) # %% -# Update root state nodes, which triggers a cascade to leaf nodes +# Updating all root state nodes triggers a cascade of executions and updates (execution of an action only happens +# after all inputs were updated) root_1.update("smoke") root_2.update(" and ") root_3.update("mirrors") # %% -# Print output of leaf nodes +# Note how the different input order leads to different values in the two leaf nodes +print(leaf_1.value) +print(leaf_2.value) + +# %% +# Graph Objects +# ------------- +# +# The :class:`~metaprompting.Graph` class holds all nodes and provides some convenience functions for working +# with graphs. 
+ +graph = Graph() + +(root_1_, root_2_, root_3_, + leaf_1_, leaf_2_) = graph.add_states([VerboseState(), VerboseState(), VerboseState(), + VerboseState(), VerboseState()]) + +graph.connect_action([root_1_, root_2_, root_3_], VerboseAction(), leaf_1_, add=True) +graph.connect_action([root_3_, root_2_, root_1_], VerboseAction(), leaf_2_, add=True) + +print(list(graph.states)) +print(list(graph.actions)) + +root_1_.update("smoke") +root_2_.update(" and ") +root_3_.update("mirrors") + print(leaf_1.value) print(leaf_2.value) diff --git a/metaprompting/__init__.py b/metaprompting/__init__.py index e69de29..16b2024 100644 --- a/metaprompting/__init__.py +++ b/metaprompting/__init__.py @@ -0,0 +1,5 @@ +from metaprompting.util import * +from metaprompting.state import * +from metaprompting.llm import * +from metaprompting.action import * +from metaprompting.graph import * diff --git a/metaprompting/action.py b/metaprompting/action.py new file mode 100644 index 0000000..938b047 --- /dev/null +++ b/metaprompting/action.py @@ -0,0 +1,113 @@ +from abc import ABC + +from metaprompting import make_iterable, LLM + + +class Action(ABC): + + def __init__(self): + """ + An executable node that takes zero or more input_states, executes an action, and returns its output to the + output_state. 
+ + Input states are connected via :meth:`add_input_states` and the + output state via :meth:`set_output_state`. + """ + self._input_states = [] + self._inputs_updated = {} + self._output_state = None + self._block = 0 + + def block(self, state=True): + self._block = state + + def add_input_states(self, states): + states = make_iterable(states) + for s in states: + self._input_states.append(s) + self._inputs_updated[s] = False + + def set_output_state(self, state, force=False): + if self._output_state is not None and not force: + raise RuntimeError("Output state is already set (use force=True to override)") + self._output_state = state + + def input_trigger(self, input): + """ + Trigger the :class:`~Action` from a specific input, typically when the input has been updated. + + :param input: input :class:`~State` + """ + # remember updated inputs + try: + self._inputs_updated[input] = True + except KeyError: + raise KeyError("Given input is not an input of this node") + # return False if any inputs are not updated + for is_updated in self._inputs_updated.values(): + if not is_updated: + return False + # reset update flags + for key in self._inputs_updated.keys(): + self._inputs_updated[key] = False + # ignore if blocking active + if self._block is True: + return False + if self._block > 0: + self._block -= 1 + return False + # execute otherwise + self._execute() + # return True to signal execution + return True + + def _execute(self): + """ + Execute the :class:`~Action` with given arguments and pass on the output to the output :class:`~State`. 
+ """ + out = self.execute() + # update output + self._output_state.update(out) + return out + + def execute(self): + # simple action: concatenate inputs with " + " in between + out = None + for i in self._input_states: + if out is None: + out = i.value + else: + out = out + i.value + return out + + +class LlmAction(Action): + + def __init__(self, llm: LLM, prompt_parts): + super().__init__() + self.llm = llm + self.prompt_parts = list(prompt_parts) + + def execute(self): + if len(self.prompt_parts) != len(self._input_states) + 1: + raise RuntimeError(f"Number of prompt parts ({len(self.prompt_parts)}) must be one less than " + f"number of input states ({len(self._input_states)})") + prompt = self.prompt_parts[0] + for i, p in zip(self._input_states, self.prompt_parts[1:]): + prompt += i.value + p + return self.llm(prompt) + + +class HistoryAction(Action): + + def __init__(self): + super().__init__() + self.history = [] + + def execute(self): + self.history.append(tuple(i.value for i in self._input_states)) + out = "" + for msgs in self.history: + for i, msg in enumerate(msgs): + out += f"Person {i + 1}: {msg}\n\n" + return out diff --git a/metaprompting/base.py b/metaprompting/base.py deleted file mode 100644 index 6900dab..0000000 --- a/metaprompting/base.py +++ /dev/null @@ -1,121 +0,0 @@ -from abc import ABC, abstractmethod - - -class LLM(ABC): - - @abstractmethod - def __call__(self, *args, **kwargs): - """ - Call the LLM with given arguments and return its output. - """ - raise NotImplemented - - -class Action(ABC): - - def __init__(self, input_states=None, output_state=None): - """ - An executable node that takes zero or more input_states, executes an action, and returns its output to the - output_state. 
- - :param input_states: Iterable over input :class:`~State`\s - :param output_state: Output :class:`~State` - """ - self.input_states = input_states - self.output_state = output_state - - def input_trigger(self, input): - """ - Trigger the :class:`~Action` from a specific input, typically when the input has been updated. - - :param input: input :class:`~State` - """ - pass - - @abstractmethod - def execute(self, *args, **kwargs): - """ - Excecute the :class:`~Action` with given arguments and pass on the output to the output :class:`~State`. - """ - raise NotImplementedError - - -class State(ABC): - - def __init__(self, input_action=None, output_actions=None): - """ - A static node holding information generated by an input_action. It may pass on the information to zero or - more output_actions. - - :param input_action: Input :class:`~Action` - :param output_actions: Iterable over output :class:`~Action`\s - """ - self.input_action = input_action - self.output_actions = output_actions - - def trigger_outputs(self): - """ - Trigger all outputs of this :class:`~State`. Should typically be called at the end of :meth:`~update`. 
- """ - for output in self.output_actions: - output.input_trigger(self) - - @abstractmethod - def update(self, *args, **kwargs): - raise NotImplementedError - - -class DefaultAction(Action): - - def __init__(self, input_states=None, output_state=None, auto_connect=True): - if input_states is None: - input_states = [] - super().__init__(input_states=input_states, output_state=output_state) - # remember update status of inputs - self.inputs_updated = {i: False for i in self.input_states} - # connect inputs and outputs - if auto_connect: - for i in self.input_states: - i.output_actions.append(self) - self.output_state.input_action = self - - def input_trigger(self, input): - # remember updated inputs - try: - self.inputs_updated[input] = True - except KeyError: - raise KeyError("Given input is not an input of this node") - # execute if all inputs were updated - for val in self.inputs_updated.values(): - if not val: - break - else: - # reset input flags - for key in self.inputs_updated.keys(): - self.inputs_updated[key] = False - # execute this action - self.execute() - - def execute(self, *args, **kwargs): - # simple action: concatenate inputs with " + " in between - out = None - for i in self.input_states: - if out is None: - out = i.value - else: - out = out + i.value - # update output - self.output_state.update(out) - - -class DefaultState(State): - - def __init__(self, input_action=None, output_actions=None): - if output_actions is None: - output_actions = [] - super().__init__(input_action=input_action, output_actions=output_actions) - self.value = None - - def update(self, value): - self.value = value - self.trigger_outputs() diff --git a/metaprompting/graph.py b/metaprompting/graph.py new file mode 100644 index 0000000..96d0ddb --- /dev/null +++ b/metaprompting/graph.py @@ -0,0 +1,115 @@ +from itertools import chain, count + +from metaprompting import make_iterable, read_multiline_input, State, Action + + +class Graph: + + def __init__(self, states=(), 
actions=()): + self.states = set() + self.actions = set() + self.add_states(states) + self.add_actions(actions) + + def add_states(self, states): + states = make_iterable(states) + for s in states: + self.states.add(s) + if len(states) == 1: + return states[0] + else: + return states + + def add_actions(self, actions): + actions = make_iterable(actions) + for a in actions: + self.actions.add(a) + if len(actions) == 1: + return actions[0] + else: + return actions + + def _assert_is_node(self, node, add=False): + if isinstance(node, State): + if node not in self.states: + if add: + self.add_states(node) + return + else: + raise KeyError(f"State {node} is not in this graph") + else: + return + if isinstance(node, Action): + if node not in self.actions: + if add: + self.add_actions(node) + return + else: + raise KeyError(f"Action {node} is not in this graph") + else: + return + raise KeyError(f"Node {node} is neither " + f"State ({isinstance(node, State)}) nor " + f"Action node ({isinstance(node, Action)})") + + def connect_action(self, input_nodes, action, output_node, force=False, add=False): + input_nodes = make_iterable(input_nodes) + for n in input_nodes: + self._assert_is_node(n) + self._assert_is_node(output_node) + self._assert_is_node(action, add=add) + connect(from_nodes=input_nodes, to_nodes=action, force=force) + connect(from_nodes=action, to_nodes=output_node, force=force) + return action + + +class Conversation(Graph): + + def __init__(self, input_state=None, output_state=None, multiline=False, states=(), actions=()): + super().__init__(states=states, actions=actions) + self.input_state = input_state + self.output_state = output_state + self.multiline = multiline + self.user_prefix = "You: " + self.response_prefix = "LLM: " + + def run(self, n_interactions=None, pre_input_callback=None, post_input_callback=None, post_response_callback=None): + if n_interactions is None: + it = count() + else: + it = range(n_interactions) + for i in it: + if 
pre_input_callback is not None: + pre_input_callback() + # get user input and update input node + if self.multiline: + msg = read_multiline_input(self.user_prefix) + else: + msg = input(self.user_prefix) + self.input_state.update(msg) + if post_input_callback is not None: + post_input_callback() + # print response + print(self.response_prefix + self.output_state.value) + if post_response_callback is not None: + post_response_callback() + + + +def connect(from_nodes, to_nodes, force=False): + # handle single node arguments + from_nodes = make_iterable(from_nodes) + to_nodes = make_iterable(to_nodes) + # connect all possible 'from' --> 'to' pairs + for fn in from_nodes: + for tn in to_nodes: + if isinstance(fn, State) and isinstance(tn, Action): + # state --> action connection + fn.add_output_actions(tn) + tn.add_input_states(fn) + elif isinstance(fn, Action) and isinstance(tn, State): + # action --> state connection + fn.set_output_state(tn, force=force) + tn.set_input_action(fn, force=force) + else: + raise RuntimeError("Can only connect State to Action or Action to State node") diff --git a/metaprompting/llm.py b/metaprompting/llm.py new file mode 100644 index 0000000..f566d01 --- /dev/null +++ b/metaprompting/llm.py @@ -0,0 +1,52 @@ +from abc import ABC, abstractmethod +from time import sleep + +import pyperclip + +from metaprompting import read_multiline_input + + +class LLM(ABC): + + @abstractmethod + def __call__(self, prompt, *args, **kwargs): + """ + Call the LLM with given arguments and return its output. + """ + raise NotImplementedError + + +class CopyPasteLLM(LLM): + + def __init__(self, multiline=True, auto_copy_paste=False, instructions=True): + self.multiline = multiline + self.auto_copy_paste = auto_copy_paste + self.instructions = instructions + + def __call__(self, prompt, *args, **kwargs): + if self.auto_copy_paste: + pyperclip.copy(prompt) + if self.instructions: + print("COPY-PASTE LLM: The LLM prompt has been copied to your clipboard. 
" + "Please paste it into your LLM and copy the response. " + "The clipboard will be automatically monitored and the " + "response is read out as soon as it is copied in.") + while True: + sleep(0.01) + response = pyperclip.paste() + if response != prompt: + return response + else: + if self.instructions: + print("COPY-PASTE LLM: Copy-paste the text in between the >>>/<<< lines to your LMM") + print(">>>") + print(prompt) + print("<<<") + if self.instructions: + print("COPY-PASTE LLM: Copy-paste your LLM response here!") + if self.multiline: + if self.instructions: + print("COPY-PASTE LLM: (use enter for new lines and Ctrl-D to send)") + return read_multiline_input() + else: + return input() diff --git a/metaprompting/state.py b/metaprompting/state.py new file mode 100644 index 0000000..75735f2 --- /dev/null +++ b/metaprompting/state.py @@ -0,0 +1,38 @@ +from abc import ABC + +from metaprompting import make_iterable + +class State(ABC): + + def __init__(self): + """ + A static node holding information generated by an input_action. It may pass on the information to zero or + more output_actions. + + :param input_action: Input :class:`~Action` + :param output_actions: Iterable over output :class:`~Action`\s + """ + self._input_action = None + self._output_actions = [] + self.value = None + + def set_input_action(self, action, force=False): + if self._input_action is not None and not force: + raise RuntimeError("Input action is already set (use force=True to override)") + self._input_action = action + + def add_output_actions(self, actions): + actions = make_iterable(actions) + for a in actions: + self._output_actions.append(a) + + def trigger_outputs(self): + """ + Trigger all outputs of this :class:`~State`. Should typically be called at the end of :meth:`~update`. 
+ """ + for output in self._output_actions: + output.input_trigger(self) + + def update(self, value, *args, **kwargs): + self.value = value + self.trigger_outputs() diff --git a/metaprompting/util.py b/metaprompting/util.py new file mode 100644 index 0000000..0b58a76 --- /dev/null +++ b/metaprompting/util.py @@ -0,0 +1,27 @@ +from collections.abc import Iterable + + +def make_iterable(x, type_check=True): + if type_check: + if isinstance(x, Iterable): + return x + else: + return [x] + else: + try: + _ = iter(x) + except TypeError: + x = [x] + return x + + +def read_multiline_input(prefix=""): + contents = [] + while True: + try: + line = input(prefix) + except EOFError: + break + contents.append(line) + print() + return "\n".join(contents) diff --git a/requirements.txt b/requirements.txt index 3fcfb51..c9c47dd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -colorama +pyperclip diff --git a/setup.py b/setup.py index 3150b8b..ddc3925 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ setuptools.setup( name="metaprompting", - version="0.0.0", + version="0.0.1", author="Robert Lieck", author_email="robert.lieck@durham.ac.uk", description="A meta-prompting library",