Generated from robert-lieck/pythontemplatepackage

Commit e1ea446 (parent d2ee51a): 13 changed files with 517 additions and 141 deletions.
@@ -0,0 +1,101 @@
""" | ||
Copy Paste LLM | ||
============================== | ||
""" | ||
|
||
# %% | ||
# The :class:`~metaprompting.base.CopyPasteLLM` class allows you to simulate an LLM API by copy-pasting over prompts | ||
# and responses from/to the standard output/input. | ||
# | ||
# The script must be called with '--interactive' command line switch to use the | ||
# :class:`~metaprompting.base.CopyPasteLLM` class, otherwise, we define a :class:`DummyLLM` class to simulate | ||
# interaction for generating the documentation. | ||
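#
# For example (assuming the script is saved as ``copy_paste_llm.py``)::
#
#     python copy_paste_llm.py --interactive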

import sys

from metaprompting import State, LlmAction, HistoryAction, Conversation, LLM, CopyPasteLLM

if "--interactive" in sys.argv: | ||
interactive = True | ||
llm = CopyPasteLLM( | ||
auto_copy_paste=True, # automatically: copy LLM prompt to clipboard; paste response back when clipboard changes | ||
instructions=False, # don't print additional instructions | ||
) | ||
else: | ||
class DummyLLM(LLM): | ||
|
||
def __call__(self, prompt, *args, **kwargs): | ||
return f"HERE BE THE RESPONSE TO THE FOLLOWING PROMPT\n\n{prompt}" | ||
|
||
interactive = False | ||
llm = DummyLLM() | ||
|
||
# %%
# Create the conversation graph with its state nodes

graph = Conversation()
input_state, history_state, inner_speech_state, output_state = graph.add_states([State(), State(), State(), State()])
graph.input_state = input_state
graph.output_state = output_state

# %%
# Create and connect the action nodes

# remember the conversation history
history_action = HistoryAction()
graph.connect_action([input_state, output_state], history_action, history_state, add=True)

# generate inner speech
inner_speech_action = LlmAction(llm=llm, prompt_parts=[
    "Here is the history of a conversation between Person 1 and Person 2:\n\n",
    "What are some general thoughts about this conversation?\n\n" +
    "Keep the output short and to a single paragraph of text only, without formatting, bullet points, etc.",
])
graph.connect_action(history_state, inner_speech_action, inner_speech_state, add=True)

# construct the prompt for the response
response_action = LlmAction(llm=llm, prompt_parts=[
    "Here is the history of a conversation between Person 1 and Person 2:\n\n",
    "\n\nSome general thoughts about this conversation are:\n\n",
    "\n\nThe most recent message from Person 1 is:\n\n",
    "\n\nWhat could Person 2 reply? Only print the reply itself, nothing else!",
])
graph.connect_action([history_state, inner_speech_state, input_state], response_action, output_state, add=True)
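
# %%
# With its three input states, the four ``prompt_parts`` of ``response_action`` are interleaved with the state
# values (presumably in the order they were connected), so the final prompt reads
# ``part0 + history + part1 + inner_speech + part2 + input + part3``
# (see ``LlmAction.execute`` below).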

# %%
# Initialise the nodes

inner_speech_state.update("This is the beginning of the conversation...")
inner_speech_action.block(1)  # block the one trigger caused by updating the history state below
history_state.update("BEGINNING OF HISTORY\n\n")
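
# %%
# Note: ``history_state`` is the only input of ``inner_speech_action``, so updating it above would (presumably
# via the graph's trigger mechanism) immediately execute the action and call the LLM; ``block(1)`` swallows
# exactly that one trigger.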

# %%
# Run the conversation, interleaved with inner speech

if interactive:
    # for running in a terminal with the '--interactive' switch
    def print_inner_speech():
        print(f"Inner speech: {inner_speech_state.value}")
    print("Start a conversation (use Ctrl-C to cancel)!")
    graph.run(post_response_callback=print_inner_speech)
else:
    # for generating the example in the documentation
    input_state.update("Some user input...")
    print("========================")
    print("User Input")
    print("========================")
    print(input_state.value)
    print("========================")
    print("LLM Response")
    print("========================")
    print(output_state.value)
    print("========================")
    print("Inner Speech")
    print("========================")
    print(inner_speech_state.value)
    print("========================")
    print("History")
    print("========================")
    print(history_state.value)
    print("========================")
@@ -0,0 +1,5 @@
from metaprompting.util import *
from metaprompting.state import *
from metaprompting.llm import *
from metaprompting.action import *
from metaprompting.graph import *
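
The order of these wildcard imports matters: ``metaprompting/action.py`` (shown next) runs
``from metaprompting import make_iterable, LLM`` at import time, which only resolves because ``util`` and
``llm`` have already bound those names into the package namespace. A hypothetical sketch of the failure if
the order were flipped:

from metaprompting.action import *  # would run `from metaprompting import make_iterable, LLM` ...
from metaprompting.llm import *     # ... and fail with ImportError, since LLM is not yet bound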
@@ -0,0 +1,113 @@
from abc import ABC

from metaprompting import make_iterable, LLM


class Action(ABC):

    def __init__(self):
        """
        An executable node that reads zero or more input states, executes an action, and passes its output on to
        an output state. Inputs and output are attached via :meth:`add_input_states` and :meth:`set_output_state`.
        """
        self._input_states = []
        self._inputs_updated = {}
        self._output_state = None
        self._block = 0

    def block(self, state=True):
        """
        Block execution of this :class:`~Action`: ``True`` blocks until unblocked with ``False``; an integer ``n``
        blocks the next ``n`` would-be executions.
        """
        self._block = state

    def add_input_states(self, states):
        states = make_iterable(states)
        for s in states:
            self._input_states.append(s)
            self._inputs_updated[s] = False

    def set_output_state(self, state, force=False):
        if self._output_state is not None and not force:
            raise RuntimeError("Output state is already set (use force=True to override)")
        self._output_state = state

    def input_trigger(self, input):
        """
        Trigger the :class:`~Action` from a specific input, typically when the input has been updated. The action
        only executes once all inputs have been updated (and no blocking is active).
        :param input: input :class:`~State`
        :return: True if the action executed, False otherwise
        """
        # remember updated inputs
        try:
            self._inputs_updated[input] = True
        except KeyError:
            raise KeyError("Given input is not an input of this node")
        # return False if any inputs have not been updated yet
        for is_updated in self._inputs_updated.values():
            if not is_updated:
                return False
        # reset update flags
        for key in self._inputs_updated.keys():
            self._inputs_updated[key] = False
        # ignore the trigger if blocking is active
        if self._block is True:
            return False
        if self._block > 0:
            self._block -= 1
            return False
        # execute otherwise
        self._execute()
        # return True to signal execution
        return True

    def _execute(self):
        """
        Execute the :class:`~Action` and pass the output on to the output :class:`~State`.
        """
        out = self.execute()
        # update output
        self._output_state.update(out)
        return out

    def execute(self):
        # default action: concatenate the input values (override in derived classes for other behaviour)
        out = None
        for i in self._input_states:
            if out is None:
                out = i.value
            else:
                out = out + i.value
        return out


class LlmAction(Action):

    def __init__(self, llm: LLM, prompt_parts):
        super().__init__()
        self.llm = llm
        self.prompt_parts = list(prompt_parts)

    def execute(self):
        if len(self.prompt_parts) != len(self._input_states) + 1:
            raise RuntimeError(f"Number of prompt parts ({len(self.prompt_parts)}) must be one more than "
                               f"the number of input states ({len(self._input_states)})")
        # interleave prompt parts with input values: part0 + in0 + part1 + in1 + ... + partN
        prompt = self.prompt_parts[0]
        for i, p in zip(self._input_states, self.prompt_parts[1:]):
            prompt += i.value + p
        return self.llm(prompt)


class HistoryAction(Action):

    def __init__(self):
        super().__init__()
        self.history = []

    def execute(self):
        # append the current input values as a new entry and render the full history
        self.history.append(tuple(i.value for i in self._input_states))
        out = ""
        for msgs in self.history:
            for i, msg in enumerate(msgs):
                out += f"Person {i + 1}: {msg}\n\n"
        return out
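
The trigger logic above only executes an action once every one of its inputs has reported an update, and
blocking swallows would-be executions. A minimal sketch of that behaviour (assuming ``Action`` is re-exported at
the package top level; ``StubState`` is a hypothetical stand-in exposing only the ``value``/``update`` interface
the base class relies on):

from metaprompting import Action

class StubState:
    # hypothetical stand-in exposing only the value/update interface used by Action
    def __init__(self, value=""):
        self.value = value

    def update(self, value):
        self.value = value

a, b, out = StubState("foo"), StubState("bar"), StubState()
action = Action()  # the default execute() concatenates the input values
action.add_input_states([a, b])
action.set_output_state(out)

assert action.input_trigger(a) is False  # b has not been updated yet
assert action.input_trigger(b) is True   # all inputs updated -> executes
assert out.value == "foobar"

action.block(1)                          # swallow the next execution
action.input_trigger(a)
assert action.input_trigger(b) is False  # blocked once; the next full round of updates will execute again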