Skip to content

Commit

Permalink
streaming descriptions are back
Browse files Browse the repository at this point in the history
  • Loading branch information
neph1 committed Jan 6, 2024
1 parent 45974e1 commit c1b2c9a
Show file tree
Hide file tree
Showing 11 changed files with 137 additions and 67 deletions.
2 changes: 1 addition & 1 deletion stories/teaparty/story_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -27,5 +27,5 @@
"type": "A whimsical and humoristic tale of tea and madness. Guests are so busy with their own problems that it's difficult to make yourself heard.",
"world_info": "",
"world_mood": 0,
"custom_resources" : "True"
"custom_resources": true
}
93 changes: 83 additions & 10 deletions stories/teaparty/world.json
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,9 @@
"personality": "",
"occupation": "",
"age": 0,
"type": "Npc",
"type": "Mob",
"race": "",
"gender": "m",
"gender": "male",
"level": 1,
"stats": {
"ac": 0,
Expand All @@ -65,7 +65,16 @@
"strength": 3,
"dexterity": 3,
"unarmed_attack": "FISTS"
}
},
"memory": {
"known_locations": {},
"observed_events": [],
"conversations": [],
"sentiments": {},
"action_history": [],
"planned_actions": [],
"goal": null
}
},
"duchess": {
"location": "Living room",
Expand All @@ -79,9 +88,9 @@
"personality": "",
"occupation": "",
"age": 0,
"type": "Npc",
"type": "Mob",
"race": "",
"gender": "f",
"gender": "female",
"level": 1,
"stats": {
"ac": 0,
Expand All @@ -98,7 +107,16 @@
"strength": 3,
"dexterity": 3,
"unarmed_attack": "FISTS"
}
},
"memory": {
"known_locations": {},
"observed_events": [],
"conversations": [],
"sentiments": {},
"action_history": [],
"planned_actions": [],
"goal": null
}
},
"ace of spades": {
"location": "Living room",
Expand All @@ -112,9 +130,9 @@
"personality": "",
"occupation": "",
"age": 0,
"type": "Npc",
"type": "Mob",
"race": "",
"gender": "m",
"gender": "male",
"level": 1,
"stats": {
"ac": 0,
Expand All @@ -131,9 +149,64 @@
"strength": 3,
"dexterity": 3,
"unarmed_attack": "FISTS"
}
},
"memory": {
"known_locations": {},
"observed_events": [],
"conversations": [],
"sentiments": {},
"action_history": [],
"planned_actions": [],
"goal": null
}
}
},
"items": {}
},
"catalogue": {
"items": [
{
"name": "dagger",
"title": "Dagger",
"descr": "",
"short_descr": "A steel dagger",
"value": 0,
"rent": 0.0,
"weight": 0.0,
"takeable": true,
"location": "",
"wc": 0,
"base_damage": 1,
"bonus_damage": 0,
"weapon_type": "ONE_HANDED"
},
{
"name": "club",
"title": "Club",
"descr": "",
"short_descr": "A wooden club",
"value": 0,
"rent": 0.0,
"weight": 0.0,
"takeable": true,
"location": "",
"wc": 0,
"base_damage": 1,
"bonus_damage": 0,
"weapon_type": "ONE_HANDED"
},
{
"name": "note",
"title": "Note",
"descr": "",
"short_descr": "",
"value": 0,
"rent": 0.0,
"weight": 0.0,
"takeable": true,
"location": ""
}
],
"creatures": []
}
}
}
51 changes: 15 additions & 36 deletions tale/llm/character.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,9 @@
from tale.base import Location
from tale.errors import LlmResponseException
from tale.llm import llm_config
from tale.llm.contexts.ActionContext import ActionContext
from tale.llm.llm_io import IoUtil
from tale.llm.contexts.DialogueContext import DialogueContext
from tale.load_character import CharacterV2


Expand All @@ -19,39 +21,33 @@ def __init__(self, backend: str, io_util: IoUtil, default_body: dict):
self.pre_prompt = llm_config.params['PRE_PROMPT']
self.dialogue_prompt = llm_config.params['DIALOGUE_PROMPT']
self.character_prompt = llm_config.params['CREATE_CHARACTER_PROMPT']
self.item_prompt = llm_config.params['ITEM_PROMPT']
self.backend = backend
self.io_util = io_util
self.default_body = default_body
self.analysis_body = json.loads(llm_config.params['ANALYSIS_BODY'])
self.travel_prompt = llm_config.params['TRAVEL_PROMPT']
self.reaction_prompt = llm_config.params['REACTION_PROMPT']
self.idle_action_prompt = llm_config.params['IDLE_ACTION_PROMPT']
self.free_form_action_prompt = llm_config.params['ACTION_PROMPT']
self.json_grammar = llm_config.params['JSON_GRAMMAR']
self.dialogue_template = llm_config.params['DIALOGUE_TEMPLATE']
self.action_template = llm_config.params['ACTION_TEMPLATE']

def generate_dialogue(self, conversation: str,
character_card: str,
character_name: str,
target: str,
target_description: str='',
def generate_dialogue(self,
context: DialogueContext,
conversation: str,
sentiment = '',
location_description = '',
story_context = '',
event_history = '',
short_len : bool=False):
prompt = self.pre_prompt

#formatted_conversation = llm_config.params['USER_START']
formatted_conversation = conversation.replace('<break>', '\n')#llm_config.params['USER_END'] + '\n' + llm_config.params['USER_START'])
prompt += self.dialogue_prompt.format(
story_context=story_context,
location=location_description,
context=context.to_prompt_string(),
previous_conversation=formatted_conversation,
character2_description=character_card,
character2=character_name,
character1=target,
character1_description=target_description,
character2=context.speaker_name,
character1=context.target_name,
dialogue_template=self.dialogue_template,
history=event_history,
sentiment=sentiment)
request_body = deepcopy(self.default_body)
Expand Down Expand Up @@ -150,29 +146,12 @@ def perform_reaction(self, action: str, character_name: str, acting_character_na
text = self.io_util.synchronous_request(request_body, prompt=prompt)
return parse_utils.trim_response(text) + "\n"

def free_form_action(self, story_context: str, story_type: str, location: Location, character_name: str, character_card: str = '', event_history: str = ''):
actions = ', '.join(['move, say, attack, wear, remove, wield, take, eat, drink, emote'])
characters = {}
for living in location.livings:
if living.visible and living.name != character_name.lower():
if living.alive:
characters[living.name] = living.short_description
else:
characters[living.name] = f"{living.short_description} (dead)"
exits = location.exits.keys()
items = [item.name for item in location.items if item.visible]
def free_form_action(self, action_context: ActionContext):
prompt = self.pre_prompt
prompt += self.free_form_action_prompt.format(
story_context=story_context,
story_type=story_type,
actions=actions,
location=location.name,
exits=exits,
location_items=items,
characters=json.dumps(characters),
history=event_history,
character_name=character_name,
character=character_card)
context=action_context.to_prompt_string(),
character_name=action_context.character_name,
action_template=self.action_template)
request_body = deepcopy(self.default_body)
request_body['grammar'] = self.json_grammar
try :
Expand Down
24 changes: 17 additions & 7 deletions tale/llm/llm_io.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import re
import requests
import time
import aiohttp
Expand Down Expand Up @@ -51,13 +52,13 @@ def asynchronous_request(self, request_body: dict, prompt: str) -> str:
return self.synchronous_request(request_body, prompt)
return self.stream_request(request_body, wait=True, prompt=prompt)

def stream_request(self, request_body: dict, prompt: str, io = None, wait: bool = False) -> str:
    """Kick off a streaming generation on the backend and return the final text.

    Only the kobold_cpp backend exposes a streaming endpoint; any other
    backend raises NotImplementedError.  When `wait` is True the text is
    collected only after generation finishes; otherwise incremental text
    is forwarded to `io` as it arrives (see _do_process_result).
    Returns '' if the stream request could not be started.
    """
    if self.backend != 'kobold_cpp':
        raise NotImplementedError("Currently does not support streaming requests for OpenAI")
    self._set_prompt(request_body, prompt)
    # _do_stream_request returns truthy only when the backend accepted the job
    result = asyncio.run(self._do_stream_request(self.url + self.stream_endpoint, request_body))
    if result:
        return self._do_process_result(self.url + self.data_endpoint, io, wait)
    return ''

async def _do_stream_request(self, url: str, request_body: dict,) -> bool:
Expand All @@ -70,7 +71,7 @@ async def _do_stream_request(self, url: str, request_body: dict,) -> bool:
# Handle errors
print("Error occurred:", response.status)

def _do_process_result(self, url, player_io: TextBuffer = None, io = None, wait: bool = False) -> str:
def _do_process_result(self, url, io = None, wait: bool = False) -> str:
""" Process the result from the stream endpoint """
tries = 0
old_text = ''
Expand All @@ -84,10 +85,9 @@ def _do_process_result(self, url, player_io: TextBuffer = None, io = None, wait:
continue
if not wait:
new_text = text[len(old_text):]
player_io.print(new_text, end=False, format=True, line_breaks=False)
io.write_output()
io.output_no_newline(new_text, new_paragraph=False)
old_text = text

io.output_no_newline("")
return old_text

def _parse_kobold_result(self, result: str) -> str:
Expand All @@ -108,7 +108,17 @@ def _set_prompt(self, request_body: dict, prompt: str) -> dict:
if self.user_end_prompt:
prompt = prompt + self.user_end_prompt
if self.backend == 'kobold_cpp':
context = self._extract_context(prompt)
request_body['memory'] = context
request_body['prompt'] = prompt
else :
request_body['messages'][1]['content'] = prompt
return request_body
return request_body

def _extract_context(self, full_string):
pattern = re.escape('<context>') + "(.*?)" + re.escape('</context>')
match = re.search(pattern, full_string, re.DOTALL)
if match:
return '<context>' + match.group(1) + '</context>'
else:
return ''
4 changes: 1 addition & 3 deletions tale/llm/llm_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,11 +100,9 @@ def evoke(self, player_io: TextBuffer, message: str, short_len : bool=False, rol
text = self.io_util.synchronous_request(request_body, prompt=prompt)
llm_cache.cache_look(text, text_hash_value)
return output_template.format(message=message, text=text), rolling_prompt
text = self.io_util.stream_request(request_body=request_body, player_io=player_io, prompt=prompt, io=self.connection)
player_io.print(output_template.format(message=message, text=text), end=False, format=True, line_breaks=False)

text = self.io_util.stream_request(request_body=request_body, player_io=player_io, prompt=prompt, io=self.connection)
llm_cache.cache_look(text, text_hash_value)

return '\n', rolling_prompt

def generate_dialogue(self, conversation: str,
Expand Down
4 changes: 2 additions & 2 deletions tale/player.py
Original file line number Diff line number Diff line change
Expand Up @@ -339,9 +339,9 @@ def output(self, *lines: str) -> None:
"""directly writes the given text to the player's screen, without buffering and formatting/wrapping"""
self.io.output(*lines)

def output_no_newline(self, line: str, new_paragraph = True) -> None:
    """Similar to output() but writes a single line, without newline at the end.

    `new_paragraph` is forwarded to the io layer; False appends the text
    to the current paragraph instead of starting a new one (used for
    streamed LLM output).
    """
    # Diff artifact removed: the pre-change call without new_paragraph
    # duplicated this line and would have discarded the flag.
    self.io.output_no_newline(self.io.smartquotes(line), new_paragraph)

def input_direct(self, prompt: str) -> str:
"""
Expand Down
4 changes: 2 additions & 2 deletions tale/tio/console_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,12 +177,12 @@ def output(self, *lines: str) -> None:
print(self._apply_style(line, self.do_styles))
sys.stdout.flush()

def output_no_newline(self, text: str, new_paragraph = True) -> None:
    """Like output, but just writes a single line, without end-of-line.

    `new_paragraph` is forwarded to the base class buffer handling.
    """
    if prompt_toolkit and self.do_prompt_toolkit:
        # prompt_toolkit path already renders via output()
        self.output(text)
    else:
        # NOTE(review): source indentation was lost in the scrape; print/flush
        # are placed in the else-branch since the prompt_toolkit path prints
        # through output() — confirm against the repository.
        super().output_no_newline(text, new_paragraph)
        print(self._apply_style(text, self.do_styles), end="")
        sys.stdout.flush()

Expand Down
9 changes: 6 additions & 3 deletions tale/tio/if_browser_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,12 +157,15 @@ def output(self, *lines: str) -> None:
self.output_no_newline(line)
self.__new_html_available.set()

def output_no_newline(self, text: str, new_paragraph = True) -> None:
    """Convert *text* to HTML and queue it for the browser without a newline.

    A bare "\\n" becomes a <br>.  With new_paragraph=True the text is
    wrapped in a <p> element; with False it is appended to the current
    paragraph (streamed output).
    """
    super().output_no_newline(text, new_paragraph)
    text = self.convert_to_html(text)
    if text == "\n":
        text = "<br>"
    if new_paragraph:
        self.__html_to_browser.append("<p>" + text + "</p>\n")
    else:
        # NOTE(review): this replaces the two-character sequence backslash+n,
        # not a real newline — confirm that convert_to_html emits escaped
        # newlines, otherwise the intent was probably "\n".
        self.__html_to_browser.append(text.replace("\\n", "<br>"))
    # wake the long-poll/websocket side that ships HTML to the client
    self.__new_html_available.set()

def convert_to_html(self, line: str) -> str:
Expand Down
2 changes: 1 addition & 1 deletion tale/tio/iobase.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ def output(self, *lines: str) -> None:
"""
self.last_output_line = lines[-1]

def output_no_newline(self, text: str) -> None:
def output_no_newline(self, text: str, new_paragraph = True) -> None:
"""
Like output, but just writes a single line, without end-of-line.
Implement specific behavior in subclass (but don't forget to call base method)
Expand Down
4 changes: 2 additions & 2 deletions tale/tio/tkinter_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,9 @@ def output(self, *lines: str) -> None:
for line in lines:
self.gui.write_line(line)

def output_no_newline(self, text: str, new_paragraph = True) -> None:
    """Like output, but just writes a single line, without end-of-line.

    `new_paragraph` is forwarded to the base class; the Tk widget itself
    writes whole lines regardless.
    """
    # Diff artifact removed: stale pre-change def/super lines dropped.
    super().output_no_newline(text, new_paragraph)
    self.gui.write_line(text)


Expand Down
7 changes: 7 additions & 0 deletions tests/test_player.py
Original file line number Diff line number Diff line change
Expand Up @@ -582,6 +582,13 @@ def test_strip(self):
output.print(" 1 ", format=False)
self.assertEqual([(" 1 \n", False)], output.get_paragraphs())

def test_no_line_break(self):
    """Consecutive prints with line_breaks=False merge into one paragraph."""
    output = TextBuffer()
    for piece in ("1", "2", "3"):
        output.print(piece, line_breaks=False)
    self.assertEqual([("123\n", True)], output.get_paragraphs())


class TestCharacterBuilders(unittest.TestCase):
def setUp(self):
Expand Down

0 comments on commit c1b2c9a

Please sign in to comment.