From 1b138ddec149ee8cd57b9ef911761d25c8f16c21 Mon Sep 17 00:00:00 2001
From: rickard
Date: Sat, 20 Jan 2024 21:55:05 +0100
Subject: [PATCH] get rid of left over prints

---
 tale/llm/io_adapters.py | 2 --
 tale/llm/llm_io.py      | 1 -
 tale/llm/llm_utils.py   | 2 --
 3 files changed, 5 deletions(-)

diff --git a/tale/llm/io_adapters.py b/tale/llm/io_adapters.py
index c967a1f2..7a9022ac 100644
--- a/tale/llm/io_adapters.py
+++ b/tale/llm/io_adapters.py
@@ -145,6 +145,4 @@ def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict
         prompt = prompt.replace('{context}', '')
         request_body['messages'][0]['content'] = f'{context}'
         request_body['messages'][1]['content'] = prompt
-        print("context " + context)
-        print (request_body)
         return request_body
\ No newline at end of file
diff --git a/tale/llm/llm_io.py b/tale/llm/llm_io.py
index 2b089395..3af1815a 100644
--- a/tale/llm/llm_io.py
+++ b/tale/llm/llm_io.py
@@ -44,7 +44,6 @@ def asynchronous_request(self, request_body: dict, prompt: str, context: str = '
         return self.stream_request(request_body, wait=True, prompt=prompt, context=context)
 
     def stream_request(self, request_body: dict, prompt: str, context: str = '', io = None, wait: bool = False) -> str:
-        print("context 1 " + context)
         if self.io_adapter:
             request_body = self.io_adapter.set_prompt(request_body, prompt, context)
             return self.io_adapter.stream_request(request_body, io, wait)
diff --git a/tale/llm/llm_utils.py b/tale/llm/llm_utils.py
index 3a19716c..a59afaa4 100644
--- a/tale/llm/llm_utils.py
+++ b/tale/llm/llm_utils.py
@@ -86,7 +86,6 @@ def evoke(self, message: str, short_len: bool=False, rolling_prompt: str = '', a
         cached_look = llm_cache.get_looks([text_hash_value])
         if cached_look:
             return output_template.format(message=message, text=cached_look), rolling_prompt
-        print("context 2 " + self.__story_context)
         trimmed_message = parse_utils.remove_special_chars(str(message))
         story_context = EvokeContext(story_context=self.__story_context, history=rolling_prompt if not skip_history or alt_prompt else '', extra_context=extra_context)
         prompt = self.pre_prompt
@@ -94,7 +93,6 @@ def evoke(self, message: str, short_len: bool=False, rolling_prompt: str = '', a
             context = '{context}',
             max_words=self.word_limit if not short_len else self.short_word_limit,
             input_text=str(trimmed_message)))
-        print("context 2 " + story_context.to_prompt_string())
         request_body = deepcopy(self.default_body)
 
         if not self.stream: