Commit
get rid of left over prints
neph1 committed Jan 20, 2024
1 parent 6fc646d commit 1b138dd
Showing 3 changed files with 0 additions and 5 deletions.
tale/llm/io_adapters.py (2 changes: 0 additions, 2 deletions)

@@ -145,6 +145,4 @@ def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict
         prompt = prompt.replace('<context>{context}</context>', '')
         request_body['messages'][0]['content'] = f'<context>{context}</context>'
         request_body['messages'][1]['content'] = prompt
-        print("context " + context)
-        print (request_body)
         return request_body
tale/llm/llm_io.py (1 change: 0 additions, 1 deletion)

@@ -44,7 +44,6 @@ def asynchronous_request(self, request_body: dict, prompt: str, context: str = '
        return self.stream_request(request_body, wait=True, prompt=prompt, context=context)

    def stream_request(self, request_body: dict, prompt: str, context: str = '', io = None, wait: bool = False) -> str:
-        print("context 1 " + context)
        if self.io_adapter:
            request_body = self.io_adapter.set_prompt(request_body, prompt, context)
            return self.io_adapter.stream_request(request_body, io, wait)
tale/llm/llm_utils.py (2 changes: 0 additions, 2 deletions)

@@ -86,15 +86,13 @@ def evoke(self, message: str, short_len: bool=False, rolling_prompt: str = '', a
        cached_look = llm_cache.get_looks([text_hash_value])
        if cached_look:
            return output_template.format(message=message, text=cached_look), rolling_prompt
-        print("context 2 " + self.__story_context)
        trimmed_message = parse_utils.remove_special_chars(str(message))
        story_context = EvokeContext(story_context=self.__story_context, history=rolling_prompt if not skip_history or alt_prompt else '', extra_context=extra_context)
        prompt = self.pre_prompt
        prompt += alt_prompt or (self.evoke_prompt.format(
            context = '{context}',
            max_words=self.word_limit if not short_len else self.short_word_limit,
            input_text=str(trimmed_message)))
-        print("context 2 " + story_context.to_prompt_string())
        request_body = deepcopy(self.default_body)

        if not self.stream:
