diff --git a/kai/kai_trace.py b/kai/kai_trace.py
index a7e39770..8900bf3a 100644
--- a/kai/kai_trace.py
+++ b/kai/kai_trace.py
@@ -128,7 +128,7 @@ def prompt(self, current_batch_count: int, prompt: str, pb_vars: dict):
             f.write(json.dumps(data, indent=4))

     @enabled_check
-    def llm_result(
+    def llm_result_with_codeblocks(
         self, current_batch_count: int, retry_count: int, result: BaseMessage
     ):
         result_file_path = os.path.join(
@@ -136,10 +136,24 @@ def llm_result(
         )
         os.makedirs(os.path.dirname(result_file_path), exist_ok=True)
         with open(result_file_path, "w") as f:
-            f.write(result.pretty_repr())
+            f.write(str(result))

     @enabled_check
-    def response_metadata(
+    def llm_result_without_codeblocks(
+        self, current_batch_count: int, retry_count: int, result: BaseMessage
+    ):
+        result_file_path = os.path.join(
+            self.trace_dir,
+            f"{current_batch_count}",
+            f"{retry_count}",
+            "llm_result_without_codeblocks",
+        )
+        os.makedirs(os.path.dirname(result_file_path), exist_ok=True)
+        with open(result_file_path, "w") as f:
+            f.write(str(result))
+
+    @enabled_check
+    def response_metadata_for_response_with_codeblocks(
         self, current_batch_count: int, retry_count: int, response_metadata: dict
     ):
         response_metadata_file_path = os.path.join(
@@ -152,6 +166,20 @@ def response_metadata(
         with open(response_metadata_file_path, "w") as f:
             f.write(json.dumps(response_metadata, indent=4, default=str))

+    @enabled_check
+    def response_metadata_for_response_without_codeblocks(
+        self, current_batch_count: int, retry_count: int, response_metadata: dict
+    ):
+        response_metadata_file_path = os.path.join(
+            self.trace_dir,
+            f"{current_batch_count}",
+            f"{retry_count}",
+            "response_metadata_without_codeblocks.json",
+        )
+        os.makedirs(os.path.dirname(response_metadata_file_path), exist_ok=True)
+        with open(response_metadata_file_path, "w") as f:
+            f.write(json.dumps(response_metadata, indent=4, default=str))
+
     @enabled_check
     def estimated_tokens(
         self,
diff --git a/kai/service/kai_application/kai_application.py b/kai/service/kai_application/kai_application.py
index 1a65b2a2..c5494ff6 100644
--- a/kai/service/kai_application/kai_application.py
+++ b/kai/service/kai_application/kai_application.py
@@ -238,27 +238,53 @@ def get_incident_solutions_for_file(
                 application_name,
                 f'{file_name.replace("/", "-")}',
             ):
-                llm_result = self.model_provider.llm.invoke(prompt)
-                trace.llm_result(count, retry_attempt_count, llm_result)
+                llm_request = [("human", prompt)]
+                llm_result = self.model_provider.llm.invoke(llm_request)
+                content = parse_file_solution_content(
+                    src_file_language, str(llm_result.content)
+                )
+
+                if len(content.updated_file) == 0:
+                    trace.llm_result_without_codeblocks(
+                        count, retry_attempt_count, llm_result.content
+                    )
+                    trace.response_metadata_for_response_without_codeblocks(
+                        count, retry_attempt_count, llm_result.response_metadata
+                    )
+                    self.has_tokens_exceeded(
+                        llm_result.response_metadata,
+                        estimated_prompt_tokens,
+                        file_name,
+                    )
+                    llm_request.append(
+                        (
+                            "human",
+                            "I request you to generate a complete response.",
+                        )
+                    )
+                    llm_result = self.model_provider.llm.invoke(llm_request)
+                    content = parse_file_solution_content(
+                        src_file_language, str(llm_result.content)
+                    )
+
+                trace.llm_result_with_codeblocks(
+                    count, retry_attempt_count, llm_result.content
+                )
+                trace.response_metadata_for_response_with_codeblocks(
+                    count, retry_attempt_count, llm_result.response_metadata
+                )
                 trace.estimated_tokens(
                     count,
                     retry_attempt_count,
                     estimated_prompt_tokens,
                     self.tiktoken_encoding_base,
                 )
-                trace.response_metadata(
-                    count, retry_attempt_count, llm_result.response_metadata
-                )
                 self.has_tokens_exceeded(
                     llm_result.response_metadata,
                     estimated_prompt_tokens,
                     file_name,
                 )
-                content = parse_file_solution_content(
-                    src_file_language, str(llm_result.content)
-                )
-
                 if not content.updated_file:
                     raise Exception(
                         f"Error in LLM Response: The LLM did not provide an updated file for {file_name}"
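
The retry logic introduced in `kai_application.py` can be illustrated in isolation. The sketch below is not the kai implementation: `FakeLLM`, `FakeResult`, and `extract_updated_file` are hypothetical stand-ins for the model provider and `parse_file_solution_content`; it only demonstrates the "parse, and if no code block came back, append a follow-up human message and invoke again" flow that the diff adds.

```python
# Minimal sketch of the retry-on-incomplete-response flow, assuming
# stand-in classes in place of the real kai model provider and parser.
from dataclasses import dataclass
import re


@dataclass
class FakeResult:
    content: str
    response_metadata: dict


class FakeLLM:
    """Returns an incomplete answer on the first call, a complete one after."""

    def __init__(self) -> None:
        self.calls = 0

    def invoke(self, messages: list[tuple[str, str]]) -> FakeResult:
        self.calls += 1
        if self.calls == 1:
            return FakeResult("Here is a partial answer with no code block.", {})
        return FakeResult("```java\nclass Foo {}\n```", {})


def extract_updated_file(text: str) -> str:
    """Stand-in for parse_file_solution_content: grab the first fenced block."""
    match = re.search(r"```\w*\n(.*?)```", text, re.DOTALL)
    return match.group(1) if match else ""


llm = FakeLLM()
llm_request = [("human", "Fix the incidents in this file ...")]
llm_result = llm.invoke(llm_request)
updated_file = extract_updated_file(llm_result.content)

if not updated_file:
    # Same recovery step as in the diff: append a follow-up human message
    # and re-invoke the model with the full conversation so far.
    llm_request.append(("human", "I request you to generate a complete response."))
    llm_result = llm.invoke(llm_request)
    updated_file = extract_updated_file(llm_result.content)

print(updated_file)  # -> "class Foo {}\n"
```

The key design point the sketch mirrors is that the prompt is now sent as a message list rather than a bare string, so the follow-up request keeps the original prompt and the incomplete reply as context instead of starting over.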