
Commit

fixed-no-codeblocks-error
Signed-off-by: devjpt23 <[email protected]>
devjpt23 committed Oct 27, 2024
1 parent f2f6125 commit 8d204aa
Showing 2 changed files with 67 additions and 12 deletions.
34 changes: 31 additions & 3 deletions kai/kai_trace.py
@@ -128,18 +128,32 @@ def prompt(self, current_batch_count: int, prompt: str, pb_vars: dict):
             f.write(json.dumps(data, indent=4))
 
     @enabled_check
-    def llm_result(
+    def llm_result_with_codeblocks(
         self, current_batch_count: int, retry_count: int, result: BaseMessage
     ):
         result_file_path = os.path.join(
             self.trace_dir, f"{current_batch_count}", f"{retry_count}", "llm_result"
         )
         os.makedirs(os.path.dirname(result_file_path), exist_ok=True)
         with open(result_file_path, "w") as f:
-            f.write(result.pretty_repr())
+            f.write(str(result))
 
     @enabled_check
-    def response_metadata(
+    def llm_result_without_codeblocks(
+        self, current_batch_count: int, retry_count: int, result: BaseMessage
+    ):
+        result_file_path = os.path.join(
+            self.trace_dir,
+            f"{current_batch_count}",
+            f"{retry_count}",
+            "llm_result_without_codeblocks",
+        )
+        os.makedirs(os.path.dirname(result_file_path), exist_ok=True)
+        with open(result_file_path, "w") as f:
+            f.write(str(result))
+
+    @enabled_check
+    def response_metadata_for_response_with_codeblocks(
         self, current_batch_count: int, retry_count: int, response_metadata: dict
     ):
         response_metadata_file_path = os.path.join(
@@ -152,6 +166,20 @@ def response_metadata(
         with open(response_metadata_file_path, "w") as f:
             f.write(json.dumps(response_metadata, indent=4, default=str))
 
+    @enabled_check
+    def response_metadata_for_response_without_codeblocks(
+        self, current_batch_count: int, retry_count: int, response_metadata: dict
+    ):
+        response_metadata_file_path = os.path.join(
+            self.trace_dir,
+            f"{current_batch_count}",
+            f"{retry_count}",
+            "response_metadata_without_codeblocks.json",
+        )
+        os.makedirs(os.path.dirname(response_metadata_file_path), exist_ok=True)
+        with open(response_metadata_file_path, "w") as f:
+            f.write(json.dumps(response_metadata, indent=4, default=str))
+
     @enabled_check
     def estimated_tokens(
         self,
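For orientation, here is a minimal, hypothetical sketch of the on-disk layout these trace helpers produce. `MiniTrace` and the `logs/trace` path are stand-ins invented for this example, not part of the commit; only the method names and file names mirror the diff above.

```python
import os


class MiniTrace:
    """Stripped-down stand-in for the tracer in kai/kai_trace.py
    (an assumption for illustration, not the real class)."""

    def __init__(self, trace_dir: str):
        self.trace_dir = trace_dir

    def llm_result_with_codeblocks(self, batch: int, retry: int, result) -> None:
        # Mirrors the renamed method: complete responses land in "llm_result".
        path = os.path.join(self.trace_dir, f"{batch}", f"{retry}", "llm_result")
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w") as f:
            f.write(str(result))

    def llm_result_without_codeblocks(self, batch: int, retry: int, result) -> None:
        # Mirrors the new method: incomplete responses get their own file.
        path = os.path.join(
            self.trace_dir, f"{batch}", f"{retry}", "llm_result_without_codeblocks"
        )
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w") as f:
            f.write(str(result))


trace = MiniTrace("logs/trace")  # trace_dir is a made-up example path
trace.llm_result_without_codeblocks(0, 1, "first reply, no fenced code")
trace.llm_result_with_codeblocks(0, 1, "second reply, with fenced code")
# Layout produced:
#   logs/trace/0/1/llm_result_without_codeblocks
#   logs/trace/0/1/llm_result
```

Keeping the incomplete and complete responses side by side in the same batch/retry directory is what lets a reader of the trace see both the failed first attempt and the follow-up that fixed it.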
45 changes: 36 additions & 9 deletions kai/service/kai_application/kai_application.py
@@ -238,27 +238,54 @@ def get_incident_solutions_for_file(
                 application_name,
                 f'{file_name.replace("/", "-")}',
             ):
-                llm_result = self.model_provider.llm.invoke(prompt)
-                trace.llm_result(count, retry_attempt_count, llm_result)
+                llm_request = [("human", prompt)]
+                llm_result = self.model_provider.llm.invoke(llm_request)
+                content = parse_file_solution_content(
+                    src_file_language, str(llm_result.content)
+                )
+
+                # The LLM response must include code blocks (formatted within triple backticks) to be considered complete. Usually, the LLM responds with code blocks, but occasionally it fails to do so, as noted in issue #350 (https://github.com/konveyor/kai/issues/350). Complete responses are saved in the trace directory directly. For incomplete responses, an additional prompt is sent to the LLM, and the resulting complete response (with code blocks) is saved in the trace directory as a new file.
+                if len(content.updated_file) == 0:
+                    trace.llm_result_without_codeblocks(
+                        count, retry_attempt_count, llm_result.content
+                    )
+                    trace.response_metadata_for_response_without_codeblocks(
+                        count, retry_attempt_count, llm_result.response_metadata
+                    )
+                    self.has_tokens_exceeded(
+                        llm_result.response_metadata,
+                        estimated_prompt_tokens,
+                        file_name,
+                    )
+                    llm_request.append(
+                        (
+                            "human",
+                            "I request you to generate a complete response.",
+                        )
+                    )
+                    llm_result = self.model_provider.llm.invoke(llm_request)
+                    content = parse_file_solution_content(
+                        src_file_language, str(llm_result.content)
+                    )
+
+                trace.llm_result_with_codeblocks(
+                    count, retry_attempt_count, llm_result.content
+                )
+                trace.response_metadata_for_response_with_codeblocks(
+                    count, retry_attempt_count, llm_result.response_metadata
+                )
                 trace.estimated_tokens(
                     count,
                     retry_attempt_count,
                     estimated_prompt_tokens,
                     self.tiktoken_encoding_base,
                 )
-                trace.response_metadata(
-                    count, retry_attempt_count, llm_result.response_metadata
-                )
                 self.has_tokens_exceeded(
                     llm_result.response_metadata,
                     estimated_prompt_tokens,
                     file_name,
                 )
 
-                content = parse_file_solution_content(
-                    src_file_language, str(llm_result.content)
-                )
-
                 if not content.updated_file:
                     raise Exception(
                         f"Error in LLM Response: The LLM did not provide an updated file for {file_name}"
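The new control flow in get_incident_solutions_for_file reduces to a simple pattern: parse the reply, and if no fenced code block came back, append a single follow-up message and invoke the model again. Below is a minimal sketch of that pattern; `extract_code_block` and the `invoke` callable are hypothetical stand-ins for `parse_file_solution_content` and `self.model_provider.llm.invoke`, not the project's real API.

```python
import re

FENCE = "`" * 3  # literal triple backtick, built here to avoid nesting fences
CODE_BLOCK = re.compile(FENCE + r"[^\n]*\n(.*?)" + FENCE, re.DOTALL)


def extract_code_block(text: str) -> str:
    """Return the body of the first fenced block, or "" if none is present.
    A stand-in for what parse_file_solution_content exposes as updated_file."""
    match = CODE_BLOCK.search(text)
    return match.group(1) if match else ""


def get_updated_file(invoke, prompt: str) -> str:
    """Invoke the model once; if the reply lacks a fenced code block, append
    one follow-up message and re-invoke, as the diff above does.
    `invoke` is any callable taking a list of (role, text) messages."""
    llm_request = [("human", prompt)]
    reply = invoke(llm_request)
    updated_file = extract_code_block(reply)
    if not updated_file:  # incomplete response, cf. issue #350
        llm_request.append(("human", "I request you to generate a complete response."))
        reply = invoke(llm_request)
        updated_file = extract_code_block(reply)
    return updated_file


# Toy model that forgets the fences on its first attempt:
replies = iter(["updated file, but no fences", FENCE + "\nfixed file\n" + FENCE])
print(get_updated_file(lambda messages: next(replies), "migrate this file"))
```

Note the design choice the commit makes: the follow-up is appended to the same message list, so the model sees its own incomplete answer as conversation history rather than being re-prompted from scratch, and the single retry happens inside the existing retry_attempt_count loop rather than adding another outer loop.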
