Skip to content

Commit

Permalink
Fix error; change tested
Browse files Browse the repository at this point in the history
  • Loading branch information
b10902118 committed Nov 29, 2024
1 parent b0dd600 commit 2e95c63
Showing 1 changed file with 9 additions and 4 deletions.
13 changes: 9 additions & 4 deletions lightrag/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,9 +70,14 @@ async def openai_complete_if_cache(
if if_cache_return is not None:
return if_cache_return["return"]

response = await openai_async_client.chat.completions.create(
model=model, messages=messages, **kwargs
)
if "response_format" in kwargs:
response = await openai_async_client.beta.chat.completions.parse(
model=model, messages=messages, **kwargs
)
else:
response = await openai_async_client.chat.completions.create(
model=model, messages=messages, **kwargs
)
content = response.choices[0].message.content
if r"\u" in content:
content = content.encode("utf-8").decode("unicode_escape")
Expand Down Expand Up @@ -545,7 +550,7 @@ async def ollama_model_complete(
prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
if keyword_extraction:
kwargs["response_format"] = "json"
kwargs["format"] = "json"
model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
return await ollama_model_if_cache(
model_name,
Expand Down

0 comments on commit 2e95c63

Please sign in to comment.