diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index 87a72a98..533be818 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -719,16 +719,13 @@ async def lifespan(app: FastAPI):
     # Create working directory if it doesn't exist
     Path(args.working_dir).mkdir(parents=True, exist_ok=True)
 
-    if args.llm_binding_host == "lollms" or args.embedding_binding == "lollms":
+    if args.llm_binding == "lollms" or args.embedding_binding == "lollms":
         from lightrag.llm.lollms import lollms_model_complete, lollms_embed
-    if args.llm_binding_host == "ollama" or args.embedding_binding == "ollama":
+    if args.llm_binding == "ollama" or args.embedding_binding == "ollama":
         from lightrag.llm.ollama import ollama_model_complete, ollama_embed
-    if args.llm_binding_host == "openai" or args.embedding_binding == "openai":
+    if args.llm_binding == "openai" or args.embedding_binding == "openai":
         from lightrag.llm.openai import openai_complete_if_cache, openai_embed
-    if (
-        args.llm_binding_host == "azure_openai"
-        or args.embedding_binding == "azure_openai"
-    ):
+    if args.llm_binding == "azure_openai" or args.embedding_binding == "azure_openai":
         from lightrag.llm.azure_openai import (
             azure_openai_complete_if_cache,
             azure_openai_embed,
diff --git a/lightrag/operate.py b/lightrag/operate.py
index 75bb2c25..af66eee6 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -589,6 +589,9 @@ async def kg_query(
         query, query_param, global_config, hashing_kv
     )
 
+    logger.info(f"High-level keywords: {hl_keywords}")
+    logger.info(f"Low-level keywords: {ll_keywords}")
+
    # Handle empty keywords
    if hl_keywords == [] and ll_keywords == []:
        logger.warning("low_level_keywords and high_level_keywords is empty")
@@ -1534,9 +1537,18 @@ async def naive_query(
    if query_param.only_need_context:
        return section
 
+    # Process conversation history
+    history_context = ""
+    if query_param.conversation_history:
+        history_context = get_conversation_turns(
+            query_param.conversation_history, query_param.history_turns
+        )
+
    sys_prompt_temp = PROMPTS["naive_rag_response"]
    sys_prompt = sys_prompt_temp.format(
-        content_data=section, response_type=query_param.response_type
+        content_data=section,
+        response_type=query_param.response_type,
+        history=history_context,
    )
 
    if query_param.only_need_prompt:
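
For context on the lightrag_server.py change: `args.llm_binding_host` holds the backend server URL while `args.llm_binding` holds the binding name, so the old checks compared a URL against names like "ollama" and never matched, silently skipping the conditional imports. A minimal sketch of the bug and the fix, using hypothetical stand-in values rather than the real argparse setup:

    # Stand-in for the parsed CLI args (values are illustrative, not from the repo).
    from types import SimpleNamespace

    args = SimpleNamespace(
        llm_binding="ollama",                       # binding *name*
        llm_binding_host="http://localhost:11434",  # binding *URL*, never equals "ollama"
        embedding_binding="ollama",
    )

    # Old (buggy) condition: compares the URL against a binding name, always False.
    assert (args.llm_binding_host == "ollama") is False

    # Fixed condition: compares the binding name, so the import branch is taken.
    if args.llm_binding == "ollama" or args.embedding_binding == "ollama":
        print("would import lightrag.llm.ollama")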
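
And for the naive_query change in operate.py: the conversation history is now rendered into the system prompt, which means the `naive_rag_response` template must carry a `{history}` placeholder alongside `{content_data}` and `{response_type}`, otherwise `str.format` raises a KeyError. A rough sketch of the data flow; the body of `get_conversation_turns` below is an assumption (only its two-argument signature is visible in the diff), and the template is a hypothetical stand-in for PROMPTS["naive_rag_response"]:

    # Assumed implementation: conversation_history is a list of {"role", "content"}
    # dicts and history_turns caps how many recent turns are kept.
    def get_conversation_turns(conversation_history, history_turns):
        recent = conversation_history[-history_turns * 2:]  # one turn = user + assistant
        return "\n".join(f"{m['role']}: {m['content']}" for m in recent)

    # Hypothetical template standing in for PROMPTS["naive_rag_response"]; the point
    # is that it must now contain {history} for the new format() call to succeed.
    sys_prompt_temp = (
        "---Conversation History---\n{history}\n"
        "---Documents---\n{content_data}\n"
        "Respond as: {response_type}"
    )

    history_context = get_conversation_turns(
        [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}],
        history_turns=3,
    )
    sys_prompt = sys_prompt_temp.format(
        content_data="...retrieved chunks...",
        response_type="Multiple Paragraphs",
        history=history_context,
    )
    print(sys_prompt)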