feat - in local and global search system prompts, add support to enable/disable repeated instructions
ksachdeva committed Oct 8, 2024
1 parent 1868509 commit 69d9aa9
Showing 7 changed files with 45 additions and 4 deletions.
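A note on usage, not shown in the diff itself: because each new option is a bool with a default of True, Typer exposes it as a paired on/off switch. Under Typer's usual naming convention that is `--repeat-instructions / --no-repeat-instructions`, so a caller of the example app could disable the repetition with something like `python app/query.py global-search --no-repeat-instructions`, assuming the commands are named after the functions below.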
22 changes: 19 additions & 3 deletions examples/simple-app/app/query.py
@@ -72,6 +72,10 @@ def global_search(
         None, help="Context window size for ollama model"
     ),
     show_references: bool = typer.Option(True, help="Show references in the output"),  # noqa: FBT001, FBT003
+    repeat_instructions: bool = typer.Option(  # noqa: FBT001
+        True,  # noqa: FBT003
+        help="Repeat instructions in the prompt",
+    ),
     enable_langsmith: bool = typer.Option(False, help="Enable Langsmith"),  # noqa: FBT001, FBT003
 ):
     if enable_langsmith:
@@ -94,6 +98,7 @@ def global_search(
             "Not Provided" if ollama_num_context is None else ollama_num_context,
         ],
         ["Show References", str(show_references)],
+        ["Repeat Instructions In Prompt", str(repeat_instructions)],
     ]
 )

@@ -113,14 +118,17 @@ def global_search(
             cache_dir,
             ollama_num_context=ollama_num_context,
         ),
-        prompt_builder=KeyPointsGeneratorPromptBuilder(show_references=show_references),
+        prompt_builder=KeyPointsGeneratorPromptBuilder(
+            show_references=show_references, repeat_instructions=repeat_instructions
+        ),
         context_builder=report_context_builder,
     )
 
     kp_aggregator = KeyPointsAggregator(
         llm=make_llm_instance(llm_type, llm_model, cache_dir),
         prompt_builder=KeyPointsAggregatorPromptBuilder(
-            show_references=show_references
+            show_references=show_references,
+            repeat_instructions=repeat_instructions,
         ),
         context_builder=KeyPointsContextBuilder(
             token_counter=TiktokenCounter(),
@@ -157,6 +165,10 @@ def local_search(
         None, help="Context window size for ollama model"
     ),
     show_references: bool = typer.Option(True, help="Show references in the output"),  # noqa: FBT001, FBT003
+    repeat_instructions: bool = typer.Option(  # noqa: FBT001
+        True,  # noqa: FBT003
+        help="Repeat instructions in the prompt",
+    ),
     enable_langsmith: bool = typer.Option(False, help="Enable Langsmith"),  # noqa: FBT001, FBT003
 ):
     if enable_langsmith:
@@ -183,6 +195,7 @@ def local_search(
             "Not Provided" if ollama_num_context is None else ollama_num_context,
         ],
         ["Show References", str(show_references)],
+        ["Repeat Instructions In Prompt", str(repeat_instructions)],
     ]
 )

@@ -231,7 +244,10 @@ def local_search(
     )
 
     local_search = LocalSearch(
-        prompt_builder=LocalSearchPromptBuilder(show_references=show_references),
+        prompt_builder=LocalSearchPromptBuilder(
+            show_references=show_references,
+            repeat_instructions=repeat_instructions,
+        ),
         llm=make_llm_instance(
             llm_type,
             llm_model,
src/langchain_graphrag/query/global_search/key_points_aggregator/_system_prompt.py
@@ -39,10 +39,14 @@
 {{response_type}}
 Add sections and commentary to the response as appropriate for the length and format. Style the response in markdown.
 ---Analyst Reports---
 {{report_data}}
+{{#repeat_instructions}}
 ---Goal---
 Generate a response of the target length and format that responds to the user's question, summarize all the reports from multiple analysts who focused on different parts of the dataset.
@@ -76,4 +80,5 @@
 {{response_type}}
 Add sections and commentary to the response as appropriate for the length and format. Style the response in markdown.
+{{/repeat_instructions}}
 """
src/langchain_graphrag/query/global_search/key_points_aggregator/prompt_builder.py
@@ -20,6 +20,7 @@ def __init__(
         system_prompt: str | None = None,
         system_prompt_path: Path | None = None,
         show_references: bool = True,
+        repeat_instructions: bool = True,
     ):
         self._system_prompt: str | None
         if system_prompt is None and system_prompt_path is None:
@@ -29,6 +30,7 @@
 
         self._system_prompt_path = system_prompt_path
         self._show_references = show_references
+        self._repeat_instructions = repeat_instructions
 
     def build(self) -> tuple[BasePromptTemplate, BaseOutputParser]:
         if self._system_prompt_path:
@@ -42,6 +44,7 @@ def build(self) -> tuple[BasePromptTemplate, BaseOutputParser]:
             partial_variables=dict(
                 response_type="Multiple Paragraphs",
                 show_references=self._show_references,
+                repeat_instructions=self._repeat_instructions,
             ),
             template_format="mustache",
         )
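A hypothetical usage sketch follows; the import path is inferred from the package layout and is not shown in this capture. The point it illustrates is that both booleans are captured at construction time and pre-bound through `partial_variables`, so the template returned by `build()` no longer expects them when it is formatted.

```python
# Import path inferred from the package layout; treat it as an assumption.
from langchain_graphrag.query.global_search.key_points_aggregator.prompt_builder import (
    KeyPointsAggregatorPromptBuilder,
)

builder = KeyPointsAggregatorPromptBuilder(
    show_references=True,
    repeat_instructions=False,  # state the goal only once, before the data
)

# build() returns the prompt template and its output parser; the booleans
# above are already baked into the template via partial_variables.
prompt_template, output_parser = builder.build()
```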
src/langchain_graphrag/query/global_search/key_points_generator/_system_prompt.py
@@ -47,6 +47,8 @@
 {{context_data}}
+{{#repeat_instructions}}
 ---Goal---
 Generate a response consisting of a list of key points that responds to the user's question, summarizing all relevant information in the input data tables.
@@ -83,4 +85,5 @@
         {"description": "Description of point 2 {{#show_references}}[Data: Reports (report ids)]{{/show_references}}", "score": score_value}
     ]
 }
+{{/repeat_instructions}}
 """
src/langchain_graphrag/query/global_search/key_points_generator/prompt_builder.py
@@ -20,6 +20,7 @@ def __init__(
         system_prompt: str | None = None,
         system_prompt_path: Path | None = None,
         show_references: bool = True,
+        repeat_instructions: bool = True,
     ):
         self._system_prompt: str | None
         if system_prompt is None and system_prompt_path is None:
@@ -29,6 +30,7 @@
 
         self._system_prompt_path = system_prompt_path
         self._show_references = show_references
+        self._repeat_instructions = repeat_instructions
 
     def build(self) -> tuple[BasePromptTemplate, BaseOutputParser]:
         if self._system_prompt_path:
@@ -40,7 +42,10 @@ def build(self) -> tuple[BasePromptTemplate, BaseOutputParser]:
         system_template = SystemMessagePromptTemplate.from_template(
             prompt,
             template_format="mustache",
-            partial_variables=dict(show_references=self._show_references),
+            partial_variables=dict(
+                show_references=self._show_references,
+                repeat_instructions=self._repeat_instructions,
+            ),
         )
 
         template = ChatPromptTemplate(
6 changes: 6 additions & 0 deletions src/langchain_graphrag/query/local_search/_system_prompt.py
@@ -33,10 +33,14 @@
 {{response_type}}
 Add sections and commentary to the response as appropriate for the length and format. Style the response in markdown.
 ---Data tables---
 {{context_data}}
+{{#repeat_instructions}}
 ---Goal---
 Generate a response of the target length and format that responds to the user's question, summarizing all information in the input data tables appropriate for the response length and format, and incorporating any relevant general knowledge.
@@ -64,4 +68,6 @@
 {{response_type}}
 Add sections and commentary to the response as appropriate for the length and format. Style the response in markdown.
+{{/repeat_instructions}}
 """
3 changes: 3 additions & 0 deletions src/langchain_graphrag/query/local_search/prompt_builder.py
@@ -20,6 +20,7 @@ def __init__(
         system_prompt: str | None = None,
         system_prompt_path: Path | None = None,
         show_references: bool = True,
+        repeat_instructions: bool = True,
     ):
         self._system_prompt: str | None
         if system_prompt is None and system_prompt_path is None:
@@ -29,6 +30,7 @@
 
         self._system_prompt_path = system_prompt_path
         self._show_references = show_references
+        self._repeat_instructions = repeat_instructions
 
     def build(self) -> tuple[BasePromptTemplate, BaseOutputParser]:
         if self._system_prompt_path:
@@ -42,6 +44,7 @@ def build(self) -> tuple[BasePromptTemplate, BaseOutputParser]:
             partial_variables=dict(
                 response_type="Multiple Paragraphs",
                 show_references=self._show_references,
+                repeat_instructions=self._repeat_instructions,
            ),
             template_format="mustache",
         )
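One detail worth keeping in mind, visible in the builders above: when a custom `system_prompt` or `system_prompt_path` is supplied, `build()` uses that text instead of the packaged prompt, so `repeat_instructions` only has an effect if the custom prompt itself contains the `{{#repeat_instructions}}` section. A short sketch, with a hypothetical prompt file path:

```python
from pathlib import Path

from langchain_graphrag.query.local_search.prompt_builder import (
    LocalSearchPromptBuilder,
)

# Hypothetical custom prompt file; it must contain the
# {{#repeat_instructions}}...{{/repeat_instructions}} section for the
# flag to change the rendered output.
builder = LocalSearchPromptBuilder(
    system_prompt_path=Path("prompts/my_local_search.txt"),
    repeat_instructions=False,
)

prompt_template, output_parser = builder.build()
```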
