Fix response routing and improve exception handling (#17)
* Add check to prevent observed error: https://neon-ai.sentry.io/issues/6270623501/events/e450d637fd014f8380064ad7026f39dc/
Update LLM response queues to be unique so that responses are matched to their originating requests

* Add more logging

* Add more logging to debug persona issues

* Add exception handling for empty options (e.g. when bots join in the middle of a conversation round)

* Update to address new test failure

* Revert arbitrary log change
NeonDaniel authored Feb 5, 2025
1 parent f9a6193 commit ad775a9
Showing 2 changed files with 27 additions and 5 deletions.
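To see the routing fix in isolation, the short sketch below shows the per-request response-queue naming this commit introduces. It is illustrative, not part of the commit: build_response_queue and the "chat_api_proposal" queue name are made-up stand-ins for the send_mq_request() call sites in chatbot.py.

from uuid import uuid4


def build_response_queue(base_queue: str) -> str:
    # Before this change, every request sent to ``base_queue`` waited on the shared
    # ``{base_queue}.response`` queue, so concurrent requests could consume one
    # another's replies. A per-request UUID suffix keeps each reply paired with
    # the request that created it.
    return f"{base_queue}.response.{uuid4().hex}"


# Two requests against the same target queue now listen on distinct reply queues.
print(build_response_queue("chat_api_proposal"))
print(build_response_queue("chat_api_proposal"))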
30 changes: 26 additions & 4 deletions neon_llm_core/chatbot.py
@@ -25,6 +25,8 @@
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from typing import List, Optional
from uuid import uuid4

from chatbot_core.v2 import ChatBot
from neon_data_models.models.api.mq import (
LLMProposeRequest,
@@ -83,6 +85,9 @@ def ask_discusser(self, options: dict, context: dict = None) -> str:
:param options: proposed responses (botname: response)
:param context: message context
"""
if not options:
LOG.error(f"No options provided: options={options}")
return DEFAULT_RESPONSE
options = {k: v for k, v in options.items() if k != self.service_name}
prompt_id = context.get('prompt_id') if context else None
prompt_sentence = None
@@ -101,6 +106,10 @@ def ask_appraiser(self, options: dict, context: dict = None) -> str:
:param options: proposed responses (botname: response)
:param context: message context
"""
if not options:
LOG.error(f"No options provided: options={options}")
return DEFAULT_VOTE

# Determine the relevant prompt
prompt_id = context.get('prompt_id') if context else None
prompt_sentence = None
@@ -118,6 +127,14 @@
answer_data = self._get_llm_api_choice(prompt=prompt_sentence,
responses=bot_responses)
LOG.info(f'Received answer_data={answer_data}')
if not answer_data:
LOG.error("No response to vote request")
return DEFAULT_VOTE
if len(answer_data.sorted_answer_indexes) != len(bots):
LOG.error(f"Invalid vote response! "
f"input_responses={bot_responses}|"
f"response_idxs={answer_data.sorted_answer_indexes}")
return DEFAULT_VOTE
if answer_data and answer_data.sorted_answer_indexes:
return bots[answer_data.sorted_answer_indexes[0]]
return DEFAULT_VOTE
@@ -129,9 +146,11 @@ def _get_llm_api_response(self, shout: str) -> Optional[LLMProposeResponse]:
:returns response from LLM API
"""
queue = self.mq_queue_config.ask_response_queue
response_queue = f"{queue}.response.{uuid4().hex}"

try:
LOG.info(f"Sending to {self.mq_queue_config.vhost}/{queue}")
LOG.info(f"Sending to {self.mq_queue_config.vhost}/{queue} for "
f"persona={self.persona}")

request_data = LLMProposeRequest(model=self.base_llm,
persona=self.persona,
Expand All @@ -141,7 +160,8 @@ def _get_llm_api_response(self, shout: str) -> Optional[LLMProposeResponse]:
resp_data = send_mq_request(vhost=self.mq_queue_config.vhost,
request_data=request_data.model_dump(),
target_queue=queue,
response_queue=f"{queue}.response")
response_queue=response_queue)
LOG.info(f"Got response for persona={self.persona}")
return LLMProposeResponse.model_validate(obj=resp_data)
except Exception as e:
LOG.exception(f"Failed to get response on "
@@ -155,6 +175,7 @@ def _get_llm_api_opinion(self, prompt: str, options: dict) -> Optional[LLMDiscussResponse]:
:returns response data from LLM API
"""
queue = self.mq_queue_config.ask_discusser_queue
response_queue = f"{queue}.response.{uuid4().hex}"

try:
LOG.info(f"Sending to {self.mq_queue_config.vhost}/{queue}")
@@ -168,7 +189,7 @@
resp_data = send_mq_request(vhost=self.mq_queue_config.vhost,
request_data=request_data.model_dump(),
target_queue=queue,
response_queue=f"{queue}.response")
response_queue=response_queue)
return LLMDiscussResponse.model_validate(obj=resp_data)
except Exception as e:
LOG.exception(f"Failed to get response on "
@@ -183,6 +204,7 @@ def _get_llm_api_choice(self, prompt: str,
:returns response data from LLM API
"""
queue = self.mq_queue_config.ask_appraiser_queue
response_queue = f"{queue}.response.{uuid4().hex}"

try:
LOG.info(f"Sending to {self.mq_queue_config.vhost}/{queue}")
@@ -196,7 +218,7 @@
resp_data = send_mq_request(vhost=self.mq_queue_config.vhost,
request_data=request_data.model_dump(),
target_queue=queue,
response_queue=f"{queue}.response")
response_queue=response_queue)
return LLMVoteResponse.model_validate(obj=resp_data)
except Exception as e:
LOG.exception(f"Failed to get response on "
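The new guards in ask_appraiser amount to a defensive selection step before a vote is trusted. The standalone sketch below mirrors those checks; pick_winner and the DEFAULT_VOTE placeholder value are illustrative, since the real constant and class structure live elsewhere in the package.

from typing import List, Optional

DEFAULT_VOTE = "abstain"  # placeholder value; the real constant is defined elsewhere in the package


def pick_winner(bots: List[str],
                sorted_answer_indexes: Optional[List[int]]) -> str:
    # No candidate responses, e.g. a bot joined mid-round and the options dict is empty.
    if not bots:
        return DEFAULT_VOTE
    # No usable answer came back from the LLM API.
    if not sorted_answer_indexes:
        return DEFAULT_VOTE
    # A valid vote must rank exactly the responses that were submitted.
    if len(sorted_answer_indexes) != len(bots):
        return DEFAULT_VOTE
    return bots[sorted_answer_indexes[0]]


print(pick_winner(["bot_a", "bot_b"], [1, 0]))  # -> "bot_b"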
2 changes: 1 addition & 1 deletion neon_llm_core/rmq.py
@@ -173,7 +173,7 @@ def _handle_request_async(self, request: dict):
query = request["query"]
history = request["history"]
persona = request.get("persona", {})

LOG.debug(f"Request persona={persona}|key={routing_key}")
try:
response = self.model.ask(message=query, chat_history=history,
persona=persona)
