diff --git a/neon_llm_core/rmq.py b/neon_llm_core/rmq.py
index 839f5b0..1b54047 100644
--- a/neon_llm_core/rmq.py
+++ b/neon_llm_core/rmq.py
@@ -200,13 +200,16 @@ def _handle_request_async(self, request: dict):
         history = request["history"]
         persona = request.get("persona", {})
         LOG.debug(f"Request persona={persona}|key={routing_key}")
+        # Default response if the model fails to respond
+        response = 'Sorry, but I cannot respond to your message at the '\
+                   'moment; please, try again later'
         try:
             response = self.model.ask(message=query, chat_history=history,
                                       persona=persona)
         except ValueError as err:
             LOG.error(f'ValueError={err}')
-            response = ('Sorry, but I cannot respond to your message at the '
-                        'moment, please try again later')
+        except Exception as e:
+            LOG.exception(e)
         api_response = LLMProposeResponse(message_id=message_id,
                                           response=response,
                                           routing_key=routing_key)
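
For context, here is the error-handling pattern the diff introduces, extracted into a standalone sketch: the fallback message is assigned *before* the `try` block, so both the expected `ValueError` path and any unexpected exception leave a usable `response` behind, instead of the request dying with an unhandled error. `FakeModel` and `handle_request` are hypothetical stand-ins for the real `self.model` and `_handle_request_async`; only the control flow mirrors the change.

```python
import logging

LOG = logging.getLogger(__name__)


class FakeModel:
    """Hypothetical model that may raise while answering (stand-in for self.model)."""

    def ask(self, message: str, chat_history: list, persona: dict) -> str:
        if not message:
            raise ValueError("empty query")
        return f"echo: {message}"


def handle_request(request: dict) -> str:
    # Default response if the model fails to respond, set before the try
    # block so every exception path still yields a usable reply
    response = ('Sorry, but I cannot respond to your message at the '
                'moment; please try again later')
    try:
        response = FakeModel().ask(message=request["query"],
                                   chat_history=request.get("history", []),
                                   persona=request.get("persona", {}))
    except ValueError as err:
        # Expected failure mode: log at error level, keep the default response
        LOG.error(f'ValueError={err}')
    except Exception as e:
        # Unexpected failure: log with full traceback, keep the default response
        LOG.exception(e)
    return response


if __name__ == "__main__":
    print(handle_request({"query": "hello"}))  # echo: hello
    print(handle_request({"query": ""}))       # fallback message
```

Initializing the fallback up front also means a later maintainer can add new `except` clauses without remembering to reassign `response` in each one.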