diff --git a/myllm/default_settings.toml b/myllm/default_settings.toml
index e33d0799..51c51ada 100644
--- a/myllm/default_settings.toml
+++ b/myllm/default_settings.toml
@@ -39,7 +39,7 @@ llm_commands = """
 ➰ /aimode\n
 🧽 /clearai\n
 """
-
+llm_prefix = "🐻"
 llm_ai_mode = false
 
 # template prompt context
diff --git a/myllm/main.py b/myllm/main.py
index d240db43..29e0641c 100644
--- a/myllm/main.py
+++ b/myllm/main.py
@@ -76,30 +76,28 @@ async def get_myllm_info(self):
             f"ℹ️ MyLLM v{__version__}\n {settings.llm_model}\n{settings.llm_provider}"
         )
 
+
     async def chat(self, prompt):
         """
         Asynchronously chats with the user.
-        
+
         Args:
             prompt (str): The prompt message from the user.
-        
+
         Returns:
-            str: The response from the conversation model.
+            str: The response from the conversation model.
         """
-        self.conversation.add_message("user", prompt)
-        logger.debug("conversation {}", self.conversation.get_messages())
-        response = await self.provider.create_async(
-            model=settings.llm_model,
-            messages=self.conversation.get_messages(),
-        )
-
-        self.conversation.add_message("ai", response)
-        sleep(settings.lag)
-        if response:
-            logger.debug("response received {}", response)
-            return response
-        else:
-            logger.debug("No response from the model")
+        try:
+            self.conversation.add_message("user", prompt)
+            response = await self.provider.create_async(
+                model=settings.llm_model,
+                messages=self.conversation.get_messages(),
+            )
+            sleep(settings.lag)
+            self.conversation.add_message("ai", response)
+            return f"{settings.llm_prefix} {response}"
+        except Exception as error:
+            logger.error("No response from the model {}", error)
             return "No response from the model"
 
     async def clear_chat_history(self):
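
For reference, a minimal usage sketch of the revised chat() under the new settings. The class name MyLLM, its zero-argument constructor, and the import path are assumptions; none of them appear in the hunks above.

import asyncio

from myllm.main import MyLLM  # assumed class name; only myllm/main.py is shown in the diff


async def main():
    talky = MyLLM()  # assumed zero-argument constructor
    # On success, chat() now returns the reply prefixed with llm_prefix
    # ("🐻 ..."); on any provider error it logs the exception and returns
    # the fallback string instead of propagating the error.
    print(await talky.chat("Hello"))


asyncio.run(main())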