Commit

Add warning for LLM to avoid context overflow (Significant-Gravitas#3646)

Pwuts authored May 2, 2023
1 parent 4767fe6 commit 1d26f6b
Showing 1 changed file with 11 additions and 0 deletions.
11 changes: 11 additions & 0 deletions autogpt/agent/agent.py
@@ -5,6 +5,7 @@
 from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
 from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
 from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
+from autogpt.llm.token_counter import count_string_tokens
 from autogpt.logs import logger, print_assistant_thoughts
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner
@@ -233,6 +234,16 @@ def start_interaction_loop(self):
                 )
                 result = f"Command {command_name} returned: " f"{command_result}"
 
+                result_tlength = count_string_tokens(
+                    str(command_result), cfg.fast_llm_model
+                )
+                memory_tlength = count_string_tokens(
+                    str(self.summary_memory), cfg.fast_llm_model
+                )
+                if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
+                    result = f"Failure: command {command_name} returned too much output. \
+                        Do not execute this command again with the same arguments."
+
                 for plugin in cfg.plugins:
                     if not plugin.can_handle_post_command():
                         continue
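For context, the new check estimates how many tokens the command result and the running memory summary would add to the next prompt, and swaps in a short failure message when they would push past the model's context window (cfg.fast_token_limit, minus 600 tokens of headroom for the surrounding prompt). Below is a minimal standalone sketch of the same idea; it uses tiktoken directly in place of Auto-GPT's count_string_tokens helper, and the guard_result function, default model name, and token limit are illustrative stand-ins, not part of the actual codebase.

import tiktoken


def count_string_tokens(string: str, model_name: str) -> int:
    # Encode with the model's tokenizer and count the tokens, which is
    # essentially what autogpt.llm.token_counter.count_string_tokens does.
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(string))


def guard_result(
    command_name: str,
    command_result: str,
    summary_memory: str,
    model_name: str = "gpt-3.5-turbo",  # stand-in for cfg.fast_llm_model
    token_limit: int = 4000,  # stand-in for cfg.fast_token_limit
) -> str:
    # Hypothetical helper mirroring the logic added in this commit.
    result_tlength = count_string_tokens(str(command_result), model_name)
    memory_tlength = count_string_tokens(str(summary_memory), model_name)
    # 600 tokens of headroom are reserved for the rest of the prompt,
    # matching the constant in the diff above.
    if result_tlength + memory_tlength + 600 > token_limit:
        return (
            f"Failure: command {command_name} returned too much output. "
            "Do not execute this command again with the same arguments."
        )
    return f"Command {command_name} returned: {command_result}"

Feeding the failure string back as the command result, rather than silently truncating the output, lets the LLM see why the command was rejected and steers it away from re-running the same command with the same arguments.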