Skip to content

Commit

Permalink
v0.14.7 - updated anthropic module, fixed typos in requirements.txt, …
Browse files Browse the repository at this point in the history
…new system prompt optimized specifically for claude
  • Loading branch information
benbaptist committed Jun 21, 2024
1 parent 543f75a commit 5184ca1
Show file tree
Hide file tree
Showing 13 changed files with 58 additions and 271 deletions.
13 changes: 12 additions & 1 deletion TODO.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
# Version Goals

# Next
- Model 'gpt-3.5-turbo' for provider 'openai' not found
- Catch and handle Anthropic anthropic.InternalServerError cleanly


## 0.15
- [ ] Save sessions in ~/.config/luminos, associate them with current directory (absolute path), and prompt for recalling previous sessions upon launch. Show a list of old sessions and the option to either recall one or to proceed with a fresh session
- [ ] Better CLI elements
Expand All @@ -17,7 +22,13 @@
- [ ] `google` will be the provider, three models will inherit it: `gemini-1.0-pro`, `gemini-1.5-flash`, `gemini-1.5-pro`
- [ ] See other models' implementations to see how it should be implemented. Specifically, see how Ollama is implemented since it uses LiteLLM, and we should use LiteLLM to implement Google. See example in 'examples/gemini.py' for an example

## 0.17
- [ ] Token Optimizations:
- [ ] Automatically compress and reduce redundant lines in scrollback to reduce input tokens
- [ ] Use the current LLM to compress conversations when hard token limits are reached

# Unsorted Goals
- [ ] Better code structure, more organized
- [ ] Better handling of token overflows
- [ ] Two --verbose levels, for better control of debugging
- [ ] Consider migrating to using litellm for ALL LLM-calling, reducing code complexity and increasing reusability
8 changes: 4 additions & 4 deletions luminos/models/anthropic/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

from luminos.exceptions import *

from .system_prompt import SYSTEM_PROMPT

from anthropic.types import (
ContentBlock,
Expand All @@ -30,9 +31,6 @@
TextBlock,
TextBlockParam,
TextDelta,
)

from anthropic.types.beta.tools import (
ToolUseBlock
)

Expand All @@ -52,6 +50,8 @@ class BaseAnthropic(BaseModel):
temperature = 0.5
max_tokens = 4096

system_prompt_template = SYSTEM_PROMPT

def __init__(self):
super().__init__()

Expand Down Expand Up @@ -88,7 +88,7 @@ def generate_response(self):
asst.model = self.model

try:
response = self.client.beta.tools.messages.create(
response = self.client.messages.create(
model=self.model,
max_tokens=self.max_tokens,
messages=serialized_messages,
Expand Down
8 changes: 8 additions & 0 deletions luminos/models/anthropic/system_prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# System prompt for the Anthropic (Claude) backend. The {username},
# {current_directory}, {listing}, and {time} placeholders are presumably
# substituted at request time via str.format — confirm against the caller
# that builds the message (this module only defines the template).
# NOTE(review): the braces here are literal format fields; any future
# edit that adds raw "{" or "}" to this text must escape them as "{{"/"}}".
SYSTEM_PROMPT = """You are Luminos, an AI in a Linux shell. Execute prompts iteratively until goals are achieved. Use file system access and shell commands. Continue refining actions until objectives are met. Only stop for user intervention. Use functions to assist. When writing files, output all content without truncation. Be explicit about actions. You're on a live system. Use relative paths to save tokens. Minimize token usage and keep responses concise. Utilize tools as needed to accomplish tasks efficiently.
*** CURRENT SYSTEM INFO, FOR AI TO USE IF NEEDED ***
User's username: {username}
Current directory: {current_directory}
Listing of directories/files: {listing}
Current time: {time}
"""
1 change: 0 additions & 1 deletion luminos/models/ollama-test/__init__.py

This file was deleted.

Empty file.
7 changes: 0 additions & 7 deletions luminos/models/ollama-test/messages/tool_call.py

This file was deleted.

87 changes: 0 additions & 87 deletions luminos/models/ollama-test/ollama.py

This file was deleted.

34 changes: 0 additions & 34 deletions luminos/models/ollama-test/system_prompt.py

This file was deleted.

84 changes: 0 additions & 84 deletions luminos/models/ollama-test/tool_parser.py

This file was deleted.

52 changes: 27 additions & 25 deletions luminos/models/ollama/ollama.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
from luminos.messages.response import Response

from .system_prompt import SYSTEM_PROMPT
from .tool_parser import tool_parser

from litellm import completion

Expand All @@ -26,33 +25,31 @@ class Ollama(BaseModel):
def __init__(self):
super().__init__()

@property
def system_prompt_template(self):
tool_prompt = ""
# @property
# def system_prompt_template(self):
# tool_prompt = ""

for tool in self.tools.__obj__:
name = tool["function"]["name"]
description = tool["function"]["description"]
parameters = json.dumps(tool["function"]["parameters"])
# for tool in self.tools.__obj__:
# name = tool["function"]["name"]
# description = tool["function"]["description"]
# parameters = json.dumps(tool["function"]["parameters"])

tool_prompt += f"**{name}**{description}\nJSON Schema: {parameters}\n"
# tool_prompt += f"**{name}**{description}\nJSON Schema: {parameters}\n"

tool_prompt = tool_prompt.replace("{", "{{")
tool_prompt = tool_prompt.replace("}", "}}")
# tool_prompt = tool_prompt.replace("{", "{{")
# tool_prompt = tool_prompt.replace("}", "}}")

return SYSTEM_PROMPT + "\n\n" + tool_prompt
# return SYSTEM_PROMPT + "\n\n" + tool_prompt

def generate_response(self):
# Limit to the most basic tools, for now
self.tools.tools = ("Shell", "FileIO")

serialized_messages = [message.serialize() for message in self.messages]

try:
response = completion(
model=f"ollama_chat/{self.model}",
messages=serialized_messages,
api_base=self.api_base,
tools=self.tools.__obj__
)

logger.debug(response)
Expand All @@ -65,18 +62,23 @@ def generate_response(self):
content = choice.message["content"]
finish_reason = choice.finish_reason

# Parse tool calls from the response
try:
tool_calls = tool_parser(content)
except Exception as e:
logger.error(f"Error while parsing for potential tool calls {e}")
logger.debug(content)
raise ModelReturnError(f"Error while parsing for potential tool calls: {e}")
# # Parse tool calls from the response
# try:
# tool_calls = tool_parser(content)
# except Exception as e:
# logger.error(f"Error while parsing for potential tool calls {e}")
# logger.debug(content)
# raise ModelReturnError(f"Error while parsing for potential tool calls: {e}")

tool_calls = []

if finish_reason == "tool_calls":
tool_calls_data = response.choices[0].message.tool_calls

print(tool_calls)
tool_calls = [
ToolCall(content=data.function, id=data.id, type=data.type) for data in tool_calls_data
]

if len(tool_calls) > 0:
finish_reason = "tool_calls"
msg = Assistant(content)
msg.tool_calls = tool_calls

Expand Down
Loading

0 comments on commit 5184ca1

Please sign in to comment.