Commit

wip add some basic logging capabilities
cpsievert committed Dec 10, 2024
1 parent 3b7ccea commit 33a4502
Showing 10 changed files with 86 additions and 39 deletions.
2 changes: 1 addition & 1 deletion chatlas/_anthropic.py
@@ -16,11 +16,11 @@
     ContentToolRequest,
     ContentToolResult,
 )
+from ._logging import log_model_default
 from ._provider import Provider
 from ._tokens import tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, normalize_turns
-from ._utils import log_model_default
 
 if TYPE_CHECKING:
     from anthropic.types import (
3 changes: 3 additions & 0 deletions chatlas/_chat.py
@@ -34,6 +34,7 @@
     MarkdownDisplay,
     MockMarkdownDisplay,
 )
+from ._logging import log_tool_error
 from ._provider import Provider
 from ._tools import Tool
 from ._turn import Turn, user_turn
@@ -1035,6 +1036,7 @@ def _invoke_tool(
 
             return ContentToolResult(id_, result, None)
         except Exception as e:
+            log_tool_error(func.__name__, str(arguments), e)
             return ContentToolResult(id_, None, str(e))
 
     @staticmethod
@@ -1054,6 +1056,7 @@ async def _invoke_tool_async(
 
             return ContentToolResult(id_, result, None)
         except Exception as e:
+            log_tool_error(func.__name__, str(arguments), e)
             return ContentToolResult(id_, None, str(e))
 
     def _markdown_display(
3 changes: 2 additions & 1 deletion chatlas/_github.py
@@ -4,9 +4,10 @@
 from typing import TYPE_CHECKING, Optional
 
 from ._chat import Chat
+from ._logging import log_model_default
 from ._openai import ChatOpenAI
 from ._turn import Turn
-from ._utils import MISSING, MISSING_TYPE, log_model_default
+from ._utils import MISSING, MISSING_TYPE
 
 if TYPE_CHECKING:
     from ._openai import ChatCompletion
2 changes: 1 addition & 1 deletion chatlas/_google.py
@@ -15,10 +15,10 @@
     ContentToolRequest,
     ContentToolResult,
 )
+from ._logging import log_model_default
 from ._provider import Provider
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, normalize_turns
-from ._utils import log_model_default
 
 if TYPE_CHECKING:
     from google.generativeai.types.content_types import (
3 changes: 2 additions & 1 deletion chatlas/_groq.py
@@ -4,9 +4,10 @@
 from typing import TYPE_CHECKING, Optional
 
 from ._chat import Chat
+from ._logging import log_model_default
 from ._openai import ChatOpenAI
 from ._turn import Turn
-from ._utils import MISSING, MISSING_TYPE, log_model_default
+from ._utils import MISSING, MISSING_TYPE
 
 if TYPE_CHECKING:
     from ._openai import ChatCompletion
29 changes: 29 additions & 0 deletions chatlas/_logging.py
@@ -0,0 +1,29 @@
+import logging
+import os
+
+logger = logging.getLogger("chatlas")
+if len(logger.handlers) == 0:
+    logger.addHandler(logging.NullHandler())
+
+if os.getenv("CHATLAS_LOG", "").lower() == "info":
+    print("Setting log level to INFO")
+    formatter = logging.Formatter("%(asctime)s %(levelname)s - %(name)s - %(message)s")
+    handler = logging.FileHandler("chatlas.log")
+    handler.setFormatter(formatter)
+    handler.setLevel(logging.INFO)
+    logger.addHandler(handler)
+    logger.setLevel(logging.INFO)
+
+
+def log_model_default(model: str) -> str:
+    logger.info(f"Defaulting to `model = '{model}'`.")
+    return model
+
+
+def log_tool_error(name: str, arguments: str, e: Exception):
+    logger.info(
+        f"Error invoking tool function '{name}' with arguments: {arguments}. "
+        f"The error message is: '{e}'",
+    )
+
+
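Not part of the diff, but for context: the CHATLAS_LOG check above runs at import time, so the variable must be set before chatlas is first imported. A minimal sketch of opting in (the "info" value and the chatlas.log file name come from the module above; everything else is illustrative):

```python
# Sketch: opt in to the new chatlas file logging added in this commit.
# _logging.py reads CHATLAS_LOG once, at import time, so set it first --
# either in the shell (CHATLAS_LOG=info python app.py) or via os.environ.
import os

os.environ["CHATLAS_LOG"] = "info"

import chatlas  # INFO records (default-model picks, tool errors) now append to ./chatlas.log
```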
3 changes: 2 additions & 1 deletion chatlas/_openai.py
@@ -16,12 +16,13 @@
     ContentToolRequest,
     ContentToolResult,
 )
+from ._logging import log_model_default
 from ._merge import merge_dicts
 from ._provider import Provider
 from ._tokens import tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, normalize_turns
-from ._utils import MISSING, MISSING_TYPE, is_testing, log_model_default
+from ._utils import MISSING, MISSING_TYPE, is_testing
 
 if TYPE_CHECKING:
     from openai.types.chat import (
3 changes: 2 additions & 1 deletion chatlas/_perplexity.py
@@ -4,9 +4,10 @@
 from typing import TYPE_CHECKING, Optional
 
 from ._chat import Chat
+from ._logging import log_model_default
 from ._openai import ChatOpenAI
 from ._turn import Turn
-from ._utils import MISSING, MISSING_TYPE, log_model_default
+from ._utils import MISSING, MISSING_TYPE
 
 if TYPE_CHECKING:
     from ._openai import ChatCompletion
8 changes: 0 additions & 8 deletions chatlas/_utils.py
@@ -2,15 +2,12 @@
 
 import functools
 import inspect
-import logging
 import os
 import re
 from typing import Awaitable, Callable, TypeVar, cast
 
 from ._typing_extensions import ParamSpec, TypeGuard
 
-logger = logging.getLogger("chatlas")
-
 # --------------------------------------------------------------------
 # wrap_async() and is_async_callable() was copied from shiny/_utils.py
 # --------------------------------------------------------------------
@@ -80,11 +77,6 @@ class MISSING_TYPE:
 MISSING = MISSING_TYPE()
 
 
-def log_model_default(model: str) -> str:
-    logger.info(f"Defaulting to `model = '{model}'`.")
-    return model
-
-
 # --------------------------------------------------------------------
 # html_escape was copied from htmltools/_utils.py
 # --------------------------------------------------------------------
69 changes: 44 additions & 25 deletions docs/tool-calling.qmd
@@ -20,57 +20,58 @@ from chatlas import ChatOpenAI
 
 ### Motivating example
 
-Let's take a look at an example where we really need an external tool. Chat models generally do not know the current time, which makes questions like these impossible.
+Let's take a look at an example where we really need an external tool. Chat models generally do not have access to "real-time" information, such as current events, weather, etc. Let's see what happens when we ask the chat model about the weather in a specific location:
 
 ```{python}
-chat = ChatOpenAI(model="gpt-4o")
-_ = chat.chat("How long ago exactly was the moment Neil Armstrong touched down on the moon?")
+chat = ChatOpenAI(model="gpt-4o-mini")
+_ = chat.chat("What's the weather like today in Duluth, MN?")
 ```
 
-Unfortunately, the LLM doesn't hallucinates the current date. Let's give the chat model the ability to determine the current time and try again.
+Fortunately, the model is smart enough to know that it doesn't have access to real-time information, and it doesn't try to make up an answer. However, we can help it out by providing a tool that can fetch the weather for a given location.
 
 ### Defining a tool function
 
-The first thing we'll do is define a Python function that returns the current time. This will be our tool.
+As it turns out, LLMs are pretty good at figuring out 'structure' like latitude and longitude from 'unstructured' things like a location name. So we can write a tool function that takes a latitude and longitude and returns the current temperature at that location. Here's an example of how you might write such a function using the [Open-Meteo API](https://open-meteo.com/):
 
 ```{python}
-def get_current_time(tz: str = "UTC") -> str:
+import requests
+
+def get_current_temperature(latitude: float, longitude: float):
     """
-    Gets the current time in the given time zone.
+    Get the current weather given a latitude and longitude.
 
     Parameters
     ----------
-    tz
-        The time zone to get the current time in. Defaults to "UTC".
-
-    Returns
-    -------
-    str
-        The current time in the given time zone.
+    latitude
+        The latitude of the location.
+    longitude
+        The longitude of the location.
     """
-    from datetime import datetime
-    from zoneinfo import ZoneInfo
-
-    return datetime.now(ZoneInfo(tz)).strftime("%Y-%m-%d %H:%M:%S %Z")
+    lat_lng = f"latitude={latitude}&longitude={longitude}"
+    url = f"https://api.open-meteo.com/v1/forecast?{lat_lng}&current=temperature_2m,wind_speed_10m&hourly=temperature_2m,relative_humidity_2m,wind_speed_10m"
+    response = requests.get(url)
+    json = response.json()
+    return json["current"]
 ```
 
 Note that we've gone through the trouble of adding the following to our function:
 
-- Type hints for arguments and the return value
-- A docstring that explains what the function does and what arguments it expects
+- Type hints for function arguments
+- A docstring that explains what the function does and what arguments it expects (as well as descriptions for the arguments themselves)
 
-**Providing these hints and context is very important**, as it helps the chat model understand how to use your tool correctly!
+**Providing these hints and documentation is very important**, as it helps the chat model understand how to use your tool correctly!
 
 Let's test it:
 
 ```{python}
-get_current_time()
+get_current_temperature(46.7867, -92.1005)
 ```
 
 
 ### Using the tool
 
-In order for the LLM to make use of our tool, we need to register it with the chat object. This is done by calling the `register_tool` method on the chat object.
+In order for the LLM to make use of our tool, we need to register it with the `chat` object. This is done by calling the `register_tool` method on the chat object.
 
 ```{python}
 chat.register_tool(get_current_time)
@@ -79,12 +80,30 @@
 
 Now let's retry our original question:
 
 ```{python}
-_ = chat.chat("How long ago exactly was the moment Neil Armstrong touched down on the moon?")
+_ = chat.chat("What's the weather like today in Duluth, MN?")
 ```
 
 That's correct! Without any further guidance, the chat model decided to call our tool function and successfully used its result in formulating its response.
 
-This tool example was extremely simple, but you can imagine doing much more interesting things from tool functions: calling APIs, reading from or writing to a database, kicking off a complex simulation, or even calling a complementary GenAI model (like an image generator). Or if you are using chatlas in a Shiny app, you could use tools to set reactive values, setting off a chain of reactive updates.
+This tool example was extremely simple, but you can imagine doing much more interesting things from tool functions: calling APIs, reading from or writing to a database, kicking off a complex simulation, or even calling a complementary GenAI model (like an image generator). Or if you are using chatlas in a Shiny app, you could use tools to set reactive values, setting off a chain of reactive updates. This is precisely what the [sidebot dashboard](https://github.com/jcheng5/py-sidebot) does to allow for an AI-assisted "drill-down" into the data.
+
+### Troubleshooting
+
+When the execution of a tool function fails, chatlas sends the exception message back to the chat model, which gives the model a chance to handle the error gracefully. However, this can also lead to confusion as to why a response did not come back as expected. If you encounter such a situation, you can set `echo="all"` in the `chat.chat()` method to see the full conversation, including tool calls and their results.
+
+```{python}
+def get_current_temperature(latitude: float, longitude: float):
+    "Get the current weather given a latitude and longitude."
+    raise ValueError("Failed to get current temperature")
+
+chat.tools = [get_current_temperature]
+
+_ = chat.chat("What's the weather like today in Duluth, MN?")
+```
+
+
 
 ### Tool limitations
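The new Troubleshooting section mentions `echo="all"` without showing it; a one-line sketch of that inspection step (assuming the `chat` object from the docs above, not code from this commit):

```python
# Sketch: re-run the failing prompt with full echo so the tool request and
# the error result that chatlas sent back to the model are both displayed.
_ = chat.chat("What's the weather like today in Duluth, MN?", echo="all")
```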
