This document provides instructions for migrating your codebase to accommodate breaking changes introduced in new versions of Griptape.
AnthropicDriversConfig
no longer bundles VoyageAiEmbeddingDriver
. If you rely on embeddings when using Anthropic, you must specify an Embedding Driver yourself.
from griptape.configs import Defaults
from griptape.configs.drivers import AnthropicDriversConfig
from griptape.structures import Agent
Defaults.drivers_config = AnthropicDriversConfig()
agent = Agent()
from griptape.configs import Defaults
from griptape.configs.drivers import AnthropicDriversConfig
from griptape.drivers import VoyageAiEmbeddingDriver, LocalVectorStoreDriver
Defaults.drivers_config = AnthropicDriversConfig(
embedding_driver=VoyageAiEmbeddingDriver(),
vector_store_driver=LocalVectorStoreDriver(
embedding_driver=VoyageAiEmbeddingDriver()
)
)
Many callables have been renamed for consistency. Update your code to use the new names using the CHANGELOG.md as the source of truth.
CompletionChunkEvent
has been removed. There is now BaseChunkEvent
with children TextChunkEvent
and ActionChunkEvent
. BaseChunkEvent
can replace completion_chunk_event.token
by doing str(base_chunk_event)
.
def handler_fn_stream(event: CompletionChunkEvent) -> None:
print(f"CompletionChunkEvent: {event.to_json()}")
def handler_fn_stream_text(event: CompletionChunkEvent) -> None:
# This prints out Tool actions with no easy way
# to filter them out
print(event.token, end="", flush=True)
EventListener(handler=handler_fn_stream, event_types=[CompletionChunkEvent])
EventListener(handler=handler_fn_stream_text, event_types=[CompletionChunkEvent])
def handler_fn_stream(event: BaseChunkEvent) -> None:
    print(str(event), end="", flush=True)
# print out each child event type
if isinstance(event, TextChunkEvent):
print(f"TextChunkEvent: {event.to_json()}")
if isinstance(event, ActionChunkEvent):
print(f"ActionChunkEvent: {event.to_json()}")
def handler_fn_stream_text(event: TextChunkEvent) -> None:
# This will only be text coming from the
# prompt driver, not Tool actions
print(event.token, end="", flush=True)
EventListener(handler=handler_fn_stream, event_types=[BaseChunkEvent])
EventListener(handler=handler_fn_stream_text, event_types=[TextChunkEvent])
Returning None
from the handler
function now causes the event to not be published to the EventListenerDriver
.
The handler
function can now return a BaseEvent
object.
def handler_fn_return_none(event: BaseEvent) -> Optional[dict]:
# This causes the `BaseEvent` object to be passed to the EventListenerDriver
return None
def handler_fn_return_dict(event: BaseEvent) -> Optional[dict]:
# This causes the returned dictionary to be passed to the EventListenerDriver
return {
"key": "value
}
EventListener(handler=handler_fn_return_none, driver=driver)
EventListener(handler=handler_fn_return_dict, driver=driver)
def handler_fn_return_none(event: BaseEvent) -> Optional[dict | BaseEvent]:
# This causes the `BaseEvent` object to NOT get passed to the EventListenerDriver
return None
def handler_fn_return_dict(event: BaseEvent) -> Optional[dict | BaseEvent]:
# This causes the returned dictionary to be passed to the EventListenerDriver
return {
"key": "value
}
def handler_fn_return_base_event(event: BaseEvent) -> Optional[dict | BaseEvent]:
# This causes the returned `BaseEvent` object to be passed to the EventListenerDriver
return ChildClassOfBaseEvent()
# `driver` has been renamed to `event_listener_driver`
EventListener(handler=handler_fn_return_none, event_listener_driver=driver)
EventListener(handler=handler_fn_return_dict, event_listener_driver=driver)
EventListener(handler=handler_fn_return_base_event, event_listener_driver=driver)
BaseEventListenerDriver.publish_event
no longer takes a flush
argument. If you need to flush the event, call BaseEventListenerDriver.flush_events
directly.
event_listener_driver.publish_event(event, flush=True)
event_listener_driver.publish_event(event)
event_listener_driver.flush_events()
The observable
decorator has been moved to griptape.common.decorators
. Update your imports accordingly.
from griptape.common.observable import observable
from griptape.common.decorators import observable
HuggingFacePipelinePromptDriver.params
has been removed. Use HuggingFacePipelinePromptDriver.extra_params
instead.
driver = HuggingFacePipelinePromptDriver(
params={"max_length": 50}
)
driver = HuggingFacePipelinePromptDriver(
extra_params={"max_length": 50}
)
execute
has been renamed to run
in several places. Update your code accordingly.
task = PromptTask()
if task.can_execute():
task.execute()
task = PromptTask()
if task.can_run():
task.run()
DataframeLoader
has been removed. Use CsvLoader.parse
or build TextArtifact
s from the dataframe instead.
DataframeLoader().load(df)
# Convert the dataframe to csv bytes and parse it
CsvLoader().parse(bytes(df.to_csv(line_terminator='\r\n', index=False), encoding='utf-8'))
# Or build TextArtifacts from the dataframe
[TextArtifact(row) for row in source.to_dict(orient="records")]
PdfLoader().load(Path("attention.pdf").read_bytes())
PdfLoader().load_collection([Path("attention.pdf").read_bytes(), Path("CoT.pdf").read_bytes()])
PdfLoader().load("attention.pdf")
PdfLoader().load_collection([Path("attention.pdf"), "CoT.pdf"])
griptape.utils.file_utils.load_file
and griptape.utils.file_utils.load_files
have been removed.
You can now pass the file path directly to the Loader.
PdfLoader().load(load_file("attention.pdf").read_bytes())
PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values()))
PdfLoader().load("attention.pdf")
PdfLoader().load_collection(["attention.pdf", "CoT.pdf"])
Loaders no longer chunk the data after loading it. If you need to chunk the data, use a Chunker after loading the data.
chunks = PdfLoader().load("attention.pdf")
vector_store.upsert_text_artifacts(
{
"griptape": chunks,
}
)
artifact = PdfLoader().load("attention.pdf")
chunks = Chunker().chunk(artifact)
vector_store.upsert_text_artifacts(
{
"griptape": chunks,
}
)
The torch
extra has been removed from the transformers
dependency. If you require torch
, install it separately.
pip install griptape[drivers-prompt-huggingface-hub]
pip install griptape[drivers-prompt-huggingface-hub]
pip install torch
MediaArtifact
has been removed. Use ImageArtifact
or AudioArtifact
instead.
image_media = MediaArtifact(
b"image_data",
media_type="image",
format="jpeg"
)
audio_media = MediaArtifact(
b"audio_data",
media_type="audio",
format="wav"
)
image_artifact = ImageArtifact(
b"image_data",
format="jpeg"
)
audio_artifact = AudioArtifact(
b"audio_data",
format="wav"
)
ImageArtifact.format
is now a required parameter. Update any code that does not provide a format
parameter.
image_artifact = ImageArtifact(
b"image_data"
)
image_artifact = ImageArtifact(
b"image_data",
format="jpeg"
)
CsvRowArtifact
has been removed. Use TextArtifact
instead.
artifact = CsvRowArtifact({"name": "John", "age": 30})
print(artifact.value) # {"name": "John", "age": 30}
print(type(artifact.value)) # <class 'dict'>
artifact = TextArtifact("name: John\nage: 30")
print(artifact.value) # name: John\nage: 30
print(type(artifact.value)) # <class 'str'>
If you require storing a dictionary as an Artifact, you can use GenericArtifact
instead.
CsvLoader
, DataframeLoader
, and SqlLoader
now return a list[TextArtifact]
instead of list[CsvRowArtifact]
.
If you require a dictionary, set a custom formatter_fn
and then parse the text to a dictionary.
results = CsvLoader().load(Path("people.csv").read_text())
print(results[0].value) # {"name": "John", "age": 30}
print(type(results[0].value)) # <class 'dict'>
results = CsvLoader().load(Path("people.csv").read_text())
print(results[0].value) # name: John\nage: 30
print(type(results[0].value)) # <class 'str'>
# Customize formatter_fn
results = CsvLoader(formatter_fn=lambda x: json.dumps(x)).load(Path("people.csv").read_text())
print(results[0].value) # {"name": "John", "age": 30}
print(type(results[0].value)) # <class 'str'>
dict_results = [json.loads(result.value) for result in results]
print(dict_results[0]) # {"name": "John", "age": 30}
print(type(dict_results[0])) # <class 'dict'>
ImageArtifact.prompt
and ImageArtifact.model
have been moved to ImageArtifact.meta
.
image_artifact = ImageArtifact(
b"image_data",
format="jpeg",
prompt="Generate an image of a cat",
model="DALL-E"
)
print(image_artifact.prompt, image_artifact.model) # Generate an image of a cat, DALL-E
image_artifact = ImageArtifact(
b"image_data",
format="jpeg",
meta={"prompt": "Generate an image of a cat", "model": "DALL-E"}
)
print(image_artifact.meta["prompt"], image_artifact.meta["model"]) # Generate an image of a cat, DALL-E
Renamed GriptapeCloudKnowledgeBaseVectorStoreDriver
to GriptapeCloudVectorStoreDriver
.
from griptape.drivers.griptape_cloud_knowledge_base_vector_store_driver import GriptapeCloudKnowledgeBaseVectorStoreDriver
driver = GriptapeCloudKnowledgeBaseVectorStoreDriver(...)
from griptape.drivers.griptape_cloud_vector_store_driver import GriptapeCloudVectorStoreDriver
driver = GriptapeCloudVectorStoreDriver(...)
OpenAiChatPromptDriver.response_format
is now structured as the openai
SDK accepts it.
driver = OpenAiChatPromptDriver(
response_format="json_object"
)
driver = OpenAiChatPromptDriver(
response_format={"type": "json_object"}
)
DataframeLoader
has been removed. Use CsvLoader.parse
or build TextArtifact
s from the dataframe instead.
DataframeLoader().load(df)
# Convert the dataframe to csv bytes and parse it
CsvLoader().parse(bytes(df.to_csv(line_terminator='\r\n', index=False), encoding='utf-8'))
# Or build TextArtifacts from the dataframe
[TextArtifact(row) for row in source.to_dict(orient="records")]
PdfLoader().load(Path("attention.pdf").read_bytes())
PdfLoader().load_collection([Path("attention.pdf").read_bytes(), Path("CoT.pdf").read_bytes()])
PdfLoader().load("attention.pdf")
PdfLoader().load_collection([Path("attention.pdf"), "CoT.pdf"])
griptape.utils.file_utils.load_file
and griptape.utils.file_utils.load_files
have been removed.
You can now pass the file path directly to the Loader.
PdfLoader().load(load_file("attention.pdf").read_bytes())
PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values()))
PdfLoader().load("attention.pdf")
PdfLoader().load_collection(["attention.pdf", "CoT.pdf"])
Loaders no longer chunk the data after loading it. If you need to chunk the data, use a Chunker after loading the data.
chunks = PdfLoader().load("attention.pdf")
vector_store.upsert_text_artifacts(
{
"griptape": chunks,
}
)
artifact = PdfLoader().load("attention.pdf")
chunks = Chunker().chunk(artifact)
vector_store.upsert_text_artifacts(
{
"griptape": chunks,
}
)
MediaArtifact
has been removed. Use ImageArtifact
or AudioArtifact
instead.
image_media = MediaArtifact(
b"image_data",
media_type="image",
format="jpeg"
)
audio_media = MediaArtifact(
b"audio_data",
media_type="audio",
format="wav"
)
image_artifact = ImageArtifact(
b"image_data",
format="jpeg"
)
audio_artifact = AudioArtifact(
b"audio_data",
format="wav"
)
ImageArtifact.format
is now a required parameter. Update any code that does not provide a format
parameter.
image_artifact = ImageArtifact(
b"image_data"
)
image_artifact = ImageArtifact(
b"image_data",
format="jpeg"
)
CsvRowArtifact
has been removed. Use TextArtifact
instead.
artifact = CsvRowArtifact({"name": "John", "age": 30})
print(artifact.value) # {"name": "John", "age": 30}
print(type(artifact.value)) # <class 'dict'>
artifact = TextArtifact("name: John\nage: 30")
print(artifact.value) # name: John\nage: 30
print(type(artifact.value)) # <class 'str'>
If you require storing a dictionary as an Artifact, you can use GenericArtifact
instead.
CsvLoader
, DataframeLoader
, and SqlLoader
now return a list[TextArtifact]
instead of list[CsvRowArtifact]
.
If you require a dictionary, set a custom formatter_fn
and then parse the text to a dictionary.
results = CsvLoader().load(Path("people.csv").read_text())
print(results[0].value) # {"name": "John", "age": 30}
print(type(results[0].value)) # <class 'dict'>
results = CsvLoader().load(Path("people.csv").read_text())
print(type(results)) # <class 'griptape.artifacts.ListArtifact'>
print(results[0].value) # name: John\nage: 30
print(type(results[0].value)) # <class 'str'>
# Customize formatter_fn
results = CsvLoader(formatter_fn=lambda x: json.dumps(x)).load(Path("people.csv").read_text())
print(results[0].value) # {"name": "John", "age": 30}
print(type(results[0].value)) # <class 'str'>
dict_results = [json.loads(result.value) for result in results]
print(dict_results[0]) # {"name": "John", "age": 30}
print(type(dict_results[0])) # <class 'dict'>
ImageArtifact.prompt
and ImageArtifact.model
have been moved to ImageArtifact.meta
.
image_artifact = ImageArtifact(
b"image_data",
format="jpeg",
prompt="Generate an image of a cat",
model="DALL-E"
)
print(image_artifact.prompt, image_artifact.model) # Generate an image of a cat, DALL-E
image_artifact = ImageArtifact(
b"image_data",
format="jpeg",
meta={"prompt": "Generate an image of a cat", "model": "DALL-E"}
)
print(image_artifact.meta["prompt"], image_artifact.meta["model"]) # Generate an image of a cat, DALL-E
Drivers, Loaders, and Engines now raise exceptions rather than returning ErrorArtifact
s.
Update any logic that expects ErrorArtifact
to handle exceptions instead.
artifacts = WebLoader().load("https://www.griptape.ai")
if isinstance(artifacts, ErrorArtifact):
raise Exception(artifacts.value)
try:
artifacts = WebLoader().load("https://www.griptape.ai")
except Exception as e:
raise e
LocalConversationMemoryDriver.file_path
has been renamed to persist_file
and is now Optional[str]
. If persist_file
is not passed as a parameter, nothing will be persisted and no errors will be raised. LocalConversationMemoryDriver
is now the default driver in the global Defaults
object.
local_driver_with_file = LocalConversationMemoryDriver(
file_path="my_file.json"
)
local_driver = LocalConversationMemoryDriver()
assert local_driver_with_file.file_path == "my_file.json"
assert local_driver.file_path == "griptape_memory.json"
local_driver_with_file = LocalConversationMemoryDriver(
persist_file="my_file.json"
)
local_driver = LocalConversationMemoryDriver()
assert local_driver_with_file.persist_file == "my_file.json"
assert local_driver.persist_file is None
BaseConversationMemoryDriver.driver
has been renamed to conversation_memory_driver
. Method signatures for .store
and .load
have been changed.
memory_driver = LocalConversationMemoryDriver()
conversation_memory = ConversationMemory(
driver=memory_driver
)
load_result: BaseConversationMemory = memory_driver.load()
memory_driver.store(conversation_memory)
memory_driver = LocalConversationMemoryDriver()
conversation_memory = ConversationMemory(
conversation_memory_driver=memory_driver
)
load_result: tuple[list[Run], dict[str, Any]] = memory_driver.load()
memory_driver.store(
conversation_memory.runs,
conversation_memory.meta
)