From ef1619d2bd48dc23ebc64d2b8a54b970b5d0944c Mon Sep 17 00:00:00 2001
From: SixZero
Date: Tue, 10 Dec 2024 03:21:31 +0100
Subject: [PATCH 1/3] Use the last cache point, but do not write a new cache point with the :all_but_last cache mode.

---
 src/llm_anthropic.jl | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/src/llm_anthropic.jl b/src/llm_anthropic.jl
index 513950d29..cf5b147eb 100644
--- a/src/llm_anthropic.jl
+++ b/src/llm_anthropic.jl
@@ -24,13 +24,13 @@ Builds a history of the conversation to provide the prompt to the API. All unspe
 function render(schema::AbstractAnthropicSchema,
         messages::Vector{<:AbstractMessage};
         aiprefill::Union{Nothing, AbstractString} = nothing,
-        conversation::AbstractVector{<:AbstractMessage} = AbstractMessage[],
+        conversation_msgs::AbstractVector{<:AbstractMessage} = AbstractMessage[],
         no_system_message::Bool = false,
         cache::Union{Nothing, Symbol} = nothing,
         kwargs...)
     ##
     @assert count(issystemmessage, messages)<=1 "AbstractAnthropicSchema only supports at most 1 System message"
-    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all]) "Currently only `:system`, `:tools`, `:last`, `:all` are supported for Anthropic Prompt Caching"
+    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all, :all_but_last]) "Currently only `:system`, `:tools`, `:last`, `:all` are supported for Anthropic Prompt Caching (cache=$cache)"

     # Filter out annotation messages before any processing
     messages = filter(!isabstractannotationmessage, messages)
@@ -39,7 +39,7 @@ function render(schema::AbstractAnthropicSchema,

     ## First pass: keep the message types but make the replacements provided in `kwargs`
     messages_replaced = render(
-        NoSchema(), messages; conversation, no_system_message, kwargs...)
+        NoSchema(), messages; conversation_msgs, no_system_message, kwargs...)

     ## Second pass: convert to the message-based schema
     conversation = Dict{String, Any}[]
@@ -94,7 +94,12 @@ function render(schema::AbstractAnthropicSchema,
     if is_valid_conversation && (cache == :last || cache == :all)
         conversation[end]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
     end
-    if !no_system_message && !isnothing(system) && (cache == :system || cache == :all)
+    if is_valid_conversation && (cache == :all_but_last)
+        for i in 1:length(conversation)-1
+            conversation[i]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
+        end
+    end
+    if !no_system_message && !isnothing(system) && (cache == :system || cache == :all || cache == :all_but_last)
         ## Apply cache for system message
         system = [Dict("type" => "text", "text" => system,
             "cache_control" => Dict("type" => "ephemeral"))]
@@ -457,7 +462,7 @@ function aigenerate(
         kwargs...)
     ##
     global MODEL_ALIASES
-    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all]) "Currently only `:system`, `:tools`, `:last` and `:all` are supported for Anthropic Prompt Caching"
+    @assert (isnothing(cache) || cache in [:system, :tools, :last, :all, :all_but_last]) "Currently only `:system`, `:tools`, `:last`, `:all_but_last` and `:all` are supported for Anthropic Prompt Caching (cache=$cache)"
     @assert (isnothing(aiprefill)||!isempty(strip(aiprefill))) "`aiprefill` must not be empty"
     ## Find the unique ID for the model alias provided
     model_id = get(MODEL_ALIASES, model, model)

From fc6a803e7a55396c269b3c42815a1e24afea1efb Mon Sep 17 00:00:00 2001
From: SixZero
Date: Tue, 10 Dec 2024 12:40:54 +0100
Subject: [PATCH 2/3] :all_but_last marks the previous user message as "ephemeral", not the last AI message.
---
 src/llm_anthropic.jl | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/llm_anthropic.jl b/src/llm_anthropic.jl
index cf5b147eb..184a1a99b 100644
--- a/src/llm_anthropic.jl
+++ b/src/llm_anthropic.jl
@@ -30,7 +30,7 @@ function render(schema::AbstractAnthropicSchema,
         kwargs...)
     ##
     @assert count(issystemmessage, messages)<=1 "AbstractAnthropicSchema only supports at most 1 System message"
-    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all, :all_but_last]) "Currently only `:system`, `:tools`, `:last`, `:all` are supported for Anthropic Prompt Caching (cache=$cache)"
+    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all, :all_but_last]) "Currently only `:system`, `:tools`, `:last`, `:all_but_last`, `:all` are supported for Anthropic Prompt Caching (cache=$cache)"

     # Filter out annotation messages before any processing
     messages = filter(!isabstractannotationmessage, messages)
@@ -95,9 +95,7 @@ function render(schema::AbstractAnthropicSchema,
         conversation[end]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
     end
     if is_valid_conversation && (cache == :all_but_last)
-        for i in 1:length(conversation)-1
-            conversation[i]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
-        end
+        length(conversation)-2>0 && (conversation[length(conversation)-2]["content"][end]["cache_control"] = Dict("type" => "ephemeral"))
     end
     if !no_system_message && !isnothing(system) && (cache == :system || cache == :all || cache == :all_but_last)
         ## Apply cache for system message

From 715a1b746294cecc814abce2ea0a8ef047bdc0f2 Mon Sep 17 00:00:00 2001
From: J S <49557684+svilupp@users.noreply.github.com>
Date: Tue, 10 Dec 2024 20:13:48 +0000
Subject: [PATCH 3/3] update the all_but_last implementation

---
 src/llm_anthropic.jl  | 82 ++++++++++++++++++++++++++-----------------
 test/llm_anthropic.jl | 60 ++++++++++++++++++++++++++++++-
 2 files changed, 108 insertions(+), 34 deletions(-)

diff --git a/src/llm_anthropic.jl b/src/llm_anthropic.jl
index 184a1a99b..b3997757c 100644
--- a/src/llm_anthropic.jl
+++ b/src/llm_anthropic.jl
@@ -19,7 +19,12 @@ Builds a history of the conversation to provide the prompt to the API. All unspe
 - `aiprefill`: A string to be used as a prefill for the AI response. This steers the AI response in a certain direction (and potentially saves output tokens).
 - `conversation`: Past conversation to be included in the beginning of the prompt (for continued conversations).
 - `no_system_message`: If `true`, do not include the default system message in the conversation history OR convert any provided system message to a user message.
-- `cache`: A symbol representing the caching strategy to be used. Currently only `nothing` (no caching), `:system`, `:tools`, `:last` and `:all` are supported.
+- `cache`: A symbol representing the caching strategy to be used. Currently only `nothing` (no caching), `:system`, `:tools`, `:last`, `:all_but_last`, and `:all` are supported.
+  - `:system`: Mark only the system message as cacheable. The best default if you have a large system message and will be sending short, single-turn conversations (no replies / follow-up turns).
+  - `:all`: Mark the SYSTEM message, the LAST user message, and the one before it as cacheable. Best for multi-turn conversations (the cache point written at the "last" message is read in the next turn as the "preceding" cache mark).
+  - `:last`: Mark only the last message as cacheable. Use ONLY if you want to send the SAME request multiple times (and want to cache everything up to the last USER message). This will not work for multi-turn conversations, as the "last" message keeps moving.
+  - `:all_but_last`: Mark the SYSTEM message and the user message before the LAST one as cacheable. Use if you have a longer conversation that you want to re-use, but will NOT CONTINUE (no subsequent messages/follow-ups).
+  - In short, use `:all` for multi-turn conversations, `:system` for repeated single-turn conversations with the same system message, and `:all_but_last` for longer conversations that you want to re-use but not continue.
 """
 function render(schema::AbstractAnthropicSchema,
         messages::Vector{<:AbstractMessage};
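A minimal usage sketch of the cache modes documented above (not part of the diff; assumes a configured `ANTHROPIC_API_KEY` and that the `"claudeh"` alias resolves to an Anthropic model in `MODEL_ALIASES`; the prompts are placeholders):

    using PromptingTools

    # Multi-turn flow: `cache = :all` marks the system prompt plus the last two
    # user messages, so the point written as "last" in turn N is read back as
    # the "preceding" mark in turn N+1.
    conv = aigenerate("Summarize this long document: <doc>";
        model = "claudeh", cache = :all, return_all = true)
    conv = aigenerate("Now list the three key risks it mentions.";
        model = "claudeh", cache = :all, conversation = conv, return_all = true)

    # Re-using a finished conversation without continuing it:
    # `:all_but_last` caches only the prefix before the last user turn.
    msg = aigenerate("Translate the summary to French.";
        model = "claudeh", cache = :all_but_last, conversation = conv)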
@@ -76,28 +81,32 @@ function render(schema::AbstractAnthropicSchema,
         # Note: Ignores any DataMessage or other types
     end

-    ## Add Tool definitions to the System Prompt
-    # if !isempty(tools)
-    #     ANTHROPIC_TOOL_SUFFIX = "Use the $(tools[1][:name]) tool in your response."
-    #     ## Add to system message
-    #     if isnothing(system)
-    #         system = ANTHROPIC_TOOL_SUFFIX
-    #     else
-    #         system *= "\n\n" * ANTHROPIC_TOOL_SUFFIX
-    #     end
-    # end
-
+    ## Note: For cache to work, it must be marked in the same location across calls!
     ## Apply cache for last message
     is_valid_conversation = length(conversation) > 0 &&
                             haskey(conversation[end], "content") &&
                             length(conversation[end]["content"]) > 0
-    if is_valid_conversation && (cache == :last || cache == :all)
-        conversation[end]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
-    end
-    if is_valid_conversation && (cache == :all_but_last)
-        length(conversation)-2>0 && (conversation[length(conversation)-2]["content"][end]["cache_control"] = Dict("type" => "ephemeral"))
+    user_msg_counter = 0
+    if is_valid_conversation
+        for i in reverse(eachindex(conversation))
+            ## we mark only user messages
+            # Cache points must be EXACTLY at the same location across calls!
+            if conversation[i]["role"] == "user"
+                if cache == :last && user_msg_counter == 0 # marks exactly once
+                    conversation[i]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
+                elseif cache == :all && user_msg_counter < 2 # marks twice - for 0 and 1
+                    # Mark the last AND preceding user message!
+                    # If we don't do this, then next time we call it with a new message, the cache points will not overlap!
+                    conversation[i]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
+                elseif cache == :all_but_last && user_msg_counter == 1 # marks once, only the preceding user message
+                    conversation[i]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
+                end
+                user_msg_counter += 1
+            end
+        end
     end
-    if !no_system_message && !isnothing(system) && (cache == :system || cache == :all || cache == :all_but_last)
+    if !no_system_message && !isnothing(system) &&
+       (cache == :system || cache == :all || cache == :all_but_last)
         ## Apply cache for system message
         system = [Dict("type" => "text", "text" => system,
             "cache_control" => Dict("type" => "ephemeral"))]
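The same marking rule, extracted into a small self-contained sketch for review (not part of the diff; the `mark_cache!` name and toy payloads are hypothetical, in the patch the logic lives inline in `render`):

    # Walk the conversation from newest to oldest, counting USER messages only,
    # and attach `cache_control` marks per the requested mode.
    function mark_cache!(conversation::Vector{Dict{String, Any}}, cache::Symbol)
        user_msg_counter = 0
        for i in reverse(eachindex(conversation))
            conversation[i]["role"] == "user" || continue
            # :last -> newest user msg; :all -> newest two; :all_but_last -> second-newest only
            if (cache == :last && user_msg_counter == 0) ||
               (cache == :all && user_msg_counter < 2) ||
               (cache == :all_but_last && user_msg_counter == 1)
                conversation[i]["content"][end]["cache_control"] = Dict("type" => "ephemeral")
            end
            user_msg_counter += 1
        end
        return conversation
    end

    # Toy check: with :all_but_last, only the older user turn gets marked.
    conv = Dict{String, Any}[
        Dict("role" => "user", "content" => [Dict{String, Any}("type" => "text", "text" => "turn 1")]),
        Dict("role" => "assistant", "content" => [Dict{String, Any}("type" => "text", "text" => "reply 1")]),
        Dict("role" => "user", "content" => [Dict{String, Any}("type" => "text", "text" => "turn 2")])
    ]
    mark_cache!(conv, :all_but_last)
    @assert haskey(conv[1]["content"][end], "cache_control")
    @assert !haskey(conv[3]["content"][end], "cache_control")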
@@ -264,7 +273,7 @@ Simple wrapper for a call to Anthropic API.
 - `http_kwargs::NamedTuple`: Additional keyword arguments for the HTTP request. Defaults to empty `NamedTuple`.
 - `stream`: A boolean indicating whether to stream the response. Defaults to `false`.
 - `url`: The URL of the Ollama API. Defaults to "localhost".
-- `cache`: A symbol representing the caching strategy to be used. Currently only `nothing` (no caching), `:system`, `:tools`,`:last` and `:all` are supported.
+- `cache`: A symbol representing the caching strategy to be used. Currently only `nothing` (no caching), `:system`, `:tools`, `:last`, `:all_but_last`, and `:all` are supported.
 - `betas`: A vector of symbols representing the beta features to be used. Currently only `:tools` and `:cache` are supported.
 - `kwargs`: Prompt variables to be used to fill the prompt/template
 """
@@ -358,11 +367,12 @@ Generate an AI response based on a given prompt using the Anthropic API.
 - `http_kwargs::NamedTuple`: Additional keyword arguments for the HTTP request. Defaults to empty `NamedTuple`.
 - `api_kwargs::NamedTuple`: Additional keyword arguments for the Ollama API. Defaults to an empty `NamedTuple`.
 - `max_tokens::Int`: The maximum number of tokens to generate. Defaults to 2048, because it's a required parameter for the API.
-- `cache`: A symbol indicating whether to use caching for the prompt. Supported values are `nothing` (no caching), `:system`, `:tools`, `:last` and `:all`. Note that COST estimate will be wrong (ignores the caching).
-  - `:system`: Caches the system message
-  - `:tools`: Caches the tool definitions (and everything before them)
-  - `:last`: Caches the last message in the conversation (and everything before it)
-  - `:all`: Cache trigger points are inserted in all of the above places (ie, higher likelyhood of cache hit, but also slightly higher cost)
+- `cache`: A symbol representing the caching strategy to be used. Currently only `nothing` (no caching), `:system`, `:tools`, `:last`, `:all_but_last`, and `:all` are supported. Note that the COST estimate will be wrong (it ignores the caching).
+  - `:system`: Mark only the system message as cacheable. The best default if you have a large system message and will be sending short, single-turn conversations (no replies / follow-up turns).
+  - `:all`: Mark the SYSTEM message, the LAST user message, and the one before it as cacheable. Best for multi-turn conversations (the cache point written at the "last" message is read in the next turn as the "preceding" cache mark).
+  - `:last`: Mark only the last message as cacheable. Use ONLY if you want to send the SAME request multiple times (and want to cache everything up to the last USER message). This will not work for multi-turn conversations, as the "last" message keeps moving.
+  - `:all_but_last`: Mark the SYSTEM message and the user message before the LAST one as cacheable. Use if you have a longer conversation that you want to re-use, but will NOT CONTINUE (no subsequent messages/follow-ups).
+  - In short, use `:all` for multi-turn conversations, `:system` for repeated single-turn conversations with the same system message, and `:all_but_last` for longer conversations that you want to re-use but not continue.
 - `betas::Union{Nothing, Vector{Symbol}}`: A vector of symbols representing the beta features to be used. See `?anthropic_extra_headers` for details.
 - `kwargs`: Prompt variables to be used to fill the prompt/template

@@ -460,7 +470,7 @@ function aigenerate(
         kwargs...)
     ##
     global MODEL_ALIASES
-    @assert (isnothing(cache) || cache in [:system, :tools, :last, :all, :all_but_last]) "Currently only `:system`, `:tools`, `:last`, `:all_but_last` and `:all` are supported for Anthropic Prompt Caching (cache=$cache)"
+    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all, :all_but_last]) "Currently only `:system`, `:tools`, `:last`, `:all_but_last` and `:all` are supported for Anthropic Prompt Caching (cache=$cache)"
     @assert (isnothing(aiprefill)||!isempty(strip(aiprefill))) "`aiprefill` must not be empty"
     ## Find the unique ID for the model alias provided
     model_id = get(MODEL_ALIASES, model, model)
@@ -555,11 +565,12 @@ It's effectively a light wrapper around `aigenerate` call, which requires additi
 - `http_kwargs`: A named tuple of HTTP keyword arguments.
 - `api_kwargs`: A named tuple of API keyword arguments.
   - `:tool_choice`: A string indicating which tool to use. Supported values are `nothing`, `"auto"`, `"any"` and `"exact"`. `nothing` will use the default tool choice.
-- `cache`: A symbol indicating whether to use caching for the prompt. Supported values are `nothing` (no caching), `:system`, `:tools`, `:last` and `:all`. Note that COST estimate will be wrong (ignores the caching).
-  - `:system`: Caches the system message
-  - `:tools`: Caches the tool definitions (and everything before them)
-  - `:last`: Caches the last message in the conversation (and everything before it)
-  - `:all`: Cache trigger points are inserted in all of the above places (ie, higher likelyhood of cache hit, but also slightly higher cost)
+- `cache`: A symbol representing the caching strategy to be used. Currently only `nothing` (no caching), `:system`, `:tools`, `:last`, `:all_but_last`, and `:all` are supported. Note that the COST estimate will be wrong (it ignores the caching).
+  - `:system`: Mark only the system message as cacheable. The best default if you have a large system message and will be sending short, single-turn conversations (no replies / follow-up turns).
+  - `:all`: Mark the SYSTEM message, the LAST user message, and the one before it as cacheable. Best for multi-turn conversations (the cache point written at the "last" message is read in the next turn as the "preceding" cache mark).
+  - `:last`: Mark only the last message as cacheable. Use ONLY if you want to send the SAME request multiple times (and want to cache everything up to the last USER message). This will not work for multi-turn conversations, as the "last" message keeps moving.
+  - `:all_but_last`: Mark the SYSTEM message and the user message before the LAST one as cacheable. Use if you have a longer conversation that you want to re-use, but will NOT CONTINUE (no subsequent messages/follow-ups).
+  - In short, use `:all` for multi-turn conversations, `:system` for repeated single-turn conversations with the same system message, and `:all_but_last` for longer conversations that you want to re-use but not continue.
 - `betas::Union{Nothing, Vector{Symbol}}`: A vector of symbols representing the beta features to be used. See `?anthropic_extra_headers` for details.
 - `kwargs`: Prompt variables to be used to fill the prompt/template

@@ -693,7 +704,7 @@ function aiextract(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMP
         kwargs...)
     ##
     global MODEL_ALIASES
-    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all]) "Currently only `:system`, `:tools`, `:last` and `:all` are supported for Anthropic Prompt Caching"
+    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all_but_last, :all]) "Currently only `:system`, `:tools`, `:last`, `:all_but_last` and `:all` are supported for Anthropic Prompt Caching"
     ## Check that no functions or methods are provided, that is not supported
     @assert !(return_type isa Vector)||!any(x -> x isa Union{Function, Method}, return_type) "Functions and Methods are not supported in `aiextract`!"
@@ -818,7 +829,12 @@ Differences to `aiextract`: Can provide infinitely many tools (including Functio
 - `conversation`: An optional vector of `AbstractMessage` objects representing the conversation history.
 - `no_system_message::Bool = false`: Whether to exclude the system message from the conversation history.
 - `image_path::Union{Nothing, AbstractString, Vector{<:AbstractString}} = nothing`: A path to a local image file, or a vector of paths to local image files. Always attaches images to the latest user message.
-- `cache::Union{Nothing, Symbol} = nothing`: Whether to cache the prompt. Defaults to `nothing`.
+- `cache`: A symbol representing the caching strategy to be used. Currently only `nothing` (no caching), `:system`, `:tools`, `:last`, `:all_but_last`, and `:all` are supported. Note that the COST estimate will be wrong (it ignores the caching).
+  - `:system`: Mark only the system message as cacheable. The best default if you have a large system message and will be sending short, single-turn conversations (no replies / follow-up turns).
+  - `:all`: Mark the SYSTEM message, the LAST user message, and the one before it as cacheable. Best for multi-turn conversations (the cache point written at the "last" message is read in the next turn as the "preceding" cache mark).
+  - `:last`: Mark only the last message as cacheable. Use ONLY if you want to send the SAME request multiple times (and want to cache everything up to the last USER message). This will not work for multi-turn conversations, as the "last" message keeps moving.
+  - `:all_but_last`: Mark the SYSTEM message and the user message before the LAST one as cacheable. Use if you have a longer conversation that you want to re-use, but will NOT CONTINUE (no subsequent messages/follow-ups).
+  - In short, use `:all` for multi-turn conversations, `:system` for repeated single-turn conversations with the same system message, and `:all_but_last` for longer conversations that you want to re-use but not continue.
 - `betas::Union{Nothing, Vector{Symbol}} = nothing`: A vector of symbols representing the beta features to be used. See `?anthropic_extra_headers` for details.
 - `http_kwargs`: A named tuple of HTTP keyword arguments.
 - `api_kwargs`: A named tuple of API keyword arguments. Several important arguments are highlighted below:
@@ -900,7 +916,7 @@ function aitools(prompt_schema::AbstractAnthropicSchema, prompt::ALLOWED_PROMPT_
         tool_choice = nothing),
         kwargs...)
     global MODEL_ALIASES
-    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all]) "Currently only `:system`, `:tools`, `:last` and `:all` are supported for Anthropic Prompt Caching"
+    @assert (isnothing(cache)||cache in [:system, :tools, :last, :all_but_last, :all]) "Currently only `:system`, `:tools`, `:last`, `:all_but_last` and `:all` are supported for Anthropic Prompt Caching"
     ## Find the unique ID for the model alias provided
     model_id = get(MODEL_ALIASES, model, model)

diff --git a/test/llm_anthropic.jl b/test/llm_anthropic.jl
index 83a5d47e5..c6518968d 100644
--- a/test/llm_anthropic.jl
+++ b/test/llm_anthropic.jl
@@ -174,6 +174,23 @@ using PromptingTools: call_cost, anthropic_api, function_call_signature,
             "cache_control" => Dict("type" => "ephemeral"))])])
     @test conversation == expected_output

+    ## We mark only user messages
+    messages_with_ai = [
+        SystemMessage("Act as a helpful AI assistant"),
+        UserMessage("Hello, my name is {{name}}"),
+        AIMessage("Hi there")
+    ]
+    conversation = render(schema, messages_with_ai; name = "John", cache = :last)
+    expected_output = (;
+        system = "Act as a helpful AI assistant",
+        conversation = [
+            Dict("role" => "user",
+                "content" => [Dict("type" => "text", "text" => "Hello, my name is John",
+                    "cache_control" => Dict("type" => "ephemeral"))]),
+            Dict("role" => "assistant",
+                "content" => [Dict("type" => "text", "text" => "Hi there")])])
+    @test conversation == expected_output
+
     conversation = render(schema, messages; name = "John", cache = :all)
     expected_output = (;
         system = Dict{String, Any}[Dict("cache_control" => Dict("type" => "ephemeral"),
@@ -183,7 +200,48 @@ using PromptingTools: call_cost, anthropic_api, function_call_signature,
             "cache_control" => Dict("type" => "ephemeral"))])])
     @test conversation == expected_output

-    # Test aiprefill functionality
+    conversation = render(schema, messages_with_ai; name = "John", cache = :all)
+    expected_output = (;
+        system = Dict{String, Any}[Dict("cache_control" => Dict("type" => "ephemeral"),
+            "text" => "Act as a helpful AI assistant", "type" => "text")],
+        conversation = [
+            Dict("role" => "user",
+                "content" => [Dict("type" => "text", "text" => "Hello, my name is John",
+                    "cache_control" => Dict("type" => "ephemeral"))]),
+            Dict("role" => "assistant",
+                "content" => [Dict("type" => "text", "text" => "Hi there")])])
+    @test conversation == expected_output
+
+    ## Longer conversation
+    messages_longer = [
+        SystemMessage("Act as a helpful AI assistant"),
+        UserMessage("Hello, my name is {{name}}"),
+        AIMessage("Hi there"),
+        UserMessage("How are you?"),
+        AIMessage("I'm doing well, thank you!")
+    ]
+    system, conversation = render(schema, messages_longer; name = "John", cache = :all)
+    ## marks last user message
+    @test conversation[end - 1]["content"][end]["cache_control"] ==
+          Dict("type" => "ephemeral")
+    ## marks one before last user message
+    @test conversation[end - 3]["content"][end]["cache_control"] ==
+          Dict("type" => "ephemeral")
+    ## marks system message
+    @test system[1]["cache_control"] == Dict("type" => "ephemeral")
+
+    ## all_but_last
+    system, conversation = render(
+        schema, messages_longer; name = "John", cache = :all_but_last)
+    ## does not mark last user message
+    @test !haskey(conversation[end - 1]["content"][end], "cache_control")
+    ## marks one before last user message
+    @test conversation[end - 3]["content"][end]["cache_control"] ==
+          Dict("type" => "ephemeral")
+    ## marks system message
+    @test system[1]["cache_control"] == Dict("type" => "ephemeral")
+
+    ### Test aiprefill functionality
     messages = [
         SystemMessage("Act as a helpful AI assistant"),
         UserMessage("Hello, what's your name?")