From 5e41883f135b5a11e2ae459b1e0a9679bfdeecb9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 13 Dec 2024 15:21:53 +0000 Subject: [PATCH] feat: Updated OpenAPI spec --- src/libs/Cohere/Generated/Cohere.CohereApi.Chat.g.cs | 3 ++- src/libs/Cohere/Generated/Cohere.CohereApi.Chatv2.g.cs | 3 ++- src/libs/Cohere/Generated/Cohere.ICohereApi.Chat.g.cs | 3 ++- src/libs/Cohere/Generated/Cohere.ICohereApi.Chatv2.g.cs | 3 ++- src/libs/Cohere/Generated/Cohere.Models.ChatRequest.g.cs | 6 ++++-- .../Cohere.Models.ChatRequestPromptTruncation.g.cs | 2 +- .../Generated/Cohere.Models.ChatRequestSafetyMode.g.cs | 3 ++- .../Cohere/Generated/Cohere.Models.Chatv2Request.g.cs | 6 ++++-- .../Generated/Cohere.Models.Chatv2RequestSafetyMode.g.cs | 5 +++-- .../Cohere/Generated/Cohere.Models.CitationOptions.g.cs | 6 ++++-- .../Generated/Cohere.Models.CitationOptionsMode.g.cs | 3 ++- src/libs/Cohere/openapi.yaml | 8 ++++---- 12 files changed, 32 insertions(+), 19 deletions(-) diff --git a/src/libs/Cohere/Generated/Cohere.CohereApi.Chat.g.cs b/src/libs/Cohere/Generated/Cohere.CohereApi.Chat.g.cs index 05188d7..9bed5d6 100644 --- a/src/libs/Cohere/Generated/Cohere.CohereApi.Chat.g.cs +++ b/src/libs/Cohere/Generated/Cohere.CohereApi.Chat.g.cs @@ -561,7 +561,7 @@ partial void ProcessChatResponseContent( /// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
- /// Compatible Deployments:
+ /// Compatible Deployments:
/// - AUTO: Cohere Platform Only
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments /// @@ -694,6 +694,7 @@ partial void ProcessChatResponseContent( /// When `NONE` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
/// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments /// /// The token to cancel the operation with diff --git a/src/libs/Cohere/Generated/Cohere.CohereApi.Chatv2.g.cs b/src/libs/Cohere/Generated/Cohere.CohereApi.Chatv2.g.cs index f5e89fc..f299756 100644 --- a/src/libs/Cohere/Generated/Cohere.CohereApi.Chatv2.g.cs +++ b/src/libs/Cohere/Generated/Cohere.CohereApi.Chatv2.g.cs @@ -552,7 +552,8 @@ partial void ProcessChatv2ResponseContent( /// Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
/// When `OFF` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
- /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer. + /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes. /// /// /// The maximum number of tokens the model will generate as part of the response.
diff --git a/src/libs/Cohere/Generated/Cohere.ICohereApi.Chat.g.cs b/src/libs/Cohere/Generated/Cohere.ICohereApi.Chat.g.cs index a5b723c..385f1d2 100644 --- a/src/libs/Cohere/Generated/Cohere.ICohereApi.Chat.g.cs +++ b/src/libs/Cohere/Generated/Cohere.ICohereApi.Chat.g.cs @@ -64,7 +64,7 @@ public partial interface ICohereApi /// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
- /// Compatible Deployments:
+ /// Compatible Deployments:
/// - AUTO: Cohere Platform Only
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments /// @@ -197,6 +197,7 @@ public partial interface ICohereApi /// When `NONE` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
/// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments /// /// The token to cancel the operation with diff --git a/src/libs/Cohere/Generated/Cohere.ICohereApi.Chatv2.g.cs b/src/libs/Cohere/Generated/Cohere.ICohereApi.Chatv2.g.cs index 5c2670f..246d7ae 100644 --- a/src/libs/Cohere/Generated/Cohere.ICohereApi.Chatv2.g.cs +++ b/src/libs/Cohere/Generated/Cohere.ICohereApi.Chatv2.g.cs @@ -63,7 +63,8 @@ public partial interface ICohereApi /// Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
/// When `OFF` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
- /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer. + /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes. /// /// /// The maximum number of tokens the model will generate as part of the response.
diff --git a/src/libs/Cohere/Generated/Cohere.Models.ChatRequest.g.cs b/src/libs/Cohere/Generated/Cohere.Models.ChatRequest.g.cs index d2cf3d8..f6029a7 100644 --- a/src/libs/Cohere/Generated/Cohere.Models.ChatRequest.g.cs +++ b/src/libs/Cohere/Generated/Cohere.Models.ChatRequest.g.cs @@ -64,7 +64,7 @@ public sealed partial class ChatRequest /// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
- /// Compatible Deployments:
+ /// Compatible Deployments:
/// - AUTO: Cohere Platform Only
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments /// @@ -254,6 +254,7 @@ public sealed partial class ChatRequest /// When `NONE` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
/// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments /// [global::System.Text.Json.Serialization.JsonPropertyName("safety_mode")] @@ -306,7 +307,7 @@ public sealed partial class ChatRequest /// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
- /// Compatible Deployments:
+ /// Compatible Deployments:
/// - AUTO: Cohere Platform Only
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments /// @@ -439,6 +440,7 @@ public sealed partial class ChatRequest /// When `NONE` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
/// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/libs/Cohere/Generated/Cohere.Models.ChatRequestPromptTruncation.g.cs b/src/libs/Cohere/Generated/Cohere.Models.ChatRequestPromptTruncation.g.cs index ccc64a3..9f3d796 100644 --- a/src/libs/Cohere/Generated/Cohere.Models.ChatRequestPromptTruncation.g.cs +++ b/src/libs/Cohere/Generated/Cohere.Models.ChatRequestPromptTruncation.g.cs @@ -9,7 +9,7 @@ namespace Cohere /// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
- /// Compatible Deployments:
+ /// Compatible Deployments:
/// - AUTO: Cohere Platform Only
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments /// diff --git a/src/libs/Cohere/Generated/Cohere.Models.ChatRequestSafetyMode.g.cs b/src/libs/Cohere/Generated/Cohere.Models.ChatRequestSafetyMode.g.cs index e761348..dd6680e 100644 --- a/src/libs/Cohere/Generated/Cohere.Models.ChatRequestSafetyMode.g.cs +++ b/src/libs/Cohere/Generated/Cohere.Models.ChatRequestSafetyMode.g.cs @@ -8,6 +8,7 @@ namespace Cohere /// When `NONE` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
/// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments /// public enum ChatRequestSafetyMode @@ -17,7 +18,7 @@ public enum ChatRequestSafetyMode /// CONTEXTUAL, /// - /// + /// `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes. /// STRICT, /// diff --git a/src/libs/Cohere/Generated/Cohere.Models.Chatv2Request.g.cs b/src/libs/Cohere/Generated/Cohere.Models.Chatv2Request.g.cs index 4efe29f..3c84f54 100644 --- a/src/libs/Cohere/Generated/Cohere.Models.Chatv2Request.g.cs +++ b/src/libs/Cohere/Generated/Cohere.Models.Chatv2Request.g.cs @@ -76,7 +76,8 @@ public sealed partial class Chatv2Request /// Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
/// When `OFF` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
- /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer. + /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes. ///
[global::System.Text.Json.Serialization.JsonPropertyName("safety_mode")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::Cohere.JsonConverters.Chatv2RequestSafetyModeJsonConverter))] @@ -196,7 +197,8 @@ public sealed partial class Chatv2Request /// Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
/// When `OFF` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
- /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer. + /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes. /// /// /// The maximum number of tokens the model will generate as part of the response.
diff --git a/src/libs/Cohere/Generated/Cohere.Models.Chatv2RequestSafetyMode.g.cs b/src/libs/Cohere/Generated/Cohere.Models.Chatv2RequestSafetyMode.g.cs index d1c1261..a4cb48f 100644 --- a/src/libs/Cohere/Generated/Cohere.Models.Chatv2RequestSafetyMode.g.cs +++ b/src/libs/Cohere/Generated/Cohere.Models.Chatv2RequestSafetyMode.g.cs @@ -7,7 +7,8 @@ namespace Cohere /// Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
/// When `OFF` is specified, the safety instruction will be omitted.
/// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
- /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer. + /// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
+ /// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes. /// public enum Chatv2RequestSafetyMode { @@ -16,7 +17,7 @@ public enum Chatv2RequestSafetyMode /// CONTEXTUAL, /// - /// + /// `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes. /// STRICT, /// diff --git a/src/libs/Cohere/Generated/Cohere.Models.CitationOptions.g.cs b/src/libs/Cohere/Generated/Cohere.Models.CitationOptions.g.cs index d8248c0..62f791e 100644 --- a/src/libs/Cohere/Generated/Cohere.Models.CitationOptions.g.cs +++ b/src/libs/Cohere/Generated/Cohere.Models.CitationOptions.g.cs @@ -10,7 +10,8 @@ public sealed partial class CitationOptions { /// /// Defaults to `"accurate"`.
- /// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + /// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results.
+ /// **Note**: `command-r7b-12-2024` only supports `"fast"` and `"off"` modes. Its default is `"fast"`. ///
[global::System.Text.Json.Serialization.JsonPropertyName("mode")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::Cohere.JsonConverters.CitationOptionsModeJsonConverter))] @@ -27,7 +28,8 @@ public sealed partial class CitationOptions ///
/// /// Defaults to `"accurate"`.
- /// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + /// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results.
+ /// **Note**: `command-r7b-12-2024` only supports `"fast"` and `"off"` modes. Its default is `"fast"`. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CitationOptions( diff --git a/src/libs/Cohere/Generated/Cohere.Models.CitationOptionsMode.g.cs b/src/libs/Cohere/Generated/Cohere.Models.CitationOptionsMode.g.cs index 5889a24..885c754 100644 --- a/src/libs/Cohere/Generated/Cohere.Models.CitationOptionsMode.g.cs +++ b/src/libs/Cohere/Generated/Cohere.Models.CitationOptionsMode.g.cs @@ -5,7 +5,8 @@ namespace Cohere { /// /// Defaults to `"accurate"`.
- /// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results. + /// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results, `"fast"` results or no results.
+ /// **Note**: `command-r7b-12-2024` only supports `"fast"` and `"off"` modes. Its default is `"fast"`. ///
public enum CitationOptionsMode { diff --git a/src/libs/Cohere/openapi.yaml b/src/libs/Cohere/openapi.yaml index 8ded627..1875df8 100644 --- a/src/libs/Cohere/openapi.yaml +++ b/src/libs/Cohere/openapi.yaml @@ -79,7 +79,7 @@ paths: - AUTO - AUTO_PRESERVE_ORDER type: string - description: "Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.\n\nDictates how the prompt will be constructed.\n\nWith `prompt_truncation` set to \"AUTO\", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.\n\nWith `prompt_truncation` set to \"AUTO_PRESERVE_ORDER\", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.\n\nWith `prompt_truncation` set to \"OFF\", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.\n\nCompatible Deployments: \n - AUTO: Cohere Platform Only\n - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments\n" + description: "Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.\n\nDictates how the prompt will be constructed.\n\nWith `prompt_truncation` set to \"AUTO\", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.\n\nWith `prompt_truncation` set to \"AUTO_PRESERVE_ORDER\", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.\n\nWith `prompt_truncation` set to \"OFF\", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.\n\nCompatible Deployments:\n - AUTO: Cohere Platform Only\n - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments\n" x-fern-audiences: - public connectors: @@ -204,7 +204,7 @@ paths: - CONTEXTUAL - STRICT - NONE - description: "Used to select the [safety instruction](https://docs.cohere.com/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.\nWhen `NONE` is specified, the safety instruction will be omitted.\n\nSafety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.\n\n**Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release) and newer.\n\nCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments\n" + description: "Used to select the [safety instruction](https://docs.cohere.com/docs/safety-modes) inserted into the prompt.
Defaults to `CONTEXTUAL`.\nWhen `NONE` is specified, the safety instruction will be omitted.\n\nSafety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.\n\n**Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/docs/command-r-plus#august-2024-release) and newer.\n\n**Note**: `command-r7b-12-2024` only supports `\"CONTEXTUAL\"` and `\"STRICT\"` modes.\n\nCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments\n" x-fern-audiences: - public x-fern-availability: beta @@ -802,7 +802,7 @@ paths: - CONTEXTUAL - STRICT - OFF - description: "Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.\nWhen `OFF` is specified, the safety instruction will be omitted.\n\nSafety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.\n\n**Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.\n" + description: "Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.\nWhen `OFF` is specified, the safety instruction will be omitted.\n\nSafety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.\n\n**Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.\n\n**Note**: `command-r7b-12-2024` only supports `\"CONTEXTUAL\"` and `\"STRICT\"` modes.\n" x-fern-audiences: - public max_tokens: @@ -12218,7 +12218,7 @@ components: - ACCURATE - OFF type: string - description: "Defaults to `\"accurate\"`.\nDictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `\"accurate\"` results, `\"fast\"` results or no results.\n" + description: "Defaults to `\"accurate\"`.\nDictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `\"accurate\"` results, `\"fast\"` results or no results.\n\n**Note**: `command-r7b-12-2024` only supports `\"fast\"` and `\"off\"` modes. Its default is `\"fast\"`.\n" description: Options for controlling citation generation. ResponseFormatTypeV2: enum: