diff --git a/OpenAI.SDK/ObjectModels/RequestModels/ChatCompletionCreateRequest.cs b/OpenAI.SDK/ObjectModels/RequestModels/ChatCompletionCreateRequest.cs
index df294d17..c591adc1 100644
--- a/OpenAI.SDK/ObjectModels/RequestModels/ChatCompletionCreateRequest.cs
+++ b/OpenAI.SDK/ObjectModels/RequestModels/ChatCompletionCreateRequest.cs
@@ -94,7 +94,7 @@ public IList<string>? StopCalculated
 
     /// <summary>
-    ///     An upper bound for the number of tokens that can be generated for a completion,
+    ///     An upper bound for the number of tokens that can be generated for a completion,
     ///     including visible output tokens and reasoning tokens.
     /// </summary>
     /// <remarks>
@@ -267,6 +267,12 @@ public ResponseFormats? ChatResponseFormat
     [JsonPropertyName("top_logprobs")]
     public int? TopLogprobs { get; set; }
 
+    /// <summary>
+    ///     Whether to enable parallel function calling during tool use.
+    /// </summary>
+    [JsonPropertyName("parallel_tool_calls")]
+    public bool? ParallelToolCalls { get; set; }
+
     /// <summary>
     ///     ID of the model to use. For models supported see <see cref="Models" /> start with Gpt_
     /// </summary>
@@ -291,4 +297,15 @@ public IEnumerable<ValidationResult> Validate()
     /// </summary>
     [JsonPropertyName("user")]
     public string User { get; set; }
+
+    /// <summary>
+    ///     Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
+    ///     If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+    ///     If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
+    ///     If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
+    ///     When not set, the default behavior is 'auto'.
+    ///     When this parameter is set, the response body will include the service_tier utilized.
+    /// </summary>
+    [JsonPropertyName("service_tier")]
+    public string? ServiceTier { get; set; }
 }
\ No newline at end of file
diff --git a/OpenAI.SDK/ObjectModels/ResponseModels/ChatCompletionCreateResponse.cs b/OpenAI.SDK/ObjectModels/ResponseModels/ChatCompletionCreateResponse.cs
index 64abb2cb..7386151f 100644
--- a/OpenAI.SDK/ObjectModels/ResponseModels/ChatCompletionCreateResponse.cs
+++ b/OpenAI.SDK/ObjectModels/ResponseModels/ChatCompletionCreateResponse.cs
@@ -5,21 +5,48 @@ namespace OpenAI.ObjectModels.ResponseModels;
 
 public record ChatCompletionCreateResponse : BaseResponse, IOpenAiModels.IId, IOpenAiModels.ICreatedAt
 {
+    /// <summary>
+    ///     The model used for the chat completion.
+    /// </summary>
     [JsonPropertyName("model")]
     public string Model { get; set; }
 
+    /// <summary>
+    ///     A list of chat completion choices. Can be more than one if n is greater than 1.
+    /// </summary>
     [JsonPropertyName("choices")]
     public List<ChatChoiceResponse> Choices { get; set; }
 
+    /// <summary>
+    ///     Usage statistics for the completion request.
+    /// </summary>
     [JsonPropertyName("usage")]
     public UsageResponse Usage { get; set; }
 
+    /// <summary>
+    ///     This fingerprint represents the backend configuration that the model runs with.
+    ///     Can be used in conjunction with the seed request parameter to understand when backend changes have been made that
+    ///     might impact determinism.
+    /// </summary>
     [JsonPropertyName("system_fingerprint")]
     public string SystemFingerPrint { get; set; }
 
+    /// <summary>
+    ///     The service tier used for processing the request. This field is only included if the service_tier parameter is
+    ///     specified in the request.
+    /// </summary>
+    [JsonPropertyName("service_tier")]
+    public string? ServiceTier { get; set; }
+
+    /// <summary>
+    ///     The Unix timestamp (in seconds) of when the chat completion was created.
+    /// </summary>
     [JsonPropertyName("created")]
     public int CreatedAt { get; set; }
 
+    /// <summary>
+    ///     A unique identifier for the chat completion.
+    /// </summary>
     [JsonPropertyName("id")]
     public string Id { get; set; }
 }
\ No newline at end of file
diff --git a/Readme.md b/Readme.md
index d72f7a77..6b44e32a 100644
--- a/Readme.md
+++ b/Readme.md
@@ -118,6 +118,8 @@ Needless to say, I cannot accept responsibility for any damage caused by using t
 ### 8.7.0
 - Added support for o1 reasoning models (`o1-mini` and `o1-preview`).
 - Added `MaxCompletionTokens` for `chat completions`.
+- Added support for `ParallelToolCalls` for `chat completions`.
+- Added support for `ServiceTier` for `chat completions`.
 - Added support for `ChunkingStrategy` in `Vector Store` and `Vector Store Files`.
 - Added support for `Strict` in `ToolDefinition`.
 - Added support for `MaxNumberResults` and `RankingOptions` for `FileSearchTool`.
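
For reviewers, a minimal usage sketch of the two new request options together, not part of this diff. It assumes `sdk` is an already-configured `OpenAIService` and `weatherTool` is a `ToolDefinition` built elsewhere; both names are placeholders:

```csharp
// Sketch only — assumes sdk (OpenAIService) and weatherTool (ToolDefinition)
// are defined elsewhere.
var result = await sdk.ChatCompletion.CreateCompletion(new ChatCompletionCreateRequest
{
    Model = Models.Gpt_4o,
    Messages = new List<ChatMessage>
    {
        ChatMessage.FromUser("What is the weather in Boston and in Paris?")
    },
    Tools = new List<ToolDefinition> { weatherTool },
    ParallelToolCalls = true, // new: let the model issue several tool calls in one turn
    ServiceTier = "auto"      // new: use scale tier credits when the project has them
});

if (result.Successful)
{
    // Because service_tier was set on the request, the response echoes the tier used.
    Console.WriteLine(result.ServiceTier);
}
```

Both new properties are nullable, so callers that never set them should see no change in the serialized request.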