From 042719fb93da96810d69e33cb6b70f37b847dd3b Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Thu, 10 Oct 2024 01:01:40 +0000
Subject: [PATCH] feat: Updated OpenAPI spec

---
 ...licate.IReplicateApi.DeploymentsPredictionsCreate.g.cs | 8 ++++++--
 .../Replicate.IReplicateApi.ModelsPredictionsCreate.g.cs  | 8 ++++++--
 .../Replicate.IReplicateApi.PredictionsCreate.g.cs        | 8 ++++++--
 ...plicate.ReplicateApi.DeploymentsPredictionsCreate.g.cs | 8 ++++++--
 .../Replicate.ReplicateApi.ModelsPredictionsCreate.g.cs   | 8 ++++++--
 .../Replicate.ReplicateApi.PredictionsCreate.g.cs         | 8 ++++++--
 src/libs/Replicate/openapi.yaml                           | 4 +++-
 7 files changed, 39 insertions(+), 13 deletions(-)

diff --git a/src/libs/Replicate/Generated/Replicate.IReplicateApi.DeploymentsPredictionsCreate.g.cs b/src/libs/Replicate/Generated/Replicate.IReplicateApi.DeploymentsPredictionsCreate.g.cs
index fa982a5..24e34e1 100644
--- a/src/libs/Replicate/Generated/Replicate.IReplicateApi.DeploymentsPredictionsCreate.g.cs
+++ b/src/libs/Replicate/Generated/Replicate.IReplicateApi.DeploymentsPredictionsCreate.g.cs
@@ -20,7 +20,9 @@ public partial interface IReplicateApi
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The token to cancel the operation with
 ///
@@ -47,7 +49,9 @@ public partial interface IReplicateApi
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The model's input as a JSON object. The input schema depends on what model you are running. To see the available inputs, click the "API" tab on the model you are running or [get the model version](#models.versions.get) and look at its `openapi_schema` property. For example, [stability-ai/sdxl](https://replicate.com/stability-ai/sdxl) takes `prompt` as an input.<br/>
 /// Files should be passed as HTTP URLs or data URLs.<br/>
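The `Example: wait=5` text added above documents the `Prefer` header on the deployment predictions endpoint. As a rough, hypothetical sketch (not part of this patch or the generated client), that header could be sent with a plain HttpClient as below; the deployment owner/name, token lookup, and input fields are placeholders:

// Illustrative sketch only: send "Prefer: wait=5" on a deployment prediction request.
// "acme/my-deployment" and the input values are placeholders, not from this patch.
using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Net.Http.Json;
using System.Threading.Tasks;

class PreferWaitSketch
{
    static async Task Main()
    {
        using var http = new HttpClient();
        http.DefaultRequestHeaders.Authorization =
            new AuthenticationHeaderValue("Bearer", Environment.GetEnvironmentVariable("REPLICATE_API_TOKEN"));
        // Hold the request open for up to 5 seconds, matching the documented "Example: wait=5".
        http.DefaultRequestHeaders.Add("Prefer", "wait=5");

        var response = await http.PostAsJsonAsync(
            "https://api.replicate.com/v1/deployments/acme/my-deployment/predictions",
            new { input = new { prompt = "an astronaut riding a horse" } });

        // If the model finishes inside the wait window, the body already contains `output`;
        // otherwise the prediction comes back still in a `starting`/`processing` state.
        Console.WriteLine(await response.Content.ReadAsStringAsync());
    }
}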
diff --git a/src/libs/Replicate/Generated/Replicate.IReplicateApi.ModelsPredictionsCreate.g.cs b/src/libs/Replicate/Generated/Replicate.IReplicateApi.ModelsPredictionsCreate.g.cs
index d1804e0..d5feb1a 100644
--- a/src/libs/Replicate/Generated/Replicate.IReplicateApi.ModelsPredictionsCreate.g.cs
+++ b/src/libs/Replicate/Generated/Replicate.IReplicateApi.ModelsPredictionsCreate.g.cs
@@ -20,7 +20,9 @@ public partial interface IReplicateApi
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The token to cancel the operation with
 ///
@@ -47,7 +49,9 @@ public partial interface IReplicateApi
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The model's input as a JSON object. The input schema depends on what model you are running. To see the available inputs, click the "API" tab on the model you are running or [get the model version](#models.versions.get) and look at its `openapi_schema` property. For example, [stability-ai/sdxl](https://replicate.com/stability-ai/sdxl) takes `prompt` as an input.<br/>
 /// Files should be passed as HTTP URLs or data URLs.<br/>
diff --git a/src/libs/Replicate/Generated/Replicate.IReplicateApi.PredictionsCreate.g.cs b/src/libs/Replicate/Generated/Replicate.IReplicateApi.PredictionsCreate.g.cs
index aa62658..6c91888 100644
--- a/src/libs/Replicate/Generated/Replicate.IReplicateApi.PredictionsCreate.g.cs
+++ b/src/libs/Replicate/Generated/Replicate.IReplicateApi.PredictionsCreate.g.cs
@@ -18,7 +18,9 @@ public partial interface IReplicateApi
 /// The request will wait up to 60 seconds for the model to run. If this time is exceeded the prediction will be returned in a `"starting"` state and need to be retrieved using the `predictions.get` endpiont.<br/>
 /// For a complete overview of the `predictions.create` API check out our documentation on [creating a prediction](https://replicate.com/docs/topics/predictions/create-a-prediction) which covers a variety of use cases.
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The token to cancel the operation with
 ///
@@ -41,7 +43,9 @@ public partial interface IReplicateApi
 /// The request will wait up to 60 seconds for the model to run. If this time is exceeded the prediction will be returned in a `"starting"` state and need to be retrieved using the `predictions.get` endpiont.<br/>
 /// For a complete overview of the `predictions.create` API check out our documentation on [creating a prediction](https://replicate.com/docs/topics/predictions/create-a-prediction) which covers a variety of use cases.
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The model's input as a JSON object. The input schema depends on what model you are running. To see the available inputs, click the "API" tab on the model you are running or [get the model version](#models.versions.get) and look at its `openapi_schema` property. For example, [stability-ai/sdxl](https://replicate.com/stability-ai/sdxl) takes `prompt` as an input.<br/>
 /// Files should be passed as HTTP URLs or data URLs.<br/>
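As a side note on the `input` description in these doc comments, a minimal sketch of the JSON shape it describes might look like the following; the field names (`prompt`, `image`) and the version id are placeholders, since the real schema depends on the model being run:

// Hypothetical input payload for a prediction request; prompt/image/version are placeholders.
using System;
using System.Text.Json;

class PredictionInputSketch
{
    static void Main()
    {
        var body = new
        {
            version = "<model-version-id>",   // placeholder version id
            input = new
            {
                prompt = "an astronaut riding a horse",
                // Files are passed as HTTP URLs or data URLs, not raw bytes:
                image = "https://example.com/source.png"
                // or a data URL such as "data:image/png;base64,...."
            }
        };

        Console.WriteLine(JsonSerializer.Serialize(body, new JsonSerializerOptions { WriteIndented = true }));
    }
}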
diff --git a/src/libs/Replicate/Generated/Replicate.ReplicateApi.DeploymentsPredictionsCreate.g.cs b/src/libs/Replicate/Generated/Replicate.ReplicateApi.DeploymentsPredictionsCreate.g.cs
index b949850..f32ff35 100644
--- a/src/libs/Replicate/Generated/Replicate.ReplicateApi.DeploymentsPredictionsCreate.g.cs
+++ b/src/libs/Replicate/Generated/Replicate.ReplicateApi.DeploymentsPredictionsCreate.g.cs
@@ -38,7 +38,9 @@ partial void ProcessDeploymentsPredictionsCreateResponse(
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The token to cancel the operation with
 ///
@@ -137,7 +139,9 @@ partial void ProcessDeploymentsPredictionsCreateResponse(
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The model's input as a JSON object. The input schema depends on what model you are running. To see the available inputs, click the "API" tab on the model you are running or [get the model version](#models.versions.get) and look at its `openapi_schema` property. For example, [stability-ai/sdxl](https://replicate.com/stability-ai/sdxl) takes `prompt` as an input.<br/>
 /// Files should be passed as HTTP URLs or data URLs.<br/>
diff --git a/src/libs/Replicate/Generated/Replicate.ReplicateApi.ModelsPredictionsCreate.g.cs b/src/libs/Replicate/Generated/Replicate.ReplicateApi.ModelsPredictionsCreate.g.cs
index a0055e8..e200191 100644
--- a/src/libs/Replicate/Generated/Replicate.ReplicateApi.ModelsPredictionsCreate.g.cs
+++ b/src/libs/Replicate/Generated/Replicate.ReplicateApi.ModelsPredictionsCreate.g.cs
@@ -43,7 +43,9 @@ partial void ProcessModelsPredictionsCreateResponseContent(
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The token to cancel the operation with
 ///
@@ -165,7 +167,9 @@ partial void ProcessModelsPredictionsCreateResponseContent(
 ///
 ///
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The model's input as a JSON object. The input schema depends on what model you are running. To see the available inputs, click the "API" tab on the model you are running or [get the model version](#models.versions.get) and look at its `openapi_schema` property. For example, [stability-ai/sdxl](https://replicate.com/stability-ai/sdxl) takes `prompt` as an input.<br/>
 /// Files should be passed as HTTP URLs or data URLs.<br/>
diff --git a/src/libs/Replicate/Generated/Replicate.ReplicateApi.PredictionsCreate.g.cs b/src/libs/Replicate/Generated/Replicate.ReplicateApi.PredictionsCreate.g.cs
index c69de35..4d9a443 100644
--- a/src/libs/Replicate/Generated/Replicate.ReplicateApi.PredictionsCreate.g.cs
+++ b/src/libs/Replicate/Generated/Replicate.ReplicateApi.PredictionsCreate.g.cs
@@ -32,7 +32,9 @@ partial void ProcessPredictionsCreateResponse(
 /// The request will wait up to 60 seconds for the model to run. If this time is exceeded the prediction will be returned in a `"starting"` state and need to be retrieved using the `predictions.get` endpiont.<br/>
 /// For a complete overview of the `predictions.create` API check out our documentation on [creating a prediction](https://replicate.com/docs/topics/predictions/create-a-prediction) which covers a variety of use cases.
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The token to cancel the operation with
 ///
@@ -123,7 +125,9 @@ partial void ProcessPredictionsCreateResponse(
 /// The request will wait up to 60 seconds for the model to run. If this time is exceeded the prediction will be returned in a `"starting"` state and need to be retrieved using the `predictions.get` endpiont.<br/>
 /// For a complete overview of the `predictions.create` API check out our documentation on [creating a prediction](https://replicate.com/docs/topics/predictions/create-a-prediction) which covers a variety of use cases.
 ///
- ///
+ ///
+ /// Example: wait=5
+ ///
 ///
 /// The model's input as a JSON object. The input schema depends on what model you are running. To see the available inputs, click the "API" tab on the model you are running or [get the model version](#models.versions.get) and look at its `openapi_schema` property. For example, [stability-ai/sdxl](https://replicate.com/stability-ai/sdxl) takes `prompt` as an input.<br/>
 /// Files should be passed as HTTP URLs or data URLs.<br/>
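Relating to the `"starting"` state mentioned above: when the wait window is exceeded, the prediction has to be re-fetched. A hypothetical helper (not part of the generated client) that polls the documented `urls.get` link until the prediction leaves the `starting`/`processing` states could look like this:

// Sketch only: re-fetch a prediction via its `urls.get` link until it reaches a final state.
using System.Net.Http;
using System.Text.Json;
using System.Threading.Tasks;

static class PredictionPollingSketch
{
    public static async Task<JsonDocument> WaitForCompletionAsync(HttpClient http, JsonDocument created)
    {
        var doc = created;
        while (true)
        {
            var status = doc.RootElement.GetProperty("status").GetString();
            if (status != "starting" && status != "processing")
                return doc;   // succeeded, failed, or canceled

            var getUrl = doc.RootElement.GetProperty("urls").GetProperty("get").GetString();
            await Task.Delay(1000);   // simple fixed back-off; adjust as needed
            doc = JsonDocument.Parse(await http.GetStringAsync(getUrl));
        }
    }
}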
diff --git a/src/libs/Replicate/openapi.yaml b/src/libs/Replicate/openapi.yaml
index 26a84a7..4b080ac 100644
--- a/src/libs/Replicate/openapi.yaml
+++ b/src/libs/Replicate/openapi.yaml
@@ -1001,9 +1001,11 @@ components:
     prefer_header:
       name: Prefer
       in: header
-      description: "When you provide the `Prefer: wait` header, the request will block and wait up to 60 seconds for the model to finish generating output. The output will be included in the `output` field of the prediction response, so you don't need to use webhooks or polling to retrieve it.\n\nYou can specify a shorter timeout duration if needed. For example, `Prefer: wait=5` will wait for 5 seconds instead of the default 60 seconds.\n\nIf the model doesn't finish within the specified duration, the request will return the incomplete prediction object with status set to `starting` or `processing`. You can then fetch the prediction again via the URL provided in the `Location` header, or the `urls.get` field in the JSON response. Even if the timeout is exceeded, the prediction will continue processing in the background.\n\nThe `Prefer` header is not enabled by default. If you don't include this header in your request, the response will immediately return the prediction in a `starting` state.\n"
+      description: "Leave the request open and wait for the model to finish generating output. Set to `wait=n` where n is a number of seconds between 1 and 60.\n\nSee https://replicate.com/docs/topics/predictions/create-a-prediction#sync-mode for more information."
       schema:
+        pattern: '^wait(=([1-9]|[1-9][0-9]|60))?$'
         type: string
+        example: wait=5
   securitySchemes:
     bearerAuth:
       type: http
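For reference, the `pattern` constraint added above accepts either a bare `wait` or `wait=<seconds>` (as written, its two-digit branch also admits values between 61 and 99). An illustrative check of a few candidate values against that regex:

// Illustrative check of candidate Prefer values against the pattern added in openapi.yaml.
using System;
using System.Text.RegularExpressions;

class WaitPatternCheck
{
    static void Main()
    {
        var pattern = new Regex(@"^wait(=([1-9]|[1-9][0-9]|60))?$");

        foreach (var candidate in new[] { "wait", "wait=5", "wait=60", "wait=0", "wait=100", "wait=5s" })
        {
            Console.WriteLine($"{candidate,-9} matches: {pattern.IsMatch(candidate)}");
        }
        // "wait", "wait=5" and "wait=60" match; "wait=0", "wait=100" and "wait=5s" do not.
    }
}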