diff --git a/assets/src/generated/graphql.ts b/assets/src/generated/graphql.ts index 72166d510..7838b7c26 100644 --- a/assets/src/generated/graphql.ts +++ b/assets/src/generated/graphql.ts @@ -330,11 +330,15 @@ export type AnthropicSettings = { __typename?: 'AnthropicSettings'; /** the anthropic model version to use */ model?: Maybe<Scalars['String']['output']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: Maybe<Scalars['String']['output']>; }; export type AnthropicSettingsAttributes = { accessToken?: InputMaybe<Scalars['String']['input']>; model?: InputMaybe<Scalars['String']['input']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: InputMaybe<Scalars['String']['input']>; }; /** a representation of a kubernetes api deprecation */ @@ -635,6 +639,8 @@ export type AzureOpenaiAttributes = { endpoint: Scalars['String']['input']; /** the exact model you wish to use */ model?: InputMaybe<Scalars['String']['input']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: InputMaybe<Scalars['String']['input']>; }; /** Settings for configuring against Azure OpenAI */ @@ -644,6 +650,9 @@ export type AzureOpenaiSettings = { __typename?: 'AzureOpenaiSettings'; apiVersion?: Maybe<Scalars['String']['output']>; /** the endpoint of your azure openai version, should look like: https://{endpoint}/openai/deployments/{deployment-id} */ endpoint: Scalars['String']['output']; + model?: Maybe<Scalars['String']['output']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: Maybe<Scalars['String']['output']>; }; export type AzureSettingsAttributes = { @@ -689,6 +698,8 @@ export type BedrockAiAttributes = { modelId: Scalars['String']['input']; /** aws secret access key to use, you can also use IRSA for self-hosted consoles */ secretAccessKey?: InputMaybe<Scalars['String']['input']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModelId?: InputMaybe<Scalars['String']['input']>; }; /** Settings for usage of AWS Bedrock for LLMs */ @@ -698,6 +709,8 @@ export type BedrockAiSettings = { __typename?: 'BedrockAiSettings'; accessKeyId?: Maybe<Scalars['String']['output']>; /** the bedrock model to use */ modelId: Scalars['String']['output']; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModelId?: Maybe<Scalars['String']['output']>; }; export type BindingAttributes = { @@ -3879,6 +3892,8 @@ export type OllamaAttributes = { /** An http authorization header to use on calls to the Ollama api */ authorization?: InputMaybe<Scalars['String']['input']>; model: Scalars['String']['input']; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: InputMaybe<Scalars['String']['input']>; url: Scalars['String']['input']; }; @@ -3886,6 +3901,8 @@ export type OllamaAttributes = { export type OllamaSettings = { __typename?: 'OllamaSettings'; model: Scalars['String']['output']; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: Maybe<Scalars['String']['output']>; /** the url your ollama deployment is hosted on */ url: Scalars['String']['output']; }; @@ -3897,12 +3914,16 @@ export type OpenaiSettings = { baseUrl?: Maybe<Scalars['String']['output']>; /** the openai model version to use */ model?: Maybe<Scalars['String']['output']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: Maybe<Scalars['String']['output']>; }; export type OpenaiSettingsAttributes = { accessToken?: InputMaybe<Scalars['String']['input']>; baseUrl?: InputMaybe<Scalars['String']['input']>; model?: InputMaybe<Scalars['String']['input']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: InputMaybe<Scalars['String']['input']>; }; export enum Operation { @@ -9364,6 +9385,8 @@ export type VertexAiAttributes = { project: Scalars['String']['input']; /** optional service account json to auth to the GCP vertex apis */ serviceAccountJson?: InputMaybe<Scalars['String']['input']>; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: InputMaybe<Scalars['String']['input']>; }; /** Settings for usage of GCP VertexAI for LLMs */ @@ -9375,6 +9398,8 @@ export type VertexAiSettings = { model?: Maybe<Scalars['String']['output']>; /** the gcp project id to use */ project: Scalars['String']['output']; + /** the model to use for tool calls, which are less frequent and require more complex reasoning */ + toolModel?: Maybe<Scalars['String']['output']>; }; export type VerticalPodAutoscaler = { diff --git a/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml b/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml index 8e873d591..8c92f894a 100644 --- a/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml +++ b/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml @@ -81,6 +81,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - tokenSecretRef type: object @@ -122,6 +126,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - endpoint - tokenSecretRef @@ -158,6 +166,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModelId: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - modelId type: object @@ -196,6 +208,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string url: description: URL is the url this model is queryable on type: string @@ -236,6 +252,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - tokenSecretRef type: object @@ -288,6 +308,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - location - project diff --git a/go/client/models_gen.go b/go/client/models_gen.go index 62e7f7cd0..6c5da6223 100644 --- a/go/client/models_gen.go +++ b/go/client/models_gen.go @@ -243,11 +243,15 @@ type AlertEdge struct { type AnthropicSettings struct { // the anthropic model version to use Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` } type AnthropicSettingsAttributes struct { AccessToken *string `json:"accessToken,omitempty"` Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` } // a representation of a kubernetes api deprecation @@ -476,6 +480,8 @@ type AzureOpenaiAttributes struct { APIVersion *string `json:"apiVersion,omitempty"` // the exact model you wish to use Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string
`json:"toolModel,omitempty"` // the azure openai access token to use AccessToken string `json:"accessToken"` } @@ -483,7 +489,10 @@ type AzureOpenaiAttributes struct { // Settings for configuring against Azure OpenAI type AzureOpenaiSettings struct { // the endpoint of your azure openai version, should look like: https://{endpoint}/openai/deployments/{deployment-id} - Endpoint string `json:"endpoint"` + Endpoint string `json:"endpoint"` + Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` // the api version you want to use APIVersion *string `json:"apiVersion,omitempty"` } @@ -526,6 +535,8 @@ type BackupAttributes struct { type BedrockAiAttributes struct { // the bedrock model id to use ModelID string `json:"modelId"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModelID *string `json:"toolModelId,omitempty"` // aws access key id to use, you can also use IRSA for self-hosted consoles AccessKeyID *string `json:"accessKeyId,omitempty"` // aws secret access key to use, you can also use IRSA for self-hosted consoles @@ -536,6 +547,8 @@ type BedrockAiAttributes struct { type BedrockAiSettings struct { // the bedrock model to use ModelID string `json:"modelId"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModelID *string `json:"toolModelId,omitempty"` // the aws access key to use, can also use IRSA when console is self-hosted AccessKeyID *string `json:"accessKeyId,omitempty"` } @@ -3190,7 +3203,9 @@ type OidcProviderAttributes struct { type OllamaAttributes struct { Model string `json:"model"` - URL string `json:"url"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` + URL string `json:"url"` // An http authorization header to use on calls to the Ollama api Authorization *string `json:"authorization,omitempty"` } @@ -3198,6 +3213,8 @@ type OllamaAttributes struct { // Settings for a self-hosted ollama-based LLM deployment type OllamaSettings struct { Model string `json:"model"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` // the url your ollama deployment is hosted on URL string `json:"url"` } @@ -3208,12 +3225,16 @@ type OpenaiSettings struct { BaseURL *string `json:"baseUrl,omitempty"` // the openai model version to use Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` } type OpenaiSettingsAttributes struct { BaseURL *string `json:"baseUrl,omitempty"` AccessToken *string `json:"accessToken,omitempty"` Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` } type OverlayUpdate struct { @@ -5762,6 +5783,8 @@ type VersionReference struct { type VertexAiAttributes struct { // the vertex model id to use Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` // optional service account json to auth to the GCP vertex apis ServiceAccountJSON *string 
`json:"serviceAccountJson,omitempty"` // custom vertexai endpoint if for dedicated customer deployments @@ -5776,6 +5799,8 @@ type VertexAiAttributes struct { type VertexAiSettings struct { // the vertex ai model to use Model *string `json:"model,omitempty"` + // the model to use for tool calls, which are less frequent and require more complex reasoning + ToolModel *string `json:"toolModel,omitempty"` // the gcp project id to use Project string `json:"project"` // the gcp region the model diff --git a/go/controller/api/v1alpha1/deploymentsettings_types.go b/go/controller/api/v1alpha1/deploymentsettings_types.go index a1c3ad425..c983b6c3f 100644 --- a/go/controller/api/v1alpha1/deploymentsettings_types.go +++ b/go/controller/api/v1alpha1/deploymentsettings_types.go @@ -235,6 +235,7 @@ func (in *AISettings) Attributes(ctx context.Context, c client.Client, namespace AccessToken: &token, Model: in.OpenAI.Model, BaseURL: in.OpenAI.BaseUrl, + ToolModel: in.OpenAI.ToolModel, } case console.AiProviderAnthropic: if in.Anthropic == nil { @@ -249,6 +250,7 @@ func (in *AISettings) Attributes(ctx context.Context, c client.Client, namespace attr.Anthropic = &console.AnthropicSettingsAttributes{ AccessToken: lo.ToPtr(token), Model: in.Anthropic.Model, + ToolModel: in.Anthropic.ToolModel, } case console.AiProviderAzure: if in.Azure == nil { @@ -264,6 +266,7 @@ func (in *AISettings) Attributes(ctx context.Context, c client.Client, namespace Endpoint: in.Azure.Endpoint, APIVersion: in.Azure.ApiVersion, Model: in.Azure.Model, + ToolModel: in.Azure.ToolModel, AccessToken: token, } case console.AiProviderVertex: @@ -295,6 +298,7 @@ func (in *AISettings) Attributes(ctx context.Context, c client.Client, namespace attr.Bedrock = &console.BedrockAiAttributes{ ModelID: in.Bedrock.ModelID, + ToolModelID: in.Bedrock.ToolModelId, AccessKeyID: in.Bedrock.AccessKeyId, SecretAccessKey: secret, } @@ -311,6 +315,7 @@ func (in *AISettings) Attributes(ctx context.Context, c client.Client, namespace attr.Ollama = &console.OllamaAttributes{ URL: in.Ollama.URL, Model: in.Ollama.Model, + ToolModel: in.Ollama.ToolModel, Authorization: auth, } } @@ -324,6 +329,11 @@ type AIProviderSettings struct { // +kubebuilder:validation:Optional Model *string `json:"model,omitempty"` + // Model to use for tool calling, which is less frequent and often requires more advanced reasoning + // + // +kubebuilder:validation:Optional + ToolModel *string `json:"toolModel,omitempty"` + // A custom base url to use, for reimplementations of the same API scheme (for instance Together.ai uses the OpenAI API spec) // // +kubebuilder:validation:Optional @@ -348,6 +358,11 @@ type OllamaSettings struct { // +kubebuilder:validation:Required Model string `json:"model"` + // Model to use for tool calling, which is less frequent and often requires more advanced reasoning + // + // +kubebuilder:validation:Optional + ToolModel *string `json:"toolModel,omitempty"` + // TokenSecretRef is a reference to the local secret holding the contents of a HTTP Authorization header // to send to your ollama api in case authorization is required (eg for an instance hosted on a public network) // @@ -371,6 +386,11 @@ type AzureOpenAISettings struct { // +kubebuilder:validation:Optional Model *string `json:"model,omitempty"` + // Model to use for tool calling, which is less frequent and often requires more advanced reasoning + // + // +kubebuilder:validation:Optional + ToolModel *string `json:"toolModel,omitempty"` + // TokenSecretRef is a reference to the local secret holding the 
token to access // the configured AI provider. // @@ -384,6 +404,11 @@ type BedrockSettings struct { // +kubebuilder:validation:Required ModelID string `json:"modelId"` + // Model to use for tool calling, which is less frequent and often requires more advanced reasoning + // + // +kubebuilder:validation:Optional + ToolModelId *string `json:"toolModelId,omitempty"` + // An AWS Access Key ID to use, can also use IRSA to acquire credentials // // +kubebuilder:validation:Optional @@ -401,6 +426,11 @@ type VertexSettings struct { // +kubebuilder:validation:Optional Model *string `json:"model,omitempty"` + // Model to use for tool calling, which is less frequent and often requires more advanced reasoning + // + // +kubebuilder:validation:Optional + ToolModel *string `json:"toolModel,omitempty"` + // The GCP project you'll be using // // +kubebuilder:validation:Required diff --git a/go/controller/api/v1alpha1/zz_generated.deepcopy.go b/go/controller/api/v1alpha1/zz_generated.deepcopy.go index c45b1f4fb..499b4d992 100644 --- a/go/controller/api/v1alpha1/zz_generated.deepcopy.go +++ b/go/controller/api/v1alpha1/zz_generated.deepcopy.go @@ -36,6 +36,11 @@ func (in *AIProviderSettings) DeepCopyInto(out *AIProviderSettings) { *out = new(string) **out = **in } + if in.ToolModel != nil { + in, out := &in.ToolModel, &out.ToolModel + *out = new(string) + **out = **in + } if in.BaseUrl != nil { in, out := &in.BaseUrl, &out.BaseUrl *out = new(string) @@ -122,6 +127,11 @@ func (in *AzureOpenAISettings) DeepCopyInto(out *AzureOpenAISettings) { *out = new(string) **out = **in } + if in.ToolModel != nil { + in, out := &in.ToolModel, &out.ToolModel + *out = new(string) + **out = **in + } in.TokenSecretRef.DeepCopyInto(&out.TokenSecretRef) } @@ -138,6 +148,11 @@ func (in *AzureOpenAISettings) DeepCopy() *AzureOpenAISettings { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BedrockSettings) DeepCopyInto(out *BedrockSettings) { *out = *in + if in.ToolModelId != nil { + in, out := &in.ToolModelId, &out.ToolModelId + *out = new(string) + **out = **in + } if in.AccessKeyId != nil { in, out := &in.AccessKeyId, &out.AccessKeyId *out = new(string) @@ -3099,6 +3114,11 @@ func (in *ObserverTarget) DeepCopy() *ObserverTarget { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OllamaSettings) DeepCopyInto(out *OllamaSettings) { *out = *in + if in.ToolModel != nil { + in, out := &in.ToolModel, &out.ToolModel + *out = new(string) + **out = **in + } if in.AuthorizationSecretRef != nil { in, out := &in.AuthorizationSecretRef, &out.AuthorizationSecretRef *out = new(v1.SecretKeySelector) @@ -5267,6 +5287,11 @@ func (in *VertexSettings) DeepCopyInto(out *VertexSettings) { *out = new(string) **out = **in } + if in.ToolModel != nil { + in, out := &in.ToolModel, &out.ToolModel + *out = new(string) + **out = **in + } if in.Endpoint != nil { in, out := &in.Endpoint, &out.Endpoint *out = new(string) diff --git a/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml b/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml index 8e873d591..8c92f894a 100644 --- a/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml +++ b/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml @@ -81,6 +81,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - tokenSecretRef type: object @@ -122,6 +126,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - endpoint - tokenSecretRef @@ -158,6 +166,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModelId: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - modelId type: object @@ -196,6 +208,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string url: description: URL is the url this model is queryable on type: string @@ -236,6 +252,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - tokenSecretRef type: object @@ -288,6 +308,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - location - project diff --git a/go/controller/docs/api.md b/go/controller/docs/api.md index 7f41b6a2b..7d2237b6c 100644 --- a/go/controller/docs/api.md +++ b/go/controller/docs/api.md @@ -53,6 +53,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `model` _string_ | Model is the LLM model name to use. | | Optional: {}
|
+| `toolModel` _string_ | Model to use for tool calling, which is less frequent and often requires more advanced reasoning | | Optional: {} <br />|
 | `baseUrl` _string_ | A custom base url to use, for reimplementations of the same API scheme (for instance Together.ai uses the OpenAI API spec) | | Optional: {} <br />|
 | `tokenSecretRef` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)_ | TokenSecretRef is a reference to the local secret holding the token to access<br />the configured AI provider. | | Required: {} <br />|
@@ -98,6 +99,7 @@ _Appears in:_
 | `endpoint` _string_ | Your Azure OpenAI endpoint, should be formatted like: https://{endpoint}/openai/deployments/{deployment-id}" | | Required: {} <br />|
 | `apiVersion` _string_ | The azure openai Data plane - inference api version to use, defaults to 2024-10-01-preview or the latest available | | Optional: {} <br />|
 | `model` _string_ | The OpenAi Model you wish to use. If not specified, Plural will provide a default | | Optional: {} <br />|
+| `toolModel` _string_ | Model to use for tool calling, which is less frequent and often requires more advanced reasoning | | Optional: {} <br />|
 | `tokenSecretRef` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)_ | TokenSecretRef is a reference to the local secret holding the token to access<br />the configured AI provider. | | Required: {} <br />|
@@ -115,6 +117,7 @@ _Appears in:_
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
 | `modelId` _string_ | The AWS Bedrock Model ID to use | | Required: {} <br />|
+| `toolModelId` _string_ | Model to use for tool calling, which is less frequent and often requires more advanced reasoning | | Optional: {} <br />|
 | `accessKeyId` _string_ | An AWS Access Key ID to use, can also use IRSA to acquire credentials | | Optional: {} <br />|
 | `secretAccessKeyRef` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)_ | An AWS Secret Access Key to use, can also use IRSA to acquire credentials | | Optional: {} <br />|
@@ -1602,6 +1605,7 @@ _Appears in:_
 | --- | --- | --- | --- |
 | `url` _string_ | URL is the url this model is queryable on | | Required: {} <br />|
 | `model` _string_ | Model is the Ollama model to use when querying the /chat api | | Required: {} <br />|
+| `toolModel` _string_ | Model to use for tool calling, which is less frequent and often requires more advanced reasoning | | Optional: {} <br />|
 | `tokenSecretRef` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)_ | TokenSecretRef is a reference to the local secret holding the contents of a HTTP Authorization header<br />to send to your ollama api in case authorization is required (eg for an instance hosted on a public network) | | Optional: {} <br />|
@@ -2731,6 +2735,7 @@ _Appears in:_
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
 | `model` _string_ | The Vertex AI model to use | | Optional: {} <br />|
+| `toolModel` _string_ | Model to use for tool calling, which is less frequent and often requires more advanced reasoning | | Optional: {} <br />|
 | `project` _string_ | The GCP project you'll be using | | Required: {} <br />|
 | `location` _string_ | The GCP region Vertex is queried from | | Required: {} <br />|
 | `endpoint` _string_ | A custom endpoint for self-deployed models | | Optional: {} <br />|
diff --git a/lib/console/ai/evidence/base.ex b/lib/console/ai/evidence/base.ex index 8c53db214..92fbaaf3a 100644 --- a/lib/console/ai/evidence/base.ex +++ b/lib/console/ai/evidence/base.ex @@ -27,6 +27,9 @@ defmodule Console.AI.Evidence.Base do def prepend(list, l) when is_list(l), do: l ++ list def prepend(list, msg), do: [msg | list] + def append(list, l) when is_list(l), do: list ++ l + def append(list, msg), do: list ++ [msg] + def distro(:byok), do: "vanilla" def distro(distro), do: distro diff --git a/lib/console/ai/fixer.ex b/lib/console/ai/fixer.ex index 18d022d95..b89f4f1e1 100644 --- a/lib/console/ai/fixer.ex +++ b/lib/console/ai/fixer.ex @@ -3,6 +3,7 @@ defmodule Console.AI.Fixer do Owns logic for generating service/stack/etc insight fix recommendations """ use Console.Services.Base + import Console.AI.Evidence.Base, only: [prepend: 2, append: 2] import Console.AI.Policy alias Console.Schema.{AiInsight, Service, Stack, User, PullRequest} alias Console.AI.Fixer.Service, as: ServiceFixer @@ -12,12 +13,14 @@ defmodule Console.AI.Fixer do @prompt """ Please provide the most straightforward code or configuration change available based on the information I've already provided above to fix this issue. - Be sure to explicitly state the Git repository and full file names that are needed to change, alongside the complete content of the files that need to be modified. + Be sure to explicitly state the Git repository and the full file names that need to change, alongside the content of the files that need to be modified with enough surrounding context to understand what changed. """ @tool """ Please spawn a Pull Request to fix the issue described above. The code change should be the most direct - and straightforward way to fix the issue described, avoid any extraneous changes or modifying files not listed. + and straightforward way to fix the issue described. Change only the minimal number of lines in the original files + provided to successfully fix the issue, and avoid any extraneous changes, as they can break additional + functionality when applied.
""" @callback prompt(struct, binary) :: {:ok, Provider.history} | Console.error @@ -42,18 +45,12 @@ defmodule Console.AI.Fixer do Generate a fix recommendation from an ai insight struct """ @spec pr(AiInsight.t, Provider.history) :: {:ok, PullRequest.t} | Console.error - def pr(%AiInsight{service: %Service{} = svc, text: text}, history) do - pr_prompt(text, "service", history) - |> ask(@tool) - |> Provider.tool_call([Pr]) - |> handle_tool_call(%{service_id: svc.id}) - end - - def pr(%AiInsight{stack: %Stack{} = stack, text: text}, history) do - pr_prompt(text, "stack", history) - |> ask(@tool) - |> Provider.tool_call([Pr]) - |> handle_tool_call(%{stack_id: stack.id}) + def pr(%AiInsight{service: svc, stack: stack} = insight, history) when is_map(svc) or is_map(stack) do + with {:ok, prompt} <- pr_prompt(insight, history) do + ask(prompt, @tool) + |> Provider.tool_call([Pr]) + |> handle_tool_call(pluck(insight)) + end end def pr(_, _), do: {:error, "ai fix recommendations not supported for this insight"} @@ -93,15 +90,34 @@ defmodule Console.AI.Fixer do defp ask(prompt, task \\ @prompt), do: prompt ++ [{:user, task}] - defp pr_prompt(insight, scope, history) when is_list(history) do - [ - {:user, """ - We've found an issue with a failing Plural #{scope}: + defp pr_prompt(%AiInsight{text: insight} = i, history) do + with {:ok, msgs} <- fix_prompt(i) do + msgs + |> prepend({:user, """ + We've found an issue with a failing Plural #{insight_scope(i)}: #{insight} - We've also found the appropriate fix. I'll list it below: - """} | history - ] + We'll want to make a code change to fix the issue identified. Here's the evidence used to generate the code change: + """}) + |> maybe_add_fix(history) + |> ok() + end + end + + defp fix_prompt(%AiInsight{stack: %Stack{} = stack, text: text}), do: StackFixer.prompt(stack, text) + defp fix_prompt(%AiInsight{service: %Service{} = stack, text: text}), do: ServiceFixer.prompt(stack, text) + + defp insight_scope(%AiInsight{service: %Service{}}), do: :service + defp insight_scope(%AiInsight{stack: %Stack{}}), do: :stack + + defp pluck(%AiInsight{service: %Service{id: id}}), do: %{service_id: id} + defp pluck(%AiInsight{stack: %Stack{id: id}}), do: %{stack_id: id} + + defp maybe_add_fix(prompt, [_ | _] = history) do + prompt + |> append({:user, "We've also found a code change needed to fix the above issue, described below. 
Note that this will sometimes represent a PARTIAL change to the underlying file; don't delete unrelated content that isn't relevant to the change:"}) |> append(history) end + defp maybe_add_fix(prompt, _), do: prompt end diff --git a/lib/console/ai/provider/anthropic.ex b/lib/console/ai/provider/anthropic.ex index 6e79dd29f..7a638a61f 100644 --- a/lib/console/ai/provider/anthropic.ex +++ b/lib/console/ai/provider/anthropic.ex @@ -10,7 +10,7 @@ defmodule Console.AI.Anthropic do @default_model "claude-3-5-sonnet-latest" - defstruct [:access_key, :model, :stream] + defstruct [:access_key, :model, :tool_model, :stream] @type t :: %__MODULE__{} @@ -37,7 +37,14 @@ defmodule Console.AI.Anthropic do def spec(), do: %__MODULE__{content: [Anthropic.Content.spec()]} end - def new(opts), do: %__MODULE__{access_key: opts.access_token, model: opts.model, stream: Stream.stream()} + def new(opts) do + %__MODULE__{ + access_key: opts.access_token, + model: opts.model, + tool_model: opts.tool_model, + stream: Stream.stream() + } + end @doc """ Generate a anthropic completion from diff --git a/lib/console/ai/provider/azure.ex b/lib/console/ai/provider/azure.ex index c4abf2590..0ccd20e48 100644 --- a/lib/console/ai/provider/azure.ex +++ b/lib/console/ai/provider/azure.ex @@ -7,7 +7,7 @@ defmodule Console.AI.Azure do require Logger - defstruct [:access_key, :api_version, :base_url, :model] + defstruct [:access_key, :api_version, :base_url, :model, :tool_model] @api_vsn "2024-10-01-preview" @@ -18,6 +18,7 @@ defmodule Console.AI.Azure do access_key: opts.access_key, api_version: opts.api_version, model: opts.model, + tool_model: opts.tool_model, base_url: "#{opts.endpoint}/openai" } end diff --git a/lib/console/ai/provider/bedrock.ex b/lib/console/ai/provider/bedrock.ex index ec59075d5..c1c7cc071 100644 --- a/lib/console/ai/provider/bedrock.ex +++ b/lib/console/ai/provider/bedrock.ex @@ -6,7 +6,7 @@ defmodule Console.AI.Bedrock do require Logger - defstruct [:model_id, :access_key_id, :secret_access_key] + defstruct [:model_id, :tool_model_id, :access_key_id, :secret_access_key] @type t :: %__MODULE__{} @@ -15,6 +15,7 @@ def new(opts) do %__MODULE__{ model_id: opts.model_id, + tool_model_id: opts.tool_model_id, access_key_id: opts.access_key_id, secret_access_key: opts.secret_access_key } end diff --git a/lib/console/ai/provider/ollama.ex b/lib/console/ai/provider/ollama.ex index cfd1dd22e..58457d307 100644 --- a/lib/console/ai/provider/ollama.ex +++ b/lib/console/ai/provider/ollama.ex @@ -6,7 +6,7 @@ defmodule Console.AI.Ollama do require Logger - defstruct [:url, :model, :authorization] + defstruct [:url, :model, :tool_model, :authorization] @type t :: %__MODULE__{} @@ -32,7 +32,14 @@ defmodule Console.AI.Ollama do def spec(), do: %__MODULE__{message: [Ollama.Message.spec()]} end - def new(opts), do: %__MODULE__{url: opts.url, model: opts.model, authorization: opts.authorization} + def new(opts) do + %__MODULE__{ + url: opts.url, + model: opts.model, + tool_model: opts.tool_model, + authorization: opts.authorization + } + end @doc """ Generate a anthropic completion from diff --git a/lib/console/ai/provider/openai.ex b/lib/console/ai/provider/openai.ex index 6b6865329..d6891dd0c 100644 --- a/lib/console/ai/provider/openai.ex +++ b/lib/console/ai/provider/openai.ex @@ -12,7 +12,7 @@ defmodule Console.AI.OpenAI do def default_model(), do: @model - defstruct [:access_key, :model, :base_url, :params, :stream] + defstruct [:access_key, :model, :tool_model, :base_url,
:params, :stream] @type t :: %__MODULE__{} @@ -57,6 +57,7 @@ defmodule Console.AI.OpenAI do %__MODULE__{ access_key: opts.access_token, model: opts.model, + tool_model: opts.tool_model, base_url: opts.base_url, stream: Stream.stream() } @@ -83,7 +84,7 @@ defmodule Console.AI.OpenAI do @spec tool_call(t(), Console.AI.Provider.history, [atom]) :: {:ok, binary} | {:ok, [Console.AI.Tool.t]} | Console.error def tool_call(%__MODULE__{} = openai, messages, tools) do history = Enum.map(messages, fn {role, msg} -> %{role: role, content: msg} end) - case chat(%{openai | stream: nil}, history, tools) do + case chat(%{openai | stream: nil, model: tool_model(openai)}, history, tools) do {:ok, %CompletionResponse{choices: [%Choice{message: %Message{tool_calls: [_ | _] = calls}} | _]}} -> {:ok, gen_tools(calls)} {:ok, %CompletionResponse{choices: [%Choice{message: %Message{content: content}} | _]}} -> @@ -148,6 +149,8 @@ defmodule Console.AI.OpenAI do |> Enum.filter(& &1) end + defp tool_model(%__MODULE__{model: m, tool_model: tm}), do: tm || m || "o1-mini" + defp tool_args(tool) do %{ type: :function, diff --git a/lib/console/ai/provider/vertex.ex b/lib/console/ai/provider/vertex.ex index 5b3cbafe4..1c7e2e657 100644 --- a/lib/console/ai/provider/vertex.ex +++ b/lib/console/ai/provider/vertex.ex @@ -10,7 +10,7 @@ defmodule Console.AI.Vertex do require Logger - defstruct [:service_account_json, :model, :project, :location, :endpoint] + defstruct [:service_account_json, :model, :tool_model, :project, :location, :endpoint] @type t :: %__MODULE__{} @@ -18,6 +18,7 @@ defmodule Console.AI.Vertex do %__MODULE__{ service_account_json: opts.service_account_json, model: opts.model, + tool_model: opts.tool_model, project: opts.project, location: opts.location, endpoint: opts.endpoint @@ -30,7 +31,12 @@ defmodule Console.AI.Vertex do @spec completion(t(), Console.AI.Provider.history) :: {:ok, binary} | Console.error def completion(%__MODULE__{} = vertex, messages) do with {:ok, %{token: token}} <- client(vertex) do - OpenAI.new(base_url: openai_url(vertex), access_token: token, model: openai_model(vertex)) + OpenAI.new( + base_url: openai_url(vertex), + access_token: token, + model: openai_model(vertex), + tool_model: openai_model(vertex) + ) |> OpenAI.completion(messages) end end @@ -41,7 +47,12 @@ defmodule Console.AI.Vertex do @spec tool_call(t(), Console.AI.Provider.history, [atom]) :: {:ok, binary} | {:ok, [Console.AI.Tool.t]} | Console.error def tool_call(%__MODULE__{} = vertex, messages, tools) do with {:ok, %{token: token}} <- client(vertex) do - OpenAI.new(base_url: openai_url(vertex), access_token: token, model: openai_model(vertex)) + OpenAI.new( + base_url: openai_url(vertex), + access_token: token, + model: openai_model(vertex), + tool_model: openai_model(vertex) + ) |> OpenAI.tool_call(messages, tools) end end diff --git a/lib/console/ai/tools/pr.ex b/lib/console/ai/tools/pr.ex index 571a342bc..85a1e952a 100644 --- a/lib/console/ai/tools/pr.ex +++ b/lib/console/ai/tools/pr.ex @@ -35,6 +35,7 @@ defmodule Console.AI.Tools.Pr do def description(), do: "Creates a pull request or merge request against a configured Source Control Management provider" def implement(%__MODULE__{repo_url: url, branch_name: branch, commit_message: msg} = pr) do + branch = "plrl/ai/#{branch}-#{Console.rand_alphanum(6)}" with {:conn, %ScmConnection{} = conn} <- {:conn, Tool.scm_connection()}, conn <- %{conn | author: Tool.actor()}, url = to_http(conn, url), @@ -43,7 +44,7 @@ defmodule Console.AI.Tools.Pr do {:ok, _} <- 
commit(conn, msg), {:ok, _} <- push(conn, branch), {:ok, identifier} <- slug(conn, url), - impl <- Dispatcher.dispatcher(conn) do + impl = Dispatcher.dispatcher(conn) do impl.create(%PrAutomation{ connection: conn, title: pr.pr_title, diff --git a/lib/console/graphql/deployments/settings.ex b/lib/console/graphql/deployments/settings.ex index 1f3bb945a..01bb6790b 100644 --- a/lib/console/graphql/deployments/settings.ex +++ b/lib/console/graphql/deployments/settings.ex @@ -65,15 +65,18 @@ defmodule Console.GraphQl.Deployments.Settings do field :base_url, :string field :access_token, :string field :model, :string + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" end input_object :anthropic_settings_attributes do field :access_token, :string field :model, :string + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" end input_object :ollama_attributes do field :model, non_null(:string) + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" field :url, non_null(:string) field :authorization, :string, description: "An http authorization header to use on calls to the Ollama api" end @@ -82,17 +85,20 @@ defmodule Console.GraphQl.Deployments.Settings do field :endpoint, non_null(:string), description: "the endpoint of your azure openai version, should look like: https://{endpoint}/openai/deployments/{deployment-id}" field :api_version, :string, description: "the api version you want to use" field :model, :string, description: "the exact model you wish to use" + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" field :access_token, non_null(:string), description: "the azure openai access token to use" end input_object :bedrock_ai_attributes do field :model_id, non_null(:string), description: "the bedrock model id to use" + field :tool_model_id, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" field :access_key_id, :string, description: "aws access key id to use, you can also use IRSA for self-hosted consoles" field :secret_access_key, :string, description: "aws secret access key to use, you can also use IRSA for self-hosted consoles" end input_object :vertex_ai_attributes do field :model, :string, description: "the vertex model id to use" + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" field :service_account_json, :string, description: "optional service account json to auth to the GCP vertex apis" field :endpoint, :string, description: "custom vertexai endpoint if for dedicated customer deployments" field :project, non_null(:string), description: "the gcp project id to use" @@ -194,38 +200,45 @@ defmodule Console.GraphQl.Deployments.Settings do @desc "OpenAI connection information" object :openai_settings do - field :base_url, :string, description: "the base url to use when querying an OpenAI compatible API, leave blank for OpenAI" - field :model, :string, description: "the openai model version to use" + field :base_url, :string, description: "the base url to use when querying an OpenAI compatible API, leave blank for OpenAI" + field :model, :string, description: "the openai model version to use" + field 
:tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" end @desc "Anthropic connection information" object :anthropic_settings do - field :model, :string, description: "the anthropic model version to use" + field :model, :string, description: "the anthropic model version to use" + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" end @desc "Settings for a self-hosted ollama-based LLM deployment" object :ollama_settings do - field :model, non_null(:string) - field :url, non_null(:string), description: "the url your ollama deployment is hosted on" + field :model, non_null(:string) + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" + field :url, non_null(:string), description: "the url your ollama deployment is hosted on" end @desc "Settings for configuring against Azure OpenAI" object :azure_openai_settings do field :endpoint, non_null(:string), description: "the endpoint of your azure openai version, should look like: https://{endpoint}/openai/deployments/{deployment-id}" + field :model, :string + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" field :api_version, :string, description: "the api version you want to use" end @desc "Settings for usage of AWS Bedrock for LLMs" object :bedrock_ai_settings do - field :model_id, non_null(:string), description: "the bedrock model to use" + field :model_id, non_null(:string), description: "the bedrock model to use" + field :tool_model_id, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" field :access_key_id, :string, description: "the aws access key to use, can also use IRSA when console is self-hosted" end @desc "Settings for usage of GCP VertexAI for LLMs" object :vertex_ai_settings do - field :model, :string, description: "the vertex ai model to use" - field :project, non_null(:string), description: "the gcp project id to use" - field :location, non_null(:string), description: "the gcp region the model" + field :model, :string, description: "the vertex ai model to use" + field :tool_model, :string, description: "the model to use for tool calls, which are less frequent and require more complex reasoning" + field :project, non_null(:string), description: "the gcp project id to use" + field :location, non_null(:string), description: "the gcp region the model" end connection node_type: :project diff --git a/lib/console/schema/deployment_settings.ex b/lib/console/schema/deployment_settings.ex index a913ddc4e..8eae550a3 100644 --- a/lib/console/schema/deployment_settings.ex +++ b/lib/console/schema/deployment_settings.ex @@ -68,16 +68,19 @@ defmodule Console.Schema.DeploymentSettings do field :base_url, :string field :access_token, EncryptedString field :model, :string + field :tool_model, :string end embeds_one :anthropic, Anthropic, on_replace: :update do field :base_url, :string field :access_token, EncryptedString field :model, :string + field :tool_model, :string end embeds_one :ollama, Ollama, on_replace: :update do field :model, :string + field :tool_model, :string field :url, :string field :authorization, EncryptedString end @@ -86,11 +89,13 @@ defmodule Console.Schema.DeploymentSettings do field :api_version, :string field :endpoint, :string field 
:model, :string + field :tool_model, :string field :access_key, EncryptedString end embeds_one :bedrock, Bedrock, on_replace: :update do field :model_id, :string + field :tool_model_id, :string field :access_key_id, :string field :secret_access_key, EncryptedString end @@ -98,6 +103,7 @@ defmodule Console.Schema.DeploymentSettings do embeds_one :vertex, Vertex, on_replace: :update do field :service_account_json, EncryptedString field :model, :string + field :tool_model, :string field :project, :string field :endpoint, :string field :location, :string @@ -178,30 +184,30 @@ defmodule Console.Schema.DeploymentSettings do defp ai_api_changeset(model, attrs) do model - |> cast(attrs, ~w(access_token model base_url)a) + |> cast(attrs, ~w(access_token model tool_model base_url)a) end defp ollama_changeset(model, attrs) do model - |> cast(attrs, ~w(url model authorization)a) + |> cast(attrs, ~w(url model tool_model authorization)a) |> validate_required(~w(url model)a) end defp azure_openai_changeset(model, attrs) do model - |> cast(attrs, ~w(endpoint api_version access_token model)a) + |> cast(attrs, ~w(endpoint api_version access_token tool_model model)a) |> validate_required(~w(access_token endpoint)a) end defp bedrock_changeset(model, attrs) do model - |> cast(attrs, ~w(model_id access_key_id secret_access_key)a) + |> cast(attrs, ~w(model_id tool_model_id access_key_id secret_access_key)a) |> validate_required(~w(model_id)a) end defp vertex_changeset(model, attrs) do model - |> cast(attrs, ~w(model service_account_json project location endpoint)a) + |> cast(attrs, ~w(model tool_model service_account_json project location endpoint)a) |> validate_required([:project, :location]) |> validate_change(:service_account_json, fn :service_account_json, json -> case Jason.decode(json) do diff --git a/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml b/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml index 8e873d591..8c92f894a 100644 --- a/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml +++ b/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml @@ -81,6 +81,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - tokenSecretRef type: object @@ -122,6 +126,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - endpoint - tokenSecretRef @@ -158,6 +166,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModelId: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - modelId type: object @@ -196,6 +208,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string url: description: URL is the url this model is queryable on type: string @@ -236,6 +252,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - tokenSecretRef type: object @@ -288,6 +308,10 @@ spec: - key type: object x-kubernetes-map-type: atomic + toolModel: + 
description: Model to use for tool calling, which is less + frequent and often requires more advanced reasoning + type: string required: - location - project diff --git a/schema/schema.graphql b/schema/schema.graphql index d704c6756..ecdadabe7 100644 --- a/schema/schema.graphql +++ b/schema/schema.graphql @@ -1137,18 +1137,30 @@ input CreatePrConfigAttributes { input OpenaiSettingsAttributes { baseUrl: String + accessToken: String + model: String + + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String } input AnthropicSettingsAttributes { accessToken: String + model: String + + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String } input OllamaAttributes { model: String! + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String + url: String! "An http authorization header to use on calls to the Ollama api" @@ -1165,6 +1177,9 @@ input AzureOpenaiAttributes { "the exact model you wish to use" model: String + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String + "the azure openai access token to use" accessToken: String! } @@ -1173,6 +1188,9 @@ input BedrockAiAttributes { "the bedrock model id to use" modelId: String! + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModelId: String + "aws access key id to use, you can also use IRSA for self-hosted consoles" accessKeyId: String @@ -1184,6 +1202,9 @@ input VertexAiAttributes { "the vertex model id to use" model: String + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String + "optional service account json to auth to the GCP vertex apis" serviceAccountJson: String @@ -1334,18 +1355,27 @@ type OpenaiSettings { "the openai model version to use" model: String + + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String } "Anthropic connection information" type AnthropicSettings { "the anthropic model version to use" model: String + + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String } "Settings for a self-hosted ollama-based LLM deployment" type OllamaSettings { model: String! + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String + "the url your ollama deployment is hosted on" url: String! } @@ -1355,6 +1385,11 @@ type AzureOpenaiSettings { "the endpoint of your azure openai version, should look like: https:\/\/{endpoint}\/openai\/deployments\/{deployment-id}" endpoint: String! + model: String + + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String + "the api version you want to use" apiVersion: String } @@ -1364,6 +1399,9 @@ type BedrockAiSettings { "the bedrock model to use" modelId: String! + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModelId: String + "the aws access key to use, can also use IRSA when console is self-hosted" accessKeyId: String } @@ -1373,6 +1411,9 @@ type VertexAiSettings { "the vertex ai model to use" model: String + "the model to use for tool calls, which are less frequent and require more complex reasoning" + toolModel: String + "the gcp project id to use" project: String! 
diff --git a/test/console/ai/fixer_test.exs b/test/console/ai/fixer_test.exs index 981cde404..56de199e2 100644 --- a/test/console/ai/fixer_test.exs +++ b/test/console/ai/fixer_test.exs @@ -6,14 +6,14 @@ defmodule Console.AI.FixerTest do describe "#pr/2" do test "it can spawn a fix pr" do insert(:scm_connection, token: "some-pat", default: true) - expect(Tentacat.Pulls, :create, fn _, "pluralsh", "console", %{head: "pr-test"} -> + expect(Tentacat.Pulls, :create, fn _, "pluralsh", "console", %{head: "plrl/ai/pr-test" <> _} -> {:ok, %{"html_url" => "https://github.com/pr/url"}, %HTTPoison.Response{}} end) - expect(Console.Deployments.Pr.Git, :setup, fn conn, "https://github.com/pluralsh/console.git", "pr-test" -> + expect(Console.Deployments.Pr.Git, :setup, fn conn, "https://github.com/pluralsh/console.git", "plrl/ai/pr-test" <> _ -> {:ok, %{conn | dir: Briefly.create!(directory: true)}} end) expect(Console.Deployments.Pr.Git, :commit, fn _, _ -> {:ok, ""} end) - expect(Console.Deployments.Pr.Git, :push, fn _, "pr-test" -> {:ok, ""} end) + expect(Console.Deployments.Pr.Git, :push, fn _, "plrl/ai/pr-test" <> _ -> {:ok, ""} end) expect(File, :write, fn _, "first" -> :ok end) expect(File, :write, fn _, "second" -> :ok end) expect(HTTPoison, :post, fn _, _, _, _ ->
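A note on how the new fields behave at call time: in lib/console/ai/provider/openai.ex, tool calls run the chat request with the struct's model swapped for the resolved tool model (`tm || m || "o1-mini"`), so a dedicated tool model wins, the chat model is the fallback, and the hardcoded default only applies when neither is configured. A minimal standalone sketch of that resolution order — the module below is illustrative, not part of the codebase:

```elixir
defmodule ToolModelSketch do
  # mirrors tool_model/1 from the diff: prefer the dedicated tool model,
  # then the configured chat model, then the hardcoded default
  defstruct [:model, :tool_model]

  def resolve(%__MODULE__{model: m, tool_model: tm}), do: tm || m || "o1-mini"
end

ToolModelSketch.resolve(%ToolModelSketch{model: "gpt-4o", tool_model: "o1"})
#=> "o1"
ToolModelSketch.resolve(%ToolModelSketch{model: "gpt-4o"})
#=> "gpt-4o"
ToolModelSketch.resolve(%ToolModelSketch{})
#=> "o1-mini"
```

Vertex gets the same behavior indirectly: since it rides the OpenAI-compatible endpoint, `OpenAI.new/1` receives `openai_model(vertex)` as both `model:` and `tool_model:`, so there is no separate tool model to configure for that provider.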
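The fixer refactor also changes the shape of the PR prompt: `pr_prompt/2` now starts from the service/stack fixer evidence, prepends a scope preamble, and, when prior history exists, appends a framing note plus that history via `maybe_add_fix/2`. A rough sketch of the resulting message ordering, using the `prepend/2` and `append/2` helpers added to Console.AI.Evidence.Base (the message contents here are placeholders):

```elixir
# helpers as defined in lib/console/ai/evidence/base.ex
prepend = fn
  list, l when is_list(l) -> l ++ list
  list, msg -> [msg | list]
end

append = fn
  list, l when is_list(l) -> list ++ l
  list, msg -> list ++ [msg]
end

evidence = [{:user, "<fixer evidence for the service or stack>"}]
history  = [{:user, "<previously generated fix>"}]

evidence
|> prepend.({:user, "We've found an issue with a failing Plural service: ..."})
|> append.({:user, "We've also found a code change needed to fix the above issue, described below. ..."})
|> append.(history)
#=> preamble, then evidence, then the framing note, then the fix —
#   the model reads the problem statement first and the proposed change last
```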
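On lib/console/ai/tools/pr.ex: AI-generated branches are now namespaced under plrl/ai/ and given a random suffix, so repeated fix attempts against the same insight can't collide with an existing remote branch. A sketch of the naming scheme — the random helper below is a stand-in, since Console.rand_alphanum/1's implementation isn't shown in the diff:

```elixir
# stand-in for Console.rand_alphanum/1: picks len random alphanumerics
rand_alphanum = fn len ->
  "abcdefghijklmnopqrstuvwxyz0123456789"
  |> String.graphemes()
  |> Enum.shuffle()
  |> Enum.take(len)
  |> Enum.join()
end

branch = "pr-test"
"plrl/ai/#{branch}-#{rand_alphanum.(6)}"
#=> e.g. "plrl/ai/pr-test-x3k9qz"
```

This is also why the updated tests match on `"plrl/ai/pr-test" <> _` rather than an exact branch name.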
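Finally, the schema changes all follow one pattern: each provider embed gains an optional `tool_model` (or `tool_model_id`) string that is cast but never required, so existing deployment settings remain valid without it. A minimal sketch of that changeset shape, assuming a bare embedded schema modeled on the Ollama embed in lib/console/schema/deployment_settings.ex (authorization and encryption omitted for brevity):

```elixir
defmodule OllamaSketch do
  use Ecto.Schema
  import Ecto.Changeset

  @primary_key false
  embedded_schema do
    field :url, :string
    field :model, :string
    field :tool_model, :string
  end

  def changeset(model, attrs) do
    model
    |> cast(attrs, ~w(url model tool_model)a)
    # tool_model is deliberately not in validate_required, so it stays optional
    |> validate_required(~w(url model)a)
  end
end

OllamaSketch.changeset(%OllamaSketch{}, %{url: "http://ollama:11434", model: "llama3.1"}).valid?
#=> true, with tool_model left nil until one is configured
```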