diff --git a/assets/src/components/layout/Sidebar.tsx b/assets/src/components/layout/Sidebar.tsx
index e1d31390d..2a0c9615e 100644
--- a/assets/src/components/layout/Sidebar.tsx
+++ b/assets/src/components/layout/Sidebar.tsx
@@ -93,13 +93,6 @@ function getMenuItems({
path: HOME_ABS_PATH,
hotkeys: ['shift H', '1'],
},
- {
- text: 'Service catalog',
- expandedLabel: 'Service catalog',
- icon: <CatalogIcon />,
- path: CATALOGS_ABS_PATH,
- hotkeys: ['2'],
- },
{
text: 'Apps',
expandedLabel: 'Apps',
@@ -139,6 +132,13 @@ function getMenuItems({
path: `${AI_ABS_PATH}`,
hotkeys: ['shift A', '6'],
},
+ {
+ text: 'Service catalog',
+ expandedLabel: 'Service catalog',
+ icon: <CatalogIcon />,
+ path: CATALOGS_ABS_PATH,
+ hotkeys: ['2'],
+ },
{
text: 'Builds',
expandedLabel: 'Builds',
diff --git a/assets/src/generated/graphql.ts b/assets/src/generated/graphql.ts
index e001e385e..3e422111f 100644
--- a/assets/src/generated/graphql.ts
+++ b/assets/src/generated/graphql.ts
@@ -261,6 +261,8 @@ export type AiSettings = {
ollama?: Maybe<OllamaSettings>;
openai?: Maybe<OpenaiSettings>;
provider?: Maybe<AiProvider>;
+ /** ai provider to use with tool calls */
+ toolProvider?: Maybe<AiProvider>;
toolsEnabled?: Maybe<Scalars['Boolean']['output']>;
vertex?: Maybe<VertexAiSettings>;
};
@@ -273,6 +275,8 @@ export type AiSettingsAttributes = {
ollama?: InputMaybe<OllamaAttributes>;
openai?: InputMaybe<OpenaiSettingsAttributes>;
provider?: InputMaybe<AiProvider>;
+ /** ai provider to use with tool calls */
+ toolProvider?: InputMaybe<AiProvider>;
tools?: InputMaybe<ToolConfigAttributes>;
vertex?: InputMaybe<VertexAiAttributes>;
};
@@ -6684,6 +6688,7 @@ export type RootQueryType = {
violationStatistics?: Maybe<Array<Maybe<ViolationStatistic>>>;
vulnerabilityReport?: Maybe<VulnerabilityReport>;
vulnerabilityReports?: Maybe<VulnerabilityReportConnection>;
+ vulnerabilityStatistics?: Maybe<Array<Maybe<VulnerabilityStatistic>>>;
webhooks?: Maybe<WebhookConnection>;
wireguardPeer?: Maybe<WireguardPeer>;
wireguardPeers?: Maybe<Array<Maybe<WireguardPeer>>>;
@@ -7731,7 +7736,16 @@ export type RootQueryTypeVulnerabilityReportsArgs = {
before?: InputMaybe<Scalars['String']['input']>;
clusters?: InputMaybe<Array<InputMaybe<Scalars['ID']['input']>>>;
first?: InputMaybe<Scalars['Int']['input']>;
+ grade?: InputMaybe<VulnReportGrade>;
last?: InputMaybe<Scalars['Int']['input']>;
+ namespaces?: InputMaybe<Array<InputMaybe<Scalars['String']['input']>>>;
+ q?: InputMaybe<Scalars['String']['input']>;
+};
+
+
+export type RootQueryTypeVulnerabilityStatisticsArgs = {
+ clusters?: InputMaybe<Array<InputMaybe<Scalars['ID']['input']>>>;
+ namespaces?: InputMaybe<Array<InputMaybe<Scalars['String']['input']>>>;
q?: InputMaybe<Scalars['String']['input']>;
};
@@ -9489,6 +9503,14 @@ export type VulnOsAttributes = {
name?: InputMaybe<Scalars['String']['input']>;
};
+export enum VulnReportGrade {
+ A = 'A',
+ B = 'B',
+ C = 'C',
+ D = 'D',
+ F = 'F'
+}
+
export enum VulnSeverity {
Critical = 'CRITICAL',
High = 'HIGH',
@@ -9596,6 +9618,12 @@ export type VulnerabilityReportEdge = {
node?: Maybe<VulnerabilityReport>;
};
+export type VulnerabilityStatistic = {
+ __typename?: 'VulnerabilityStatistic';
+ count: Scalars['Int']['output'];
+ grade: VulnReportGrade;
+};
+
export type WaitingState = {
__typename?: 'WaitingState';
message?: Maybe<Scalars['String']['output']>;
diff --git a/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml b/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml
index 8c92f894a..7bca02dab 100644
--- a/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml
+++ b/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml
@@ -271,6 +271,18 @@ spec:
- BEDROCK
- VERTEX
type: string
+ toolProvider:
+ default: OPENAI
+ description: Provider to use for tool calling, in case you want
+ to use a different LLM more optimized to those tasks
+ enum:
+ - OPENAI
+ - ANTHROPIC
+ - OLLAMA
+ - AZURE
+ - BEDROCK
+ - VERTEX
+ type: string
vertex:
description: Vertex holds configuration for using GCP VertexAI
to generate LLM insights
diff --git a/charts/controller/crds/deployments.plural.sh_prautomations.yaml b/charts/controller/crds/deployments.plural.sh_prautomations.yaml
index 08ffada25..bbc28b85e 100644
--- a/charts/controller/crds/deployments.plural.sh_prautomations.yaml
+++ b/charts/controller/crds/deployments.plural.sh_prautomations.yaml
@@ -89,6 +89,49 @@ spec:
description: The base branch this pr will be based on (defaults to
the repo's main branch)
type: string
+ catalogRef:
+ description: CatalogRef the catalog this automation will belong to
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
clusterRef:
description: ClusterRef a cluster this pr works on
properties:
diff --git a/go/client/models_gen.go b/go/client/models_gen.go
index 6c5da6223..7ac3510d4 100644
--- a/go/client/models_gen.go
+++ b/go/client/models_gen.go
@@ -184,9 +184,11 @@ type AiPinEdge struct {
// Settings for configuring access to common LLM providers
type AiSettings struct {
- Enabled *bool `json:"enabled,omitempty"`
- ToolsEnabled *bool `json:"toolsEnabled,omitempty"`
- Provider *AiProvider `json:"provider,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+ ToolsEnabled *bool `json:"toolsEnabled,omitempty"`
+ Provider *AiProvider `json:"provider,omitempty"`
+ // ai provider to use with tool calls
+ ToolProvider *AiProvider `json:"toolProvider,omitempty"`
Openai *OpenaiSettings `json:"openai,omitempty"`
Anthropic *AnthropicSettings `json:"anthropic,omitempty"`
Ollama *OllamaSettings `json:"ollama,omitempty"`
@@ -196,15 +198,17 @@ type AiSettings struct {
}
type AiSettingsAttributes struct {
- Enabled *bool `json:"enabled,omitempty"`
- Tools *ToolConfigAttributes `json:"tools,omitempty"`
- Provider *AiProvider `json:"provider,omitempty"`
- Openai *OpenaiSettingsAttributes `json:"openai,omitempty"`
- Anthropic *AnthropicSettingsAttributes `json:"anthropic,omitempty"`
- Ollama *OllamaAttributes `json:"ollama,omitempty"`
- Azure *AzureOpenaiAttributes `json:"azure,omitempty"`
- Bedrock *BedrockAiAttributes `json:"bedrock,omitempty"`
- Vertex *VertexAiAttributes `json:"vertex,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+ Tools *ToolConfigAttributes `json:"tools,omitempty"`
+ Provider *AiProvider `json:"provider,omitempty"`
+ // ai provider to use with tool calls
+ ToolProvider *AiProvider `json:"toolProvider,omitempty"`
+ Openai *OpenaiSettingsAttributes `json:"openai,omitempty"`
+ Anthropic *AnthropicSettingsAttributes `json:"anthropic,omitempty"`
+ Ollama *OllamaAttributes `json:"ollama,omitempty"`
+ Azure *AzureOpenaiAttributes `json:"azure,omitempty"`
+ Bedrock *BedrockAiAttributes `json:"bedrock,omitempty"`
+ Vertex *VertexAiAttributes `json:"vertex,omitempty"`
}
type Alert struct {
@@ -5980,6 +5984,11 @@ type VulnerabilityReportEdge struct {
Cursor *string `json:"cursor,omitempty"`
}
+type VulnerabilityStatistic struct {
+ Grade VulnReportGrade `json:"grade"`
+ Count int64 `json:"count"`
+}
+
type WaitingState struct {
Message *string `json:"message,omitempty"`
Reason *string `json:"reason,omitempty"`
@@ -8544,6 +8553,53 @@ func (e ValidationUniqScope) MarshalGQL(w io.Writer) {
fmt.Fprint(w, strconv.Quote(e.String()))
}
+type VulnReportGrade string
+
+const (
+ VulnReportGradeA VulnReportGrade = "A"
+ VulnReportGradeB VulnReportGrade = "B"
+ VulnReportGradeC VulnReportGrade = "C"
+ VulnReportGradeD VulnReportGrade = "D"
+ VulnReportGradeF VulnReportGrade = "F"
+)
+
+var AllVulnReportGrade = []VulnReportGrade{
+ VulnReportGradeA,
+ VulnReportGradeB,
+ VulnReportGradeC,
+ VulnReportGradeD,
+ VulnReportGradeF,
+}
+
+func (e VulnReportGrade) IsValid() bool {
+ switch e {
+ case VulnReportGradeA, VulnReportGradeB, VulnReportGradeC, VulnReportGradeD, VulnReportGradeF:
+ return true
+ }
+ return false
+}
+
+func (e VulnReportGrade) String() string {
+ return string(e)
+}
+
+func (e *VulnReportGrade) UnmarshalGQL(v interface{}) error {
+ str, ok := v.(string)
+ if !ok {
+ return fmt.Errorf("enums must be strings")
+ }
+
+ *e = VulnReportGrade(str)
+ if !e.IsValid() {
+ return fmt.Errorf("%s is not a valid VulnReportGrade", str)
+ }
+ return nil
+}
+
+func (e VulnReportGrade) MarshalGQL(w io.Writer) {
+ fmt.Fprint(w, strconv.Quote(e.String()))
+}
+
type VulnSeverity string
const (
diff --git a/go/controller/api/v1alpha1/catalog_types.go b/go/controller/api/v1alpha1/catalog_types.go
index 909769748..e1fa3b9a1 100644
--- a/go/controller/api/v1alpha1/catalog_types.go
+++ b/go/controller/api/v1alpha1/catalog_types.go
@@ -129,3 +129,17 @@ func (c *Catalog) Diff(hasher Hasher) (changed bool, sha string, err error) {
return !c.Status.IsSHAEqual(currentSha), currentSha, nil
}
+
+// ConsoleID implements [PluralResource] interface
+func (in *Catalog) ConsoleID() *string {
+ return in.Status.ID
+}
+
+// ConsoleName implements [PluralResource] interface
+func (in *Catalog) ConsoleName() string {
+ if in.Spec.Name != nil {
+ return *in.Spec.Name
+ }
+
+ return in.Name
+}
diff --git a/go/controller/api/v1alpha1/deploymentsettings_types.go b/go/controller/api/v1alpha1/deploymentsettings_types.go
index c983b6c3f..86e14478e 100644
--- a/go/controller/api/v1alpha1/deploymentsettings_types.go
+++ b/go/controller/api/v1alpha1/deploymentsettings_types.go
@@ -183,6 +183,13 @@ type AISettings struct {
// +kubebuilder:validation:Optional
Provider *console.AiProvider `json:"provider,omitempty"`
+ // Provider to use for tool calling, in case you want to use a different LLM more optimized to those tasks
+ //
+ // +kubebuilder:validation:Enum=OPENAI;ANTHROPIC;OLLAMA;AZURE;BEDROCK;VERTEX
+ // +kubebuilder:default=OPENAI
+ // +kubebuilder:validation:Optional
+ ToolProvider *console.AiProvider `json:"toolProvider,omitempty"`
+
// OpenAI holds the OpenAI provider configuration.
//
// +kubebuilder:validation:Optional
@@ -216,8 +223,9 @@ type AISettings struct {
func (in *AISettings) Attributes(ctx context.Context, c client.Client, namespace string) (*console.AiSettingsAttributes, error) {
attr := &console.AiSettingsAttributes{
- Enabled: in.Enabled,
- Provider: in.Provider,
+ Enabled: in.Enabled,
+ Provider: in.Provider,
+ ToolProvider: in.ToolProvider,
}
switch *in.Provider {
diff --git a/go/controller/api/v1alpha1/prautomation_types.go b/go/controller/api/v1alpha1/prautomation_types.go
index 88a790760..c0e6a3a32 100644
--- a/go/controller/api/v1alpha1/prautomation_types.go
+++ b/go/controller/api/v1alpha1/prautomation_types.go
@@ -177,6 +177,10 @@ type PrAutomationSpec struct {
// +kubebuilder:validation:Optional
ProjectRef *corev1.ObjectReference `json:"projectRef,omitempty"`
+ // CatalogRef the catalog this automation will belong to
+ // +kubebuilder:validation:Optional
+ CatalogRef *corev1.ObjectReference `json:"catalogRef,omitempty"`
+
// Bindings contain read and write policies of pr automation
// +kubebuilder:validation:Optional
Bindings *PrAutomationBindings `json:"bindings,omitempty"`
diff --git a/go/controller/api/v1alpha1/zz_generated.deepcopy.go b/go/controller/api/v1alpha1/zz_generated.deepcopy.go
index 499b4d992..22ff9c4e9 100644
--- a/go/controller/api/v1alpha1/zz_generated.deepcopy.go
+++ b/go/controller/api/v1alpha1/zz_generated.deepcopy.go
@@ -72,6 +72,11 @@ func (in *AISettings) DeepCopyInto(out *AISettings) {
*out = new(client.AiProvider)
**out = **in
}
+ if in.ToolProvider != nil {
+ in, out := &in.ToolProvider, &out.ToolProvider
+ *out = new(client.AiProvider)
+ **out = **in
+ }
if in.OpenAI != nil {
in, out := &in.OpenAI, &out.OpenAI
*out = new(AIProviderSettings)
@@ -3817,6 +3822,11 @@ func (in *PrAutomationSpec) DeepCopyInto(out *PrAutomationSpec) {
*out = new(v1.ObjectReference)
**out = **in
}
+ if in.CatalogRef != nil {
+ in, out := &in.CatalogRef, &out.CatalogRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
if in.Bindings != nil {
in, out := &in.Bindings, &out.Bindings
*out = new(PrAutomationBindings)
diff --git a/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml b/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml
index 8c92f894a..7bca02dab 100644
--- a/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml
+++ b/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml
@@ -271,6 +271,18 @@ spec:
- BEDROCK
- VERTEX
type: string
+ toolProvider:
+ default: OPENAI
+ description: Provider to use for tool calling, in case you want
+ to use a different LLM more optimized to those tasks
+ enum:
+ - OPENAI
+ - ANTHROPIC
+ - OLLAMA
+ - AZURE
+ - BEDROCK
+ - VERTEX
+ type: string
vertex:
description: Vertex holds configuration for using GCP VertexAI
to generate LLM insights
diff --git a/go/controller/config/crd/bases/deployments.plural.sh_prautomations.yaml b/go/controller/config/crd/bases/deployments.plural.sh_prautomations.yaml
index 08ffada25..bbc28b85e 100644
--- a/go/controller/config/crd/bases/deployments.plural.sh_prautomations.yaml
+++ b/go/controller/config/crd/bases/deployments.plural.sh_prautomations.yaml
@@ -89,6 +89,49 @@ spec:
description: The base branch this pr will be based on (defaults to
the repo's main branch)
type: string
+ catalogRef:
+ description: CatalogRef the catalog this automation will belong to
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
clusterRef:
description: ClusterRef a cluster this pr works on
properties:
diff --git a/go/controller/docs/api.md b/go/controller/docs/api.md
index 7d2237b6c..a427d9ec2 100644
--- a/go/controller/docs/api.md
+++ b/go/controller/docs/api.md
@@ -73,6 +73,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled defines whether to enable the AI integration or not. | false | Optional: {}
|
| `provider` _[AiProvider](#aiprovider)_ | Provider defines which of the supported LLM providers should be used. | OPENAI | Enum: [OPENAI ANTHROPIC OLLAMA AZURE BEDROCK VERTEX]
Optional: {}
|
+| `toolProvider` _[AiProvider](#aiprovider)_ | Provider to use for tool calling, in case you want to use a different LLM more optimized to those tasks | OPENAI | Enum: [OPENAI ANTHROPIC OLLAMA AZURE BEDROCK VERTEX]
Optional: {}
|
| `openAI` _[AIProviderSettings](#aiprovidersettings)_ | OpenAI holds the OpenAI provider configuration. | | Optional: {}
|
| `anthropic` _[AIProviderSettings](#aiprovidersettings)_ | Anthropic holds the Anthropic provider configuration. | | Optional: {}
|
| `ollama` _[OllamaSettings](#ollamasettings)_ | Ollama holds configuration for a self-hosted Ollama deployment, more details available at https://github.com/ollama/ollama | | Optional: {}
|
@@ -1951,6 +1952,7 @@ _Appears in:_
| `repositoryRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#objectreference-v1-core)_ | RepositoryRef the repository this automation uses. | | Optional: {}
|
| `serviceRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#objectreference-v1-core)_ | ServiceRef the service this PR acts on. | | Optional: {}
|
| `projectRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#objectreference-v1-core)_ | ProjectRef the project this automation belongs to. | | Optional: {}
|
+| `catalogRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#objectreference-v1-core)_ | CatalogRef the catalog this automation will belong to | | Optional: {}
|
| `bindings` _[PrAutomationBindings](#prautomationbindings)_ | Bindings contain read and write policies of pr automation | | Optional: {}
|
| `configuration` _[PrAutomationConfiguration](#prautomationconfiguration) array_ | Configuration self-service configuration for the UI wizard generating this PR | | Optional: {}
|
| `confirmation` _[PrAutomationConfirmation](#prautomationconfirmation)_ | Additional details to verify all prerequisites are satisfied before generating this pr | | Optional: {}
|
diff --git a/go/controller/internal/controller/prautomation_attributes.go b/go/controller/internal/controller/prautomation_attributes.go
index 78a8be146..5f2e62f8b 100644
--- a/go/controller/internal/controller/prautomation_attributes.go
+++ b/go/controller/internal/controller/prautomation_attributes.go
@@ -43,5 +43,12 @@ func (in *PrAutomationReconciler) attributes(ctx context.Context, pra *v1alpha1.
return nil, err
}
- return pra.Attributes(clusterID, serviceID, connectionID, repositoryID, projectID), nil
+ catalogID, err := helper.IDFromRef(pra.Spec.CatalogRef, &v1alpha1.Catalog{})
+ if err != nil {
+ return nil, err
+ }
+
+ attrs := pra.Attributes(clusterID, serviceID, connectionID, repositoryID, projectID)
+ attrs.CatalogID = catalogID
+ return attrs, nil
}
diff --git a/lib/console/ai/provider.ex b/lib/console/ai/provider.ex
index 3bb8d4ae9..77b931922 100644
--- a/lib/console/ai/provider.ex
+++ b/lib/console/ai/provider.ex
@@ -28,7 +28,7 @@ defmodule Console.AI.Provider do
def tools?() do
Console.Deployments.Settings.cached()
- |> client()
+ |> tool_client()
|> case do
{:ok, %mod{}} -> mod.tools?()
_ -> false
@@ -43,7 +43,7 @@ defmodule Console.AI.Provider do
def tool_call(history, tools, opts \\ []) do
settings = Console.Deployments.Settings.cached()
- with {:ok, %mod{} = client} <- client(settings),
+ with {:ok, %mod{} = client} <- tool_client(settings),
{:ok, result} <- mod.tool_call(client, add_preface(history, opts), tools),
do: handle_tool_calls(result, tools)
end
@@ -51,6 +51,10 @@ defmodule Console.AI.Provider do
def summary(text),
do: completion([{:user, text}], preface: @summary)
+ defp tool_client(%DeploymentSettings{ai: %AI{tool_provider: p}} = settings) when not is_nil(p),
+ do: client(put_in(settings.ai.provider, p))
+ defp tool_client(settings), do: client(settings)
+
defp client(%DeploymentSettings{ai: %AI{enabled: true, provider: :openai, openai: %{} = openai}}),
do: {:ok, OpenAI.new(openai)}
defp client(%DeploymentSettings{ai: %AI{enabled: true, provider: :anthropic, anthropic: %{} = anthropic}}),
diff --git a/lib/console/ai/provider/anthropic.ex b/lib/console/ai/provider/anthropic.ex
index 7a638a61f..bef94897c 100644
--- a/lib/console/ai/provider/anthropic.ex
+++ b/lib/console/ai/provider/anthropic.ex
@@ -22,7 +22,7 @@ defmodule Console.AI.Anthropic do
defmodule Content do
@type t :: %__MODULE__{}
- defstruct [:text, :type]
+ defstruct [:text, :type, :name, :input]
def spec(), do: %__MODULE__{}
end
@@ -59,9 +59,18 @@ defmodule Console.AI.Anthropic do
end
end
- def tool_call(_, _, _), do: {:error, "tool calling not implemented for this provider"}
+ def tool_call(anthropic, messages, tools) do
+ case chat(anthropic, messages, tools) do
+ {:ok, %MessageResponse{content: [%Content{type: "tool_use"} | _] = tools}} ->
+ {:ok, gen_tools(tools)}
+ {:ok, %MessageResponse{content: content}} ->
+ {:ok, format_content(content)}
+ {:ok, _} -> {:error, "could not generate an ai completion for this context"}
+ error -> error
+ end
+ end
- def tools?(), do: false
+ def tools?(), do: true
defp chat(%__MODULE__{access_key: token, model: model, stream: %Stream{} = stream}, history) do
Stream.Exec.anthropic(fn ->
@@ -77,15 +86,17 @@ defmodule Console.AI.Anthropic do
end, stream)
end
- defp chat(%__MODULE__{access_key: token, model: model}, history) do
+ defp chat(%__MODULE__{access_key: token, model: model}, history, tools \\ nil) do
{system, history} = split(history)
url("/messages")
- |> HTTPoison.post(Jason.encode!(%{
+ |> HTTPoison.post(Jason.encode!(Console.drop_nils(%{
model: model || @default_model,
system: system,
messages: history,
- max_tokens: @max_tokens
- }), json_headers(token), @options)
+ max_tokens: @max_tokens,
+ tool_choice: (if is_list(tools), do: %{type: "any"}, else: nil),
+ tools: (if is_list(tools), do: Enum.map(tools, &tool_args/1), else: nil)
+ })), json_headers(token), @options)
|> handle_response(MessageResponse.spec())
end
@@ -118,4 +129,21 @@ defmodule Console.AI.Anthropic do
defp json_headers(token), do: headers(@base_headers, token)
defp headers(headers, token), do: [{"x-api-key", token} | headers]
+
+ defp gen_tools(calls) do
+ Enum.map(calls, fn
+ %Content{type: "tool_use", name: n, input: args} ->
+ %Console.AI.Tool{name: n, arguments: args}
+ _ -> nil
+ end)
+ |> Enum.filter(& &1)
+ end
+
+ defp tool_args(tool) do
+ %{
+ name: tool.name(),
+ description: tool.description(),
+ input_schema: tool.json_schema()
+ }
+ end
end
diff --git a/lib/console/graphql/deployments/policy.ex b/lib/console/graphql/deployments/policy.ex
index f612a08b7..37b9aa2dc 100644
--- a/lib/console/graphql/deployments/policy.ex
+++ b/lib/console/graphql/deployments/policy.ex
@@ -250,6 +250,11 @@ defmodule Console.GraphQl.Deployments.Policy do
field :namespace, non_null(:string)
end
+ object :vulnerability_statistic do
+ field :grade, non_null(:vuln_report_grade)
+ field :count, non_null(:integer)
+ end
+
connection node_type: :policy_constraint
connection node_type: :vulnerability_report
@@ -269,8 +274,10 @@ defmodule Console.GraphQl.Deployments.Policy do
connection field :vulnerability_reports, node_type: :vulnerability_report do
middleware Authenticated
- arg :clusters, list_of(:id)
- arg :q, :string
+ arg :clusters, list_of(:id)
+ arg :namespaces, list_of(:string)
+ arg :q, :string
+ arg :grade, :vuln_report_grade
resolve &Deployments.list_vulnerabilities/2
end
@@ -308,6 +315,15 @@ defmodule Console.GraphQl.Deployments.Policy do
resolve &Deployments.resolve_vulnerability/2
end
+
+ field :vulnerability_statistics, list_of(:vulnerability_statistic) do
+ middleware Authenticated
+ arg :clusters, list_of(:id)
+ arg :namespaces, list_of(:string)
+ arg :q, :string
+
+ resolve &Deployments.vulnerability_statistics/2
+ end
end
object :public_policy_mutations do
diff --git a/lib/console/graphql/deployments/settings.ex b/lib/console/graphql/deployments/settings.ex
index 01bb6790b..579c9e635 100644
--- a/lib/console/graphql/deployments/settings.ex
+++ b/lib/console/graphql/deployments/settings.ex
@@ -42,15 +42,16 @@ defmodule Console.GraphQl.Deployments.Settings do
end
input_object :ai_settings_attributes do
- field :enabled, :boolean
- field :tools, :tool_config_attributes
- field :provider, :ai_provider
- field :openai, :openai_settings_attributes
- field :anthropic, :anthropic_settings_attributes
- field :ollama, :ollama_attributes
- field :azure, :azure_openai_attributes
- field :bedrock, :bedrock_ai_attributes
- field :vertex, :vertex_ai_attributes
+ field :enabled, :boolean
+ field :tools, :tool_config_attributes
+ field :provider, :ai_provider
+ field :tool_provider, :ai_provider, description: "ai provider to use with tool calls"
+ field :openai, :openai_settings_attributes
+ field :anthropic, :anthropic_settings_attributes
+ field :ollama, :ollama_attributes
+ field :azure, :azure_openai_attributes
+ field :bedrock, :bedrock_ai_attributes
+ field :vertex, :vertex_ai_attributes
end
input_object :tool_config_attributes do
@@ -190,6 +191,7 @@ defmodule Console.GraphQl.Deployments.Settings do
field :enabled, :boolean
field :tools_enabled, :boolean, resolve: fn _, _, _ -> {:ok, Console.AI.Provider.tools?()} end
field :provider, :ai_provider
+ field :tool_provider, :ai_provider, description: "ai provider to use with tool calls"
field :openai, :openai_settings
field :anthropic, :anthropic_settings
field :ollama, :ollama_settings
diff --git a/lib/console/graphql/resolvers/deployments/policy.ex b/lib/console/graphql/resolvers/deployments/policy.ex
index 8fbcecb3f..7c57cc6a5 100644
--- a/lib/console/graphql/resolvers/deployments/policy.ex
+++ b/lib/console/graphql/resolvers/deployments/policy.ex
@@ -62,6 +62,15 @@ defmodule Console.GraphQl.Resolvers.Deployments.Policy do
|> ok()
end
+ def vulnerability_statistics(args, %{context: %{current_user: user}}) do
+ VulnerabilityReport.for_user(user)
+ |> maybe_search(VulnerabilityReport, args)
+ |> vuln_filters(args)
+ |> VulnerabilityReport.grades()
+ |> Console.Repo.all()
+ |> ok()
+ end
+
def fetch_constraint(%{ref: %{name: name, kind: kind}, cluster_id: cluster_id}, _, _) do
path = Kube.Client.Base.path("constraints.gatekeeper.sh", "v1beta1", String.downcase(kind), nil, name)
with %Cluster{} = cluster <- Clusters.get_cluster(cluster_id),
@@ -93,7 +102,9 @@ defmodule Console.GraphQl.Resolvers.Deployments.Policy do
defp vuln_filters(query, args) do
Enum.reduce(args, query, fn
- {:clusters, ids}, q -> VulnerabilityReport.for_clusters(q, ids)
+ {:clusters, [_ | _] = ids}, q -> VulnerabilityReport.for_clusters(q, ids)
+ {:grade, g}, q when not is_nil(g) -> VulnerabilityReport.for_grade(q, g)
+ {:namespaces, [_ | _] = ns}, q -> VulnerabilityReport.for_namespaces(q, ns)
_, q -> q
end)
end
diff --git a/lib/console/schema/deployment_settings.ex b/lib/console/schema/deployment_settings.ex
index 8eae550a3..b19b7988d 100644
--- a/lib/console/schema/deployment_settings.ex
+++ b/lib/console/schema/deployment_settings.ex
@@ -55,8 +55,9 @@ defmodule Console.Schema.DeploymentSettings do
end
embeds_one :ai, AI, on_replace: :update do
- field :enabled, :boolean, default: false
- field :provider, AIProvider, default: :openai
+ field :enabled, :boolean, default: false
+ field :provider, AIProvider, default: :openai
+ field :tool_provider, AIProvider
embeds_one :tools, ToolsConfig, on_replace: :update do
embeds_one :create_pr, PrToolConfig, on_replace: :update do
@@ -172,7 +173,7 @@ defmodule Console.Schema.DeploymentSettings do
defp ai_changeset(model, attrs) do
model
- |> cast(attrs, ~w(enabled provider)a)
+ |> cast(attrs, ~w(enabled provider tool_provider)a)
|> cast_embed(:tools, with: &tool_config_changeset/2)
|> cast_embed(:openai, with: &ai_api_changeset/2)
|> cast_embed(:anthropic, with: &ai_api_changeset/2)
diff --git a/lib/console/schema/vulnerability_report.ex b/lib/console/schema/vulnerability_report.ex
index eca85e541..b7a73c0d2 100644
--- a/lib/console/schema/vulnerability_report.ex
+++ b/lib/console/schema/vulnerability_report.ex
@@ -54,6 +54,17 @@ defmodule Console.Schema.VulnerabilityReport do
from(vr in query, where: ilike(vr.artifact_url, ^"%#{q}%"))
end
+ def for_grade(query \\ __MODULE__, grade) do
+ from(vr in query, where: vr.grade == ^grade)
+ end
+
+ def for_namespaces(query \\ __MODULE__, namespaces) do
+ from(vr in query,
+ join: ns in assoc(vr, :namespaces),
+ where: ns.namespace in ^namespaces
+ )
+ end
+
def for_clusters(query \\ __MODULE__, ids) do
from(vr in query, where: vr.cluster_id in ^ids)
end
@@ -66,6 +77,10 @@ defmodule Console.Schema.VulnerabilityReport do
from(vr in query, order_by: ^order)
end
+ def grades(query \\ __MODULE__) do
+ from(s in query, group_by: s.grade, select: %{grade: s.grade, count: count(s.id, :distinct)})
+ end
+
def distinct(query), do: from(p in query, distinct: true)
@valid ~w(artifact_url grade cluster_id)a
diff --git a/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml b/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml
index 8c92f894a..7bca02dab 100644
--- a/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml
+++ b/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml
@@ -271,6 +271,18 @@ spec:
- BEDROCK
- VERTEX
type: string
+ toolProvider:
+ default: OPENAI
+ description: Provider to use for tool calling, in case you want
+ to use a different LLM more optimized to those tasks
+ enum:
+ - OPENAI
+ - ANTHROPIC
+ - OLLAMA
+ - AZURE
+ - BEDROCK
+ - VERTEX
+ type: string
vertex:
description: Vertex holds configuration for using GCP VertexAI
to generate LLM insights
diff --git a/plural/helm/console/crds/deployments.plural.sh_prautomations.yaml b/plural/helm/console/crds/deployments.plural.sh_prautomations.yaml
index 08ffada25..bbc28b85e 100644
--- a/plural/helm/console/crds/deployments.plural.sh_prautomations.yaml
+++ b/plural/helm/console/crds/deployments.plural.sh_prautomations.yaml
@@ -89,6 +89,49 @@ spec:
description: The base branch this pr will be based on (defaults to
the repo's main branch)
type: string
+ catalogRef:
+ description: CatalogRef the catalog this automation will belong to
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
clusterRef:
description: ClusterRef a cluster this pr works on
properties:
diff --git a/schema/schema.graphql b/schema/schema.graphql
index ecdadabe7..facc401e4 100644
--- a/schema/schema.graphql
+++ b/schema/schema.graphql
@@ -379,7 +379,9 @@ type RootQueryType {
after: String, first: Int, before: String, last: Int, kind: String, namespace: String, kinds: [String], namespaces: [String], clusters: [ID], violated: Boolean, q: String
): PolicyConstraintConnection
- vulnerabilityReports(after: String, first: Int, before: String, last: Int, clusters: [ID], q: String): VulnerabilityReportConnection
+ vulnerabilityReports(
+ after: String, first: Int, before: String, last: Int, clusters: [ID], namespaces: [String], q: String, grade: VulnReportGrade
+ ): VulnerabilityReportConnection
violationStatistics(field: ConstraintViolationField!): [ViolationStatistic]
@@ -391,6 +393,8 @@ type RootQueryType {
vulnerabilityReport(id: ID!): VulnerabilityReport
+ vulnerabilityStatistics(clusters: [ID], namespaces: [String], q: String): [VulnerabilityStatistic]
+
managedNamespaces(after: String, first: Int, before: String, last: Int, projectId: ID): ManagedNamespaceConnection
globalService(id: ID!): GlobalService
@@ -1116,13 +1120,24 @@ input StackSettingsAttributes {
input AiSettingsAttributes {
enabled: Boolean
+
tools: ToolConfigAttributes
+
provider: AiProvider
+
+ "ai provider to use with tool calls"
+ toolProvider: AiProvider
+
openai: OpenaiSettingsAttributes
+
anthropic: AnthropicSettingsAttributes
+
ollama: OllamaAttributes
+
azure: AzureOpenaiAttributes
+
bedrock: BedrockAiAttributes
+
vertex: VertexAiAttributes
}
@@ -1338,13 +1353,24 @@ type SmtpSettings {
"Settings for configuring access to common LLM providers"
type AiSettings {
enabled: Boolean
+
toolsEnabled: Boolean
+
provider: AiProvider
+
+ "ai provider to use with tool calls"
+ toolProvider: AiProvider
+
openai: OpenaiSettings
+
anthropic: AnthropicSettings
+
ollama: OllamaSettings
+
azure: AzureOpenaiSettings
+
bedrock: BedrockAiSettings
+
vertex: VertexAiSettings
}
@@ -2479,6 +2505,14 @@ enum VulnSeverity {
CRITICAL
}
+enum VulnReportGrade {
+ A
+ B
+ C
+ D
+ F
+}
+
enum PolicyAggregate {
CLUSTER
ENFORCEMENT
@@ -2736,6 +2770,11 @@ type NamespaceVuln {
namespace: String!
}
+type VulnerabilityStatistic {
+ grade: VulnReportGrade!
+ count: Int!
+}
+
type PolicyConstraintConnection {
pageInfo: PageInfo!
edges: [PolicyConstraintEdge]
diff --git a/test/console/graphql/queries/deployments/policy_queries_test.exs b/test/console/graphql/queries/deployments/policy_queries_test.exs
index e93696099..7a3e6b1ca 100644
--- a/test/console/graphql/queries/deployments/policy_queries_test.exs
+++ b/test/console/graphql/queries/deployments/policy_queries_test.exs
@@ -18,6 +18,32 @@ defmodule Console.GraphQl.Deployments.PolicyQueriesTest do
end
end
+ describe "vulnerabilityStatistics" do
+ test "it can count vulns by grade" do
+ insert_list(3, :vulnerability_report, grade: :f)
+ insert_list(2, :vulnerability_report, grade: :d)
+ insert_list(1, :vulnerability_report, grade: :c)
+ insert_list(2, :vulnerability_report, grade: :b)
+ insert_list(3, :vulnerability_report, grade: :a)
+
+ {:ok, %{data: %{"vulnerabilityStatistics" => found}}} = run_query("""
+ query {
+ vulnerabilityStatistics {
+ grade
+ count
+ }
+ }
+ """, %{}, %{current_user: admin_user()})
+
+ by_grade = Map.new(found, & {&1["grade"], &1["count"]})
+ assert by_grade["F"] == 3
+ assert by_grade["D"] == 2
+ assert by_grade["C"] == 1
+ assert by_grade["B"] == 2
+ assert by_grade["A"] == 3
+ end
+ end
+
describe "vulnerabilityReport" do
test "it can fetch a vuln report" do
user = insert(:user)