diff --git a/config/enterprise_versions.yml b/config/enterprise_versions.yml
index c190bfb8d3..aa95ff6c74 100644
--- a/config/enterprise_versions.yml
+++ b/config/enterprise_versions.yml
@@ -46,12 +46,12 @@ components:
     image: tigera/dex
     version: master
   eck-kibana:
-    version: 7.17.25
+    version: 8.15.3
   kibana:
     image: tigera/kibana
     version: master
   eck-elasticsearch:
-    version: 7.17.25
+    version: 8.15.3
   elasticsearch:
     image: tigera/elasticsearch
     version: master
diff --git a/pkg/components/enterprise.go b/pkg/components/enterprise.go
index 67089cd2c1..ec01fec101 100644
--- a/pkg/components/enterprise.go
+++ b/pkg/components/enterprise.go
@@ -69,12 +69,12 @@ var (
 	}

 	ComponentEckElasticsearch = Component{
-		Version:  "7.17.25",
+		Version:  "8.15.3",
 		Registry: "",
 	}

 	ComponentEckKibana = Component{
-		Version:  "7.17.25",
+		Version:  "8.15.3",
 		Registry: "",
 	}

diff --git a/pkg/crds/calico/crd.projectcalico.org_felixconfigurations.yaml b/pkg/crds/calico/crd.projectcalico.org_felixconfigurations.yaml
index 4d58f55bbc..2a1fa1a446 100644
--- a/pkg/crds/calico/crd.projectcalico.org_felixconfigurations.yaml
+++ b/pkg/crds/calico/crd.projectcalico.org_felixconfigurations.yaml
@@ -81,6 +81,25 @@ spec:
                 for debugging purposes. \n Deprecated: Use BPFConnectTimeLoadBalancing
                 [Default: true]"
               type: boolean
+            bpfConntrackLogLevel:
+              description: 'BPFConntrackLogLevel controls the log level of the BPF
+                conntrack cleanup program, which runs periodically to clean up expired
+                BPF conntrack entries. [Default: Off].'
+              enum:
+              - "Off"
+              - Debug
+              type: string
+            bpfConntrackMode:
+              description: 'BPFConntrackCleanupMode controls how BPF conntrack entries
+                are cleaned up. `Auto` will use a BPF program if supported, falling
+                back to userspace if not. `Userspace` will always use the userspace
+                cleanup code. `BPFProgram` will always use the BPF program (failing
+                if not supported). [Default: Auto]'
+              enum:
+              - Auto
+              - Userspace
+              - BPFProgram
+              type: string
             bpfDSROptoutCIDRs:
               description: BPFDSROptoutCIDRs is a list of CIDRs which are excluded
                 from DSR. That is, clients in those CIDRs will access service node
@@ -95,7 +114,8 @@ spec:
                 that Calico workload traffic flows over as well as any interfaces
                 that handle incoming traffic to nodeports and services from outside
                 the cluster. It should not match the workload interfaces (usually
-                named cali...).
+                named cali...) or any other special device managed by Calico itself
+                (e.g., tunnels).
               type: string
             bpfDisableGROForIfaces:
               description: BPFDisableGROForIfaces is a regular expression that controls
@@ -217,6 +237,13 @@ spec:
                 connection. Warning: changing the size of the conntrack map can cause
                 disruption.'
               type: integer
+            bpfMapSizeConntrackCleanupQueue:
+              description: BPFMapSizeConntrackCleanupQueue sets the size for the
+                map used to hold NAT conntrack entries that are queued for cleanup. This
+                should be big enough to hold all the NAT entries that expire within
+                one cleanup interval.
+              minimum: 1
+              type: integer
             bpfMapSizeIPSets:
               description: BPFMapSizeIPSets sets the size for ipsets map. The IP
                 sets map must be large enough to hold an entry for each endpoint
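For context on the three new FelixConfiguration fields above, here is a minimal sketch of how they could be set from Go via the unstructured API. The spec keys and enum values come from the CRD properties in this hunk; the object name, the queue size, and the choice of unstructured objects (rather than typed clients) are illustrative assumptions, not part of the patch.

```go
// Sketch only, not part of the patch: populating the new BPF conntrack
// fields on a FelixConfiguration object. A real caller would send this
// through a Kubernetes client instead of printing it.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	fc := &unstructured.Unstructured{}
	fc.SetAPIVersion("crd.projectcalico.org/v1")
	fc.SetKind("FelixConfiguration")
	fc.SetName("default")

	spec := map[string]interface{}{
		// Auto uses the BPF cleanup program when the kernel supports it,
		// otherwise falls back to the userspace cleanup path.
		"bpfConntrackMode":     "Auto",
		"bpfConntrackLogLevel": "Off",
		// Size the queue to cover the NAT entries that expire within one
		// cleanup interval (100000 is an illustrative value).
		"bpfMapSizeConntrackCleanupQueue": int64(100000),
	}
	if err := unstructured.SetNestedMap(fc.Object, spec, "spec"); err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", fc.Object)
}
```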
diff --git a/pkg/render/logstorage.go b/pkg/render/logstorage.go
index a9883ebd67..b1ebceb03e 100644
--- a/pkg/render/logstorage.go
+++ b/pkg/render/logstorage.go
@@ -40,6 +40,7 @@ import (
 	"github.com/tigera/operator/pkg/common"
 	"github.com/tigera/operator/pkg/components"
 	"github.com/tigera/operator/pkg/dns"
+	"github.com/tigera/operator/pkg/ptr"
 	relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch"
 	rmeta "github.com/tigera/operator/pkg/render/common/meta"
 	"github.com/tigera/operator/pkg/render/common/networkpolicy"
@@ -358,15 +359,10 @@ func (es *elasticsearchComponent) podTemplate() corev1.PodTemplateSpec {
 		},
 	}

-	sc := securitycontext.NewRootContext(false)
-	// These capabilities are required for docker-entrypoint.sh.
-	// See: https://github.com/elastic/elasticsearch/blob/7.17/distribution/docker/src/docker/bin/docker-entrypoint.sh.
-	// TODO Consider removing for Elasticsearch v8+.
-	sc.Capabilities.Add = []corev1.Capability{
-		"SETGID",
-		"SETUID",
-		"SYS_CHROOT",
-	}
+	sc := securitycontext.NewNonRootContext()
+	// Set the user and group to be the default elasticsearch ID
+	sc.RunAsUser = ptr.Int64ToPtr(1000)
+	sc.RunAsGroup = ptr.Int64ToPtr(1000)

 	esContainer := corev1.Container{
 		Name: "elasticsearch",
@@ -717,9 +713,12 @@ func (es *elasticsearchComponent) nodeSets() []esv1.NodeSet {
 // NodeSet
 func (es *elasticsearchComponent) nodeSetTemplate(pvcTemplate corev1.PersistentVolumeClaim) esv1.NodeSet {
 	config := map[string]interface{}{
-		"node.master": "true",
-		"node.data":   "true",
-		"node.ingest": "true",
+		"node.roles": []string{
+			"data",
+			"ingest",
+			"master",
+			"remote_cluster_client",
+		},
 		"cluster.max_shards_per_node": 10000,
 		// Disable geoip downloader. This removes an error from the startup logs, because our network policy blocks it.
 		"ingest.geoip.downloader.enabled": false,
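The switch from `NewRootContext(false)` plus SETGID/SETUID/SYS_CHROOT to a non-root context follows the TODO removed above: the Elasticsearch 8 image runs as the built-in elasticsearch user and its entrypoint no longer needs to step down from root. Below is a sketch of the container SecurityContext the updated `podTemplate()` is expected to render, matching what the test changes further down assert; the pointer helpers are local to the sketch, not operator APIs.

```go
// Sketch only, not part of the patch: the expected Elasticsearch container
// SecurityContext after this change, per the updated rendering tests.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func boolPtr(b bool) *bool    { return &b }
func int64Ptr(i int64) *int64 { return &i }

func main() {
	sc := &corev1.SecurityContext{
		AllowPrivilegeEscalation: boolPtr(false),
		Privileged:               boolPtr(false),
		RunAsNonRoot:             boolPtr(true),
		// 1000 is the default elasticsearch UID/GID in the image, so the
		// SETGID/SETUID/SYS_CHROOT capabilities no longer need to be added
		// back after the blanket drop.
		RunAsUser:  int64Ptr(1000),
		RunAsGroup: int64Ptr(1000),
		Capabilities: &corev1.Capabilities{
			Drop: []corev1.Capability{"ALL"},
		},
	}
	fmt.Printf("%+v\n", sc)
}
```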
"ingest.geoip.downloader.enabled": false, diff --git a/pkg/render/logstorage_test.go b/pkg/render/logstorage_test.go index 7f29522e2a..2afa79941d 100644 --- a/pkg/render/logstorage_test.go +++ b/pkg/render/logstorage_test.go @@ -217,13 +217,12 @@ var _ = Describe("Elasticsearch rendering tests", func() { esContainer := resultES.Spec.NodeSets[0].PodTemplate.Spec.Containers[0] Expect(*esContainer.SecurityContext.AllowPrivilegeEscalation).To(BeFalse()) Expect(*esContainer.SecurityContext.Privileged).To(BeFalse()) - Expect(*esContainer.SecurityContext.RunAsGroup).To(BeEquivalentTo(0)) - Expect(*esContainer.SecurityContext.RunAsNonRoot).To(BeFalse()) - Expect(*esContainer.SecurityContext.RunAsUser).To(BeEquivalentTo(0)) + Expect(*esContainer.SecurityContext.RunAsGroup).To(BeEquivalentTo(1000)) + Expect(*esContainer.SecurityContext.RunAsNonRoot).To(BeTrue()) + Expect(*esContainer.SecurityContext.RunAsUser).To(BeEquivalentTo(1000)) Expect(esContainer.SecurityContext.Capabilities).To(Equal( &corev1.Capabilities{ Drop: []corev1.Capability{"ALL"}, - Add: []corev1.Capability{"SETGID", "SETUID", "SYS_CHROOT"}, }, )) Expect(esContainer.SecurityContext.SeccompProfile).To(Equal( @@ -242,9 +241,7 @@ var _ = Describe("Elasticsearch rendering tests", func() { // Check that the expected config made it's way to the Elastic CR Expect(nodeSet.Config.Data).Should(Equal(map[string]interface{}{ - "node.master": "true", - "node.data": "true", - "node.ingest": "true", + "node.roles": []string{"data", "ingest", "master", "remote_cluster_client"}, "cluster.max_shards_per_node": 10000, "ingest.geoip.downloader.enabled": false, })) @@ -970,12 +967,10 @@ var _ = Describe("Elasticsearch rendering tests", func() { }, })) Expect(nodeSets[0].Config.Data).Should(Equal(map[string]interface{}{ - "node.master": "true", - "node.data": "true", - "node.ingest": "true", - "cluster.max_shards_per_node": 10000, - "ingest.geoip.downloader.enabled": false, - "node.attr.zone": "us-west-2a", + "node.roles": []string{"data", "ingest", "master", "remote_cluster_client"}, + "cluster.max_shards_per_node": 10000, + "ingest.geoip.downloader.enabled": false, + "node.attr.zone": "us-west-2a", "cluster.routing.allocation.awareness.attributes": "zone", })) @@ -991,12 +986,10 @@ var _ = Describe("Elasticsearch rendering tests", func() { }, })) Expect(nodeSets[1].Config.Data).Should(Equal(map[string]interface{}{ - "node.master": "true", - "node.data": "true", - "node.ingest": "true", - "cluster.max_shards_per_node": 10000, - "ingest.geoip.downloader.enabled": false, - "node.attr.zone": "us-west-2b", + "node.roles": []string{"data", "ingest", "master", "remote_cluster_client"}, + "cluster.max_shards_per_node": 10000, + "ingest.geoip.downloader.enabled": false, + "node.attr.zone": "us-west-2b", "cluster.routing.allocation.awareness.attributes": "zone", })) }) @@ -1063,13 +1056,11 @@ var _ = Describe("Elasticsearch rendering tests", func() { }, })) Expect(nodeSets[0].Config.Data).Should(Equal(map[string]interface{}{ - "node.master": "true", - "node.data": "true", - "node.ingest": "true", - "cluster.max_shards_per_node": 10000, - "ingest.geoip.downloader.enabled": false, - "node.attr.zone": "us-west-2a", - "node.attr.rack": "rack1", + "node.roles": []string{"data", "ingest", "master", "remote_cluster_client"}, + "cluster.max_shards_per_node": 10000, + "ingest.geoip.downloader.enabled": false, + "node.attr.zone": "us-west-2a", + "node.attr.rack": "rack1", "cluster.routing.allocation.awareness.attributes": "zone,rack", })) @@ -1094,13 +1085,11 @@ var _ = 
Describe("Elasticsearch rendering tests", func() { }, })) Expect(nodeSets[1].Config.Data).Should(Equal(map[string]interface{}{ - "node.master": "true", - "node.data": "true", - "node.ingest": "true", - "cluster.max_shards_per_node": 10000, - "ingest.geoip.downloader.enabled": false, - "node.attr.zone": "us-west-2b", - "node.attr.rack": "rack1", + "node.roles": []string{"data", "ingest", "master", "remote_cluster_client"}, + "cluster.max_shards_per_node": 10000, + "ingest.geoip.downloader.enabled": false, + "node.attr.zone": "us-west-2b", + "node.attr.rack": "rack1", "cluster.routing.allocation.awareness.attributes": "zone,rack", })) })