diff --git a/go.mod b/go.mod index 803c6bab4..e49a02c43 100644 --- a/go.mod +++ b/go.mod @@ -7,8 +7,8 @@ toolchain go1.22.6 require ( github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.34.1 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.31.0 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 open-cluster-management.io/api v0.14.0 sigs.k8s.io/controller-runtime v0.19.0 ) @@ -57,7 +57,7 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.3.0 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.27.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect google.golang.org/grpc v1.65.0 // indirect ) @@ -91,12 +91,12 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/openshift/api v0.0.0-20240830142653-85dc560939ef - github.com/openshift/library-go v0.0.0-20240821135116-ade3966091b1 + github.com/openshift/api v0.0.0-20241203091751-58d4ac495429 + github.com/openshift/library-go v0.0.0-20241213140326-389618b826f5 github.com/openshift/machine-config-operator v0.0.1-0.20230815171034-c2bb862bc08a github.com/pborman/uuid v1.2.1 github.com/pkg/errors v0.9.1 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 github.com/prometheus-operator/prometheus-operator/pkg/client v0.68.0 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 @@ -111,11 +111,11 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 golang.org/x/mod v0.20.0 - golang.org/x/net v0.28.0 + golang.org/x/net v0.29.0 golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.34.2 // indirect @@ -123,14 +123,14 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.0 - k8s.io/apiextensions-apiserver v0.31.0 - k8s.io/apiserver v0.31.0 - k8s.io/component-base v0.31.0 // indirect + k8s.io/api v0.31.1 + k8s.io/apiextensions-apiserver v0.31.1 + k8s.io/apiserver v0.31.1 + k8s.io/component-base v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/pod-security-admission v0.31.0 - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect sigs.k8s.io/controller-tools v0.16.1 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect @@ -170,7 +170,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/ginkgo/v2 v2.20.0 // indirect - github.com/openshift/client-go v0.0.0-20240528061634-b054aa794d87 // indirect + github.com/openshift/client-go v0.0.0-20241203091221-452dfb8fa071 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // 
indirect github.com/prometheus/prometheus v0.54.1 github.com/rivo/uniseg v0.4.7 // indirect @@ -184,7 +184,7 @@ require ( golang.org/x/sync v0.8.0 // indirect golang.org/x/tools v0.24.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - k8s.io/kube-aggregator v0.30.1 // indirect + k8s.io/kube-aggregator v0.31.1 // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect ) diff --git a/go.sum b/go.sum index 0a5511cdf..5ef52317e 100644 --- a/go.sum +++ b/go.sum @@ -288,6 +288,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -389,6 +390,8 @@ github.com/openshift/library-go v0.0.0-20240731134552-8211143dfde7 h1:FluOIEKNSU github.com/openshift/library-go v0.0.0-20240731134552-8211143dfde7/go.mod h1:PdASVamWinll2BPxiUpXajTwZxV8A1pQbWEsCN1od7I= github.com/openshift/library-go v0.0.0-20240821135116-ade3966091b1 h1:Kp33k9pOeJTW7c2zPjwbhnsNijuSKW6c+gfX2LJhD8U= github.com/openshift/library-go v0.0.0-20240821135116-ade3966091b1/go.mod h1:PdASVamWinll2BPxiUpXajTwZxV8A1pQbWEsCN1od7I= +github.com/openshift/library-go v0.0.0-20241213140326-389618b826f5 h1:TlKl01ZwP3Hyq5pU0e1RfmW6yO/VtDt+EmL9+pqyWvQ= +github.com/openshift/library-go v0.0.0-20241213140326-389618b826f5/go.mod h1:eGSI6tp7yUVr4V2d0WrVt2l5s3iCwAh8Hi0RC9Fo16U= github.com/openshift/machine-config-operator v0.0.1-0.20230815171034-c2bb862bc08a h1:3KR43D0bbEi3IYSS6b7abKWbj93RJyuxoHImmYaiWZU= github.com/openshift/machine-config-operator v0.0.1-0.20230815171034-c2bb862bc08a/go.mod h1:kP51fbL8QBSY/mAkFicoF73x0QSraPrX4BjWIdzFPio= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= @@ -405,6 +408,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1 h1:hOnp+1FLBm+ifsyiRbunmfSs99jKAq+Tr5elCmo5l5U= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1/go.mod h1:JtflYMUMay9HGil4aRg+dSj6X6mngtuBJf/ULOCxbxI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= github.com/prometheus-operator/prometheus-operator/pkg/client v0.68.0 h1:8FS0sXpFkFPxp2gfkxyEMnhZV9yhf7xPbpsIeUZHlzM= github.com/prometheus-operator/prometheus-operator/pkg/client v0.68.0/go.mod h1:ul4ND0BMCcOX1OSZvbJA1/lh7yQ2ILHNKuZIojGISe4= github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= @@ -533,6 +538,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -607,6 +614,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -680,11 +689,15 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -695,6 +708,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -870,20 +885,34 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY= k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= +k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c= +k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= +k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-aggregator v0.30.1 h1:ymR2BsxDacTKwzKTuNhGZttuk009c+oZbSeD+IPX5q4= k8s.io/kube-aggregator v0.30.1/go.mod h1:SFbqWsM6ea8dHd3mPLsZFzJHbjBOS5ykIgJh4znZ5iQ= +k8s.io/kube-aggregator v0.31.1 h1:vrYBTTs3xMrpiEsmBjsLETZE9uuX67oQ8B3i1BFfMPw= +k8s.io/kube-aggregator v0.31.1/go.mod h1:+aW4NX50uneozN+BtoCxI4g7ND922p8Wy3tWKFDiWVk= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/pod-security-admission v0.30.3 h1:UDGZWR3ry/XrN/Ki/w7qrp49OwgQsKyh+6xWbexvJi8= @@ -892,6 +921,8 @@ k8s.io/pod-security-admission v0.31.0 h1:z8lTQ1+EZ8aX+xTrDTT2Udt1b9mzci2o2L2O4TU k8s.io/pod-security-admission v0.31.0/go.mod h1:672PutRBAIEOJJljOHDYhXiXrQDDFdB3z7hddN3Pv5c= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 
h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.14.0 h1:yjhnNeO/QudiIoEi0i/yUYmP3iElAfUgtj4pHMV+4uM= open-cluster-management.io/api v0.14.0/go.mod h1:ltijKJhDifrPH0csvCUmFt5lzaERv+BBfh6X3l83rT0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go b/vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go new file mode 100644 index 000000000..a718832b1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go @@ -0,0 +1,87 @@ +package jsonpatch + +import ( + "encoding/json" + "fmt" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type PatchOperation struct { + Op string `json:"op,omitempty"` + Path string `json:"path,omitempty"` + Value interface{} `json:"value,omitempty"` +} + +const ( + patchTestOperation = "test" + patchRemoveOperation = "remove" +) + +type PatchSet struct { + patches []PatchOperation +} + +func New() *PatchSet { + return &PatchSet{} +} + +func (p *PatchSet) WithRemove(path string, test TestCondition) *PatchSet { + p.WithTest(test.path, test.value) + p.addOperation(patchRemoveOperation, path, nil) + return p +} + +func (p *PatchSet) WithTest(path string, value interface{}) *PatchSet { + p.addOperation(patchTestOperation, path, value) + return p +} + +func (p *PatchSet) IsEmpty() bool { + return len(p.patches) == 0 +} + +func (p *PatchSet) Marshal() ([]byte, error) { + if err := p.validate(); err != nil { + return nil, err + } + jsonBytes, err := json.Marshal(p.patches) + if err != nil { + return nil, err + } + return jsonBytes, nil +} + +func (p *PatchSet) addOperation(op, path string, value interface{}) { + patch := PatchOperation{ + Op: op, + Path: path, + Value: value, + } + p.patches = append(p.patches, patch) +} + +func (p *PatchSet) validate() error { + var errs []error + for i, patch := range p.patches { + if patch.Op == patchTestOperation { + // testing resourceVersion is fragile + // because it is likely to change frequently + // instead, test against a different field + // should be written. 
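// Hypothetical usage sketch of the PatchSet helpers vendored above; the paths
// and the "FooDegraded" value are illustrative only and not taken from this
// repository.
//
//	patch := jsonpatch.New().
//		WithRemove(
//			"/status/conditions/0",
//			jsonpatch.NewTestCondition("/status/conditions/0/type", "FooDegraded"),
//		)
//	if !patch.IsEmpty() {
//		raw, err := patch.Marshal() // errors if any test op targets /metadata/resourceVersion
//		_ = raw
//		_ = err
//	}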
+ if patch.Path == "/metadata/resourceVersion" { + errs = append(errs, fmt.Errorf("test operation at index: %d contains forbidden path: %q", i, patch.Path)) + } + } + } + return utilerrors.NewAggregate(errs) +} + +type TestCondition struct { + path string + value interface{} +} + +func NewTestCondition(path string, value interface{}) TestCondition { + return TestCondition{path, value} +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go index 722d95d5e..bf5b1f373 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go @@ -7,6 +7,8 @@ import ( "sync" "time" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + "github.com/robfig/cron" apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -17,7 +19,6 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/library-go/pkg/operator/management" - "github.com/openshift/library-go/pkg/operator/v1helpers" operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" ) @@ -29,23 +30,31 @@ var defaultCacheSyncTimeout = 10 * time.Minute // baseController represents generic Kubernetes controller boiler-plate type baseController struct { - name string - cachesToSync []cache.InformerSynced - sync func(ctx context.Context, controllerContext SyncContext) error - syncContext SyncContext - syncDegradedClient operatorv1helpers.OperatorClient - resyncEvery time.Duration - resyncSchedules []cron.Schedule - postStartHooks []PostStartHook - cacheSyncTimeout time.Duration + name string + controllerInstanceName string + cachesToSync []cache.InformerSynced + sync func(ctx context.Context, controllerContext SyncContext) error + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncEvery time.Duration + resyncSchedules []cron.Schedule + postStartHooks []PostStartHook + cacheSyncTimeout time.Duration } var _ Controller = &baseController{} +// Name returns a controller name. func (c baseController) Name() string { return c.name } +// ControllerInstanceName specifies the controller instance. +// Useful when the same controller is used multiple times. +func (c baseController) ControllerInstanceName() string { + return c.controllerInstanceName +} + type scheduledJob struct { queue workqueue.RateLimitingInterface name string @@ -222,23 +231,25 @@ func (c *baseController) reportDegraded(ctx context.Context, reportedError error return reportedError } if reportedError != nil { - _, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{ - Type: c.name + "Degraded", - Status: operatorv1.ConditionTrue, - Reason: "SyncError", - Message: reportedError.Error(), - })) + condition := applyoperatorv1.OperatorStatus(). + WithConditions(applyoperatorv1.OperatorCondition(). + WithType(c.name + "Degraded"). + WithStatus(operatorv1.ConditionTrue). + WithReason("SyncError"). 
+ WithMessage(reportedError.Error())) + updateErr := c.syncDegradedClient.ApplyOperatorStatus(ctx, ControllerFieldManager(c.name, "reportDegraded"), condition) if updateErr != nil { klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr) } return reportedError } - _, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient, - v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{ - Type: c.name + "Degraded", - Status: operatorv1.ConditionFalse, - Reason: "AsExpected", - })) + + condition := applyoperatorv1.OperatorStatus(). + WithConditions(applyoperatorv1.OperatorCondition(). + WithType(c.name + "Degraded"). + WithStatus(operatorv1.ConditionFalse). + WithReason("AsExpected")) + updateErr := c.syncDegradedClient.ApplyOperatorStatus(ctx, ControllerFieldManager(c.name, "reportDegraded"), condition) return updateErr } diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go index a431f1f1a..1f9b15a72 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go @@ -25,17 +25,18 @@ func DefaultQueueKeysFunc(_ runtime.Object) []string { // Factory is generator that generate standard Kubernetes controllers. // Factory is really generic and should be only used for simple controllers that does not require special stuff.. type Factory struct { - sync SyncFunc - syncContext SyncContext - syncDegradedClient operatorv1helpers.OperatorClient - resyncInterval time.Duration - resyncSchedules []string - informers []filteredInformers - informerQueueKeys []informersWithQueueKey - bareInformers []Informer - postStartHooks []PostStartHook - namespaceInformers []*namespaceInformer - cachesToSync []cache.InformerSynced + sync SyncFunc + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncInterval time.Duration + resyncSchedules []string + informers []filteredInformers + informerQueueKeys []informersWithQueueKey + bareInformers []Informer + postStartHooks []PostStartHook + namespaceInformers []*namespaceInformer + cachesToSync []cache.InformerSynced + controllerInstanceName string } // Informer represents any structure that allow to register event handlers and informs if caches are synced. @@ -237,6 +238,13 @@ func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.Opera return f } +// WithControllerInstanceName specifies the controller instance. +// Useful when the same controller is used multiple times. +func (f *Factory) WithControllerInstanceName(controllerInstanceName string) *Factory { + f.controllerInstanceName = controllerInstanceName + return f +} + // Controller produce a runnable controller. 
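// Hypothetical wiring sketch for the instance-name plumbing added above; the
// "MyOperand" / "FooController" names, syncFn, configInformer and eventRecorder
// are illustrative and not taken from this repository.
//
//	ctrl := factory.New().
//		WithSync(syncFn).
//		WithInformers(configInformer).
//		WithControllerInstanceName(factory.ControllerInstanceName("MyOperand", "FooController")).
//		ToController("FooController", eventRecorder)
//
// ControllerFieldManager("FooController", "reportDegraded") is then used as the
// field manager when the controller applies its Degraded condition.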
func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller { if f.sync == nil { @@ -266,15 +274,16 @@ func (f *Factory) ToController(name string, eventRecorder events.Recorder) Contr } c := &baseController{ - name: name, - syncDegradedClient: f.syncDegradedClient, - sync: f.sync, - resyncEvery: f.resyncInterval, - resyncSchedules: cronSchedules, - cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...), - syncContext: ctx, - postStartHooks: f.postStartHooks, - cacheSyncTimeout: defaultCacheSyncTimeout, + name: name, + controllerInstanceName: f.controllerInstanceName, + syncDegradedClient: f.syncDegradedClient, + sync: f.sync, + resyncEvery: f.resyncInterval, + resyncSchedules: cronSchedules, + cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...), + syncContext: ctx, + postStartHooks: f.postStartHooks, + cacheSyncTimeout: defaultCacheSyncTimeout, } for i := range f.informerQueueKeys { diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go index 0ef98c670..f0cbfd0c8 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go @@ -2,6 +2,7 @@ package factory import ( "context" + "fmt" "k8s.io/client-go/util/workqueue" @@ -45,3 +46,11 @@ type SyncContext interface { // The syncContext.syncContext passed is the main controller syncContext, when cancelled it means the controller is being shut down. // The syncContext provides access to controller name, queue and event recorder. type SyncFunc func(ctx context.Context, controllerContext SyncContext) error + +func ControllerFieldManager(controllerName, usageName string) string { + return fmt.Sprintf("%s-%s", controllerName, usageName) +} + +func ControllerInstanceName(instanceName, controllerName string) string { + return fmt.Sprintf("%s-%s", instanceName, controllerName) +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index 63184d2eb..e6651fecc 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -150,9 +150,9 @@ var ciphers = map[string]uint16{ // ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml var openSSLToIANACiphersMap = map[string]string{ // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows - // "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 - // "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 - // "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 + "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 + "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 + "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 // TLS 1.2 "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go index 71ae47ff1..d8569f2c8 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go @@ -98,7 +98,10 @@ func NewCertRotationController( WithPostStartHooks( c.targetCertRecheckerPostRunHook, ). - ToController("CertRotationController", recorder.WithComponentSuffix("cert-rotation-controller").WithComponentSuffix(name)) + ToController( + "CertRotationController", // don't change what is passed here unless you also remove the old FooDegraded condition + recorder.WithComponentSuffix("cert-rotation-controller").WithComponentSuffix(name), + ) } func (c CertRotationController) Sync(ctx context.Context, syncCtx factory.SyncContext) error { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/OWNERS new file mode 100644 index 000000000..8cd5c0bca --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- benluddy +approvers: +- benluddy diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go index 3b9f61180..418224998 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go @@ -9,6 +9,8 @@ import ( "strings" "time" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + "github.com/imdario/mergo" "k8s.io/klog/v2" @@ -44,6 +46,8 @@ type Listers interface { type ObserveConfigFunc func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) type ConfigObserver struct { + controllerInstanceName string + // observers are called in an undefined order and their results are merged to // determine the observed configuration. 
observers []ObserveConfigFunc @@ -58,6 +62,7 @@ type ConfigObserver struct { } func NewConfigObserver( + name string, operatorClient v1helpers.OperatorClient, eventRecorder events.Recorder, listers Listers, @@ -65,6 +70,7 @@ func NewConfigObserver( observers ...ObserveConfigFunc, ) factory.Controller { return NewNestedConfigObserver( + name, operatorClient, eventRecorder, listers, @@ -95,6 +101,7 @@ func NewConfigObserver( // oauthAPIController := NewNestedConfigObserver(..., []string{"oauthAPIServer"} // oauthServerController := NewNestedConfigObserver(..., []string{"oauthServer"} func NewNestedConfigObserver( + name string, operatorClient v1helpers.OperatorClient, eventRecorder events.Recorder, listers Listers, @@ -104,14 +111,23 @@ func NewNestedConfigObserver( observers ...ObserveConfigFunc, ) factory.Controller { c := &ConfigObserver{ - operatorClient: operatorClient, - observers: observers, - listers: listers, - nestedConfigPath: nestedConfigPath, - degradedConditionType: degradedConditionPrefix + condition.ConfigObservationDegradedConditionType, + controllerInstanceName: factory.ControllerInstanceName(name, "ConfigObserver"), + operatorClient: operatorClient, + observers: observers, + listers: listers, + nestedConfigPath: nestedConfigPath, + degradedConditionType: degradedConditionPrefix + condition.ConfigObservationDegradedConditionType, } - return factory.New().ResyncEvery(time.Minute).WithSync(c.sync).WithInformers(append(informers, listersToInformer(listers)...)...).ToController("ConfigObserver", eventRecorder.WithComponentSuffix("config-observer")) + return factory.New(). + ResyncEvery(time.Minute). + WithSync(c.sync). + WithControllerInstanceName(c.controllerInstanceName). + WithInformers(append(informers, listersToInformer(listers)...)...). + ToController( + "ConfigObserver", // don't change what is passed here unless you also remove the old FooDegraded condition + eventRecorder.WithComponentSuffix("config-observer"), + ) } // sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This @@ -165,16 +181,18 @@ func (c ConfigObserver) sync(ctx context.Context, syncCtx factory.SyncContext) e configError := v1helpers.NewMultiLineAggregate(errs) // update failing condition - cond := operatorv1.OperatorCondition{ - Type: c.degradedConditionType, - Status: operatorv1.ConditionFalse, - } + condition := applyoperatorv1.OperatorCondition(). + WithType(c.degradedConditionType). + WithStatus(operatorv1.ConditionFalse) if configError != nil { - cond.Status = operatorv1.ConditionTrue - cond.Reason = "Error" - cond.Message = configError.Error() + condition = condition. + WithStatus(operatorv1.ConditionTrue). + WithReason("Error"). 
+ WithMessage(configError.Error()) } - if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + status := applyoperatorv1.OperatorStatus().WithConditions(condition) + updateError := c.operatorClient.ApplyOperatorStatus(ctx, c.controllerInstanceName, status) + if updateError != nil { return updateError } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go index f513a90f3..2918012ff 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -2,13 +2,13 @@ package events import ( "context" + "crypto/sha256" "errors" "fmt" - "os" - "time" - "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "k8s.io/utils/clock" + "os" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -145,11 +145,12 @@ func guessControllerReferenceForNamespace(ctx context.Context, client corev1clie } // NewRecorder returns new event recorder. -func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { +func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference, clock clock.PassiveClock) Recorder { return &recorder{ eventClient: client, involvedObjectRef: involvedObjectRef, sourceComponent: sourceComponentName, + clock: clock, } } @@ -158,6 +159,7 @@ type recorder struct { eventClient corev1client.EventInterface involvedObjectRef *corev1.ObjectReference sourceComponent string + clock clock.PassiveClock // TODO: This is not the right way to pass the context, but there is no other way without breaking event interface ctx context.Context @@ -196,7 +198,7 @@ func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) { // Event emits the normal type event. func (r *recorder) Event(reason, message string) { - event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message) + event := makeEvent(r.clock, r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message) ctx := context.Background() if r.ctx != nil { ctx = r.ctx @@ -208,7 +210,7 @@ func (r *recorder) Event(reason, message string) { // Warning emits the warning type event. func (r *recorder) Warning(reason, message string) { - event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) + event := makeEvent(r.clock, r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) ctx := context.Background() if r.ctx != nil { ctx = r.ctx @@ -218,11 +220,12 @@ func (r *recorder) Warning(reason, message string) { } } -func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { - currentTime := metav1.Time{Time: time.Now()} +func makeEvent(clock clock.PassiveClock, involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { + currentTime := metav1.Time{Time: clock.Now()} event := &corev1.Event{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()), + // TODO this is always used to create a unique event. 
Perhaps we should hash the message to be unique enough for apply-configuration + Name: fmt.Sprintf("%v.%x.%s", involvedObjRef.Name, currentTime.UnixNano(), hashForEventNameSuffix(eventType, reason, message)), Namespace: involvedObjRef.Namespace, }, InvolvedObject: *involvedObjRef, @@ -236,3 +239,20 @@ func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, e event.Source.Component = sourceComponent return event } + +func hashForEventNameSuffix(in ...string) string { + data := []byte{} + for _, curr := range in { + data = append(data, []byte(curr)...) + } + if len(data) == 0 { + return "MISSING" + } + + hash := sha256.New() + hash.Write(data) + hashBytes := hash.Sum(nil) + + // we're looking to deconflict names, not protect the crown jewels + return fmt.Sprintf("%x", hashBytes[len(hashBytes)-4:]) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go index 75efe3e19..d97be0de6 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go @@ -3,6 +3,7 @@ package events import ( "context" "fmt" + "k8s.io/utils/clock" "sync" corev1 "k8s.io/api/core/v1" @@ -12,6 +13,7 @@ import ( type inMemoryEventRecorder struct { events []*corev1.Event source string + clock clock.PassiveClock ctx context.Context sync.Mutex } @@ -31,8 +33,12 @@ type InMemoryRecorder interface { // NewInMemoryRecorder provides event recorder that stores all events recorded in memory and allow to replay them using the Events() method. // This recorder should be only used in unit tests. -func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder { - return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent} +func NewInMemoryRecorder(sourceComponent string, clock clock.PassiveClock) InMemoryRecorder { + return &inMemoryEventRecorder{ + events: []*corev1.Event{}, + source: sourceComponent, + clock: clock, + } } func (r *inMemoryEventRecorder) ComponentName() string { @@ -65,7 +71,7 @@ func (r *inMemoryEventRecorder) Events() []*corev1.Event { func (r *inMemoryEventRecorder) Event(reason, message string) { r.Lock() defer r.Unlock() - event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message) + event := makeEvent(r.clock, &inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message) r.events = append(r.events, event) } @@ -76,7 +82,7 @@ func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interf func (r *inMemoryEventRecorder) Warning(reason, message string) { r.Lock() defer r.Unlock() - event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message) + event := makeEvent(r.clock, &inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message) klog.Info(event.String()) r.events = append(r.events, event) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go index 90639f2d9..1906454a9 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go @@ -3,6 +3,7 @@ package events import ( "context" "fmt" + "k8s.io/utils/clock" corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" @@ -10,6 
+11,7 @@ import ( type LoggingEventRecorder struct { component string + clock clock.PassiveClock ctx context.Context } @@ -19,8 +21,11 @@ func (r *LoggingEventRecorder) WithContext(ctx context.Context) Recorder { } // NewLoggingEventRecorder provides event recorder that will log all recorded events via klog. -func NewLoggingEventRecorder(component string) Recorder { - return &LoggingEventRecorder{component: component} +func NewLoggingEventRecorder(component string, clock clock.PassiveClock) Recorder { + return &LoggingEventRecorder{ + component: component, + clock: clock, + } } func (r *LoggingEventRecorder) ComponentName() string { @@ -40,7 +45,7 @@ func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder { } func (r *LoggingEventRecorder) Event(reason, message string) { - event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message) + event := makeEvent(r.clock, &inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message) klog.Info(event.String()) } @@ -49,7 +54,7 @@ func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interfa } func (r *LoggingEventRecorder) Warning(reason, message string) { - event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message) + event := makeEvent(r.clock, &inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message) klog.Warning(event.String()) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go index 0e41949a7..282a9033d 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go @@ -3,6 +3,7 @@ package events import ( "context" "fmt" + "k8s.io/utils/clock" "strings" "sync" @@ -16,19 +17,19 @@ import ( ) // NewKubeRecorder returns new event recorder with tweaked correlator options. -func NewKubeRecorderWithOptions(client corev1client.EventInterface, options record.CorrelatorOptions, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { +func NewKubeRecorderWithOptions(client corev1client.EventInterface, options record.CorrelatorOptions, sourceComponentName string, involvedObjectRef *corev1.ObjectReference, clock clock.PassiveClock) Recorder { return (&upstreamRecorder{ client: client, component: sourceComponentName, involvedObjectRef: involvedObjectRef, options: options, - fallbackRecorder: NewRecorder(client, sourceComponentName, involvedObjectRef), + fallbackRecorder: NewRecorder(client, sourceComponentName, involvedObjectRef, clock), }).ForComponent(sourceComponentName) } // NewKubeRecorder returns new event recorder with default correlator options. -func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { - return NewKubeRecorderWithOptions(client, record.CorrelatorOptions{}, sourceComponentName, involvedObjectRef) +func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference, clock clock.PassiveClock) Recorder { + return NewKubeRecorderWithOptions(client, record.CorrelatorOptions{}, sourceComponentName, involvedObjectRef, clock) } // upstreamRecorder is an implementation of Recorder interface. 
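Downstream note: the recorder constructors above (NewRecorder, NewKubeRecorder, NewKubeRecorderWithOptions, NewInMemoryRecorder, NewLoggingEventRecorder) now take a clock.PassiveClock, so callers have to supply one. A minimal adaptation sketch; kubeClient, controllerRef, the namespace and component names are assumptions:

    import (
        "time"

        "k8s.io/utils/clock"
        clocktesting "k8s.io/utils/clock/testing"

        "github.com/openshift/library-go/pkg/operator/events"
    )

    // production: wall-clock timestamps for emitted events
    recorder := events.NewKubeRecorder(
        kubeClient.CoreV1().Events("my-namespace"), "my-operator", controllerRef, clock.RealClock{})

    // tests: a fake passive clock keeps event timestamps (and thus the generated
    // event names) deterministic
    testRecorder := events.NewInMemoryRecorder("unit-test", clocktesting.NewFakePassiveClock(time.Now()))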
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go index 650cf9b4f..c53e370de 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go @@ -245,3 +245,27 @@ func ApplyDaemonSetWithForce(ctx context.Context, client appsclientv1.DaemonSets resourcehelper.ReportUpdateEvent(recorder, required, err) return actual, true, err } + +func DeleteDeployment(ctx context.Context, client appsclientv1.DeploymentsGetter, recorder events.Recorder, required *appsv1.Deployment) (*appsv1.Deployment, bool, error) { + err := client.Deployments(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + resourcehelper.ReportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteDaemonSet(ctx context.Context, client appsclientv1.DaemonSetsGetter, recorder events.Recorder, required *appsv1.DaemonSet) (*appsv1.DaemonSet, bool, error) { + err := client.DaemonSets(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + resourcehelper.ReportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go index f954d48cc..377e27806 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -463,12 +463,21 @@ func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter, // SyncConfigMap applies a ConfigMap from a location `sourceNamespace/sourceName` to `targetNamespace/targetName` func SyncConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) { - return SyncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs) + return syncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, nil) +} + +// SyncConfigMapWithLabels does what SyncConfigMap does, but adds additional labels to the target ConfigMap. +func SyncConfigMapWithLabels(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.ConfigMap, bool, error) { + return syncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, labels) } // SyncPartialConfigMap does what SyncConfigMap does but it only synchronizes a subset of keys given by `syncedKeys`. // SyncPartialConfigMap will delete the target if `syncedKeys` are set but the source does not contain any of these keys. 
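// Hypothetical caller sketch for the new *WithLabels variants above; the
// namespaces, names and label key are illustrative only.
//
//	_, changed, err := resourceapply.SyncConfigMapWithLabels(
//		ctx, kubeClient.CoreV1(), eventRecorder,
//		"source-namespace", "source-configmap",
//		"target-namespace", "target-configmap",
//		nil, // ownerRefs
//		map[string]string{"example.com/managed-by": "my-operator"},
//	)
//	_ = changed
//	_ = err
//
// SyncSecretWithLabels takes the same trailing labels argument for Secrets.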
func SyncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) { + return syncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, syncedKeys, ownerRefs, nil) +} + +func syncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.ConfigMap, bool, error) { source, err := client.ConfigMaps(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): @@ -500,6 +509,12 @@ func SyncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGet source.Name = targetName source.ResourceVersion = "" source.OwnerReferences = ownerRefs + if labels != nil && source.Labels == nil { + source.Labels = map[string]string{} + } + for k, v := range labels { + source.Labels[k] = v + } return ApplyConfigMap(ctx, client, recorder, source) } } @@ -524,12 +539,21 @@ func deleteConfigMapSyncTarget(ctx context.Context, client coreclientv1.ConfigMa // SyncSecret applies a Secret from a location `sourceNamespace/sourceName` to `targetNamespace/targetName` func SyncSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { - return SyncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs) + return syncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, nil) +} + +// SyncSecretWithLabels does what SyncSecret does, but adds additional labels to the target Secret. +func SyncSecretWithLabels(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.Secret, bool, error) { + return syncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, labels) } // SyncPartialSecret does what SyncSecret does but it only synchronizes a subset of keys given by `syncedKeys`. // SyncPartialSecret will delete the target if `syncedKeys` are set but the source does not contain any of these keys. 
func SyncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { + return syncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, syncedKeys, ownerRefs, nil) +} + +func syncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.Secret, bool, error) { source, err := client.Secrets(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): @@ -579,6 +603,12 @@ func SyncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, r source.Name = targetName source.ResourceVersion = "" source.OwnerReferences = ownerRefs + if labels != nil && source.Labels == nil { + source.Labels = map[string]string{} + } + for k, v := range labels { + source.Labels[k] = v + } return ApplySecret(ctx, client, recorder, source) } } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go index d812254dc..357efad61 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go @@ -6,6 +6,8 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -317,6 +319,18 @@ func DeleteAll(ctx context.Context, clients *ClientHolder, recorder events.Recor } else { _, result.Changed, result.Error = DeleteRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t) } + case *appsv1.Deployment: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteDeployment(ctx, clients.kubeClient.AppsV1(), recorder, t) + } + case *appsv1.DaemonSet: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteDaemonSet(ctx, clients.kubeClient.AppsV1(), recorder, t) + } case *policyv1.PodDisruptionBudget: if clients.kubeClient == nil { result.Error = fmt.Errorf("missing kubeClient") diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go index 8b64f23b7..d0996a2af 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go @@ -5,6 +5,7 @@ import ( "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -14,137 +15,128 @@ import ( "k8s.io/klog/v2" ) +var alertmanagerGVR = schema.GroupVersionResource{Group: 
"monitoring.coreos.com", Version: "v1", Resource: "alertmanagers"} +var prometheusGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheuses"} +var prometheusRuleGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheusrules"} var serviceMonitorGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"} -func ensureGenericSpec(required, existing *unstructured.Unstructured, mimicDefaultingFn mimicDefaultingFunc, equalityChecker equalityChecker) (*unstructured.Unstructured, bool, error) { - requiredCopy := required.DeepCopy() - mimicDefaultingFn(requiredCopy) - requiredSpec, _, err := unstructured.NestedMap(requiredCopy.UnstructuredContent(), "spec") - if err != nil { - return nil, false, err - } - existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec") - if err != nil { - return nil, false, err - } - - if equalityChecker.DeepEqual(existingSpec, requiredSpec) { - return existing, false, nil - } +// ApplyAlertmanager applies the Alertmanager. +func ApplyAlertmanager(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + return ApplyUnstructuredResourceImproved(ctx, client, recorder, required, noCache, alertmanagerGVR, nil, nil) +} - existingCopy := existing.DeepCopy() - if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), requiredSpec, "spec"); err != nil { - return nil, true, err - } +// DeleteAlertmanager deletes the Alertmanager. +func DeleteAlertmanager(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + return DeleteUnstructuredResource(ctx, client, recorder, required, alertmanagerGVR) +} - return existingCopy, true, nil +// ApplyPrometheus applies the Prometheus. +func ApplyPrometheus(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + return ApplyUnstructuredResourceImproved(ctx, client, recorder, required, noCache, prometheusGVR, nil, nil) } -// mimicDefaultingFunc is used to set fields that are defaulted. This allows for sparse manifests to apply correctly. -// For instance, if field .spec.foo is set to 10 if not set, then a function of this type could be used to set -// the field to 10 to match the comparison. This is soemtimes (often?) easier than updating the semantic equality. -// We often see this in places like RBAC and CRD. Logically it can happen generically too. -type mimicDefaultingFunc func(obj *unstructured.Unstructured) +// DeletePrometheus deletes the Prometheus. +func DeletePrometheus(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + return DeleteUnstructuredResource(ctx, client, recorder, required, prometheusGVR) +} -func noDefaulting(obj *unstructured.Unstructured) {} +// ApplyPrometheusRule applies the PrometheusRule. +func ApplyPrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + return ApplyUnstructuredResourceImproved(ctx, client, recorder, required, noCache, prometheusRuleGVR, nil, nil) +} -// equalityChecker allows for custom equality comparisons. 
This can be used to allow equality checks to skip certain -// operator managed fields. This capability allows something like .spec.scale to be specified or changed by a component -// like HPA. Use this capability sparingly. Most places ought to just use `equality.Semantic` -type equalityChecker interface { - DeepEqual(a1, a2 interface{}) bool +// DeletePrometheusRule deletes the PrometheusRule. +func DeletePrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + return DeleteUnstructuredResource(ctx, client, recorder, required, prometheusRuleGVR) } -// ApplyServiceMonitor applies the Prometheus service monitor. +// ApplyServiceMonitor applies the ServiceMonitor. func ApplyServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { - namespace := required.GetNamespace() - existing, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { - newObj, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{}) - if createErr != nil { - recorder.Warningf("ServiceMonitorCreateFailed", "Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v", createErr) - return nil, true, createErr - } - recorder.Eventf("ServiceMonitorCreated", "Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing") - return newObj, true, nil - } - if err != nil { - return nil, false, err - } - - existingCopy := existing.DeepCopy() - - toUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic) - if err != nil { - return nil, false, err - } - - if !modified { - return nil, false, nil - } - - if klog.V(2).Enabled() { - klog.Infof("ServiceMonitor %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, toUpdate)) - } - - newObj, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{}) - if err != nil { - recorder.Warningf("ServiceMonitorUpdateFailed", "Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v", err) - return nil, true, err - } - - recorder.Eventf("ServiceMonitorUpdated", "Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed") - return newObj, true, err + return ApplyUnstructuredResourceImproved(ctx, client, recorder, required, noCache, serviceMonitorGVR, nil, nil) } -var prometheusRuleGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheusrules"} +// DeleteServiceMonitor deletes the ServiceMonitor. +func DeleteServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + return DeleteUnstructuredResource(ctx, client, recorder, required, serviceMonitorGVR) +} -// ApplyPrometheusRule applies the PrometheusRule -func ApplyPrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { +// ApplyUnstructuredResourceImproved can utilize the cache to reconcile the existing resource to the desired state. +// NOTE: A `nil` defaultingFunc and equalityChecker are assigned resourceapply.noDefaulting and equality.Semantic, +// respectively. 
Users are recommended to instantiate a cache to benefit from the memoization machinery. +func ApplyUnstructuredResourceImproved( + ctx context.Context, + client dynamic.Interface, + recorder events.Recorder, + required *unstructured.Unstructured, + cache ResourceCache, + resourceGVR schema.GroupVersionResource, + defaultingFunc mimicDefaultingFunc, + equalityChecker equalityChecker, +) (*unstructured.Unstructured, bool, error) { + name := required.GetName() namespace := required.GetNamespace() - existing, err := client.Resource(prometheusRuleGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{}) + // Create if resource does not exist, and update cache with new metadata. + if cache == nil { + cache = noCache + } + existing, err := client.Resource(resourceGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) if errors.IsNotFound(err) { - newObj, createErr := client.Resource(prometheusRuleGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{}) - if createErr != nil { - recorder.Warningf("PrometheusRuleCreateFailed", "Failed to create PrometheusRule.monitoring.coreos.com/v1: %v", createErr) - return nil, true, createErr - } - recorder.Eventf("PrometheusRuleCreated", "Created PrometheusRule.monitoring.coreos.com/v1 because it was missing") - return newObj, true, nil + want, errCreate := client.Resource(resourceGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{}) + resourcehelper.ReportCreateEvent(recorder, required, errCreate) + cache.UpdateCachedResourceMetadata(required, want) + return want, true, errCreate } if err != nil { return nil, false, err } + // Skip if the cache is non-nil, and the metadata hashes and resource version hashes match. + if cache.SafeToSkipApply(required, existing) { + return existing, false, nil + } + existingCopy := existing.DeepCopy() - toUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic) + // Replace and/or merge certain metadata fields. + didMetadataModify := false + err = resourcemerge.EnsureObjectMetaForUnstructured(&didMetadataModify, existingCopy, required) if err != nil { return nil, false, err } - if !modified { - return nil, false, nil + // Deep-check the spec objects for equality, and update the cache in either case. + if defaultingFunc == nil { + defaultingFunc = noDefaulting } - - if klog.V(2).Enabled() { - klog.Infof("PrometheusRule %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, toUpdate)) + if equalityChecker == nil { + equalityChecker = equality.Semantic } - - newObj, err := client.Resource(prometheusRuleGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{}) + didSpecModify := false + err = ensureGenericSpec(&didSpecModify, required, existingCopy, defaultingFunc, equalityChecker) if err != nil { - recorder.Warningf("PrometheusRuleUpdateFailed", "Failed to update PrometheusRule.monitoring.coreos.com/v1: %v", err) - return nil, true, err + return nil, false, err + } + if !didSpecModify && !didMetadataModify { + // Update cache even if certain fields are not modified, in order to maintain a consistent cache based on the + // resource hash. 
The resource hash depends on the entire metadata, not just the fields that were checked above, + cache.UpdateCachedResourceMetadata(required, existingCopy) + return existingCopy, false, nil } - recorder.Eventf("PrometheusRuleUpdated", "Updated PrometheusRule.monitoring.coreos.com/v1 because it changed") - return newObj, true, err + // Perform update if resource exists but different from the required (desired) one. + if klog.V(4).Enabled() { + klog.Infof("%s %q changes: %v", resourceGVR.String(), namespace+"/"+name, JSONPatchNoError(existing, existingCopy)) + } + actual, errUpdate := client.Resource(resourceGVR).Namespace(namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + resourcehelper.ReportUpdateEvent(recorder, existingCopy, errUpdate) + cache.UpdateCachedResourceMetadata(existingCopy, actual) + return actual, true, errUpdate } -func DeletePrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { - namespace := required.GetNamespace() - err := client.Resource(prometheusRuleGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{}) +// DeleteUnstructuredResource deletes the unstructured resource. +func DeleteUnstructuredResource(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured, resourceGVR schema.GroupVersionResource) (*unstructured.Unstructured, bool, error) { + err := client.Resource(resourceGVR).Namespace(required.GetNamespace()).Delete(ctx, required.GetName(), metav1.DeleteOptions{}) if err != nil && errors.IsNotFound(err) { return nil, false, nil } @@ -155,15 +147,40 @@ func DeletePrometheusRule(ctx context.Context, client dynamic.Interface, recorde return nil, true, nil } -func DeleteServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { - namespace := required.GetNamespace() - err := client.Resource(serviceMonitorGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{}) - if err != nil && errors.IsNotFound(err) { - return nil, false, nil +func ensureGenericSpec(didSpecModify *bool, required, existing *unstructured.Unstructured, mimicDefaultingFn mimicDefaultingFunc, equalityChecker equalityChecker) error { + mimicDefaultingFn(required) + requiredSpec, _, err := unstructured.NestedMap(required.UnstructuredContent(), "spec") + if err != nil { + return err } + existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec") if err != nil { - return nil, false, err + return err } - resourcehelper.ReportDeleteEvent(recorder, required, err) - return nil, true, nil + + if equalityChecker.DeepEqual(existingSpec, requiredSpec) { + return nil + } + + if err = unstructured.SetNestedMap(existing.UnstructuredContent(), requiredSpec, "spec"); err != nil { + return err + } + *didSpecModify = true + + return nil +} + +// mimicDefaultingFunc is used to set fields that are defaulted. This allows for sparse manifests to apply correctly. +// For instance, if field .spec.foo is set to 10 if not set, then a function of this type could be used to set +// the field to 10 to match the comparison. This is sometimes (often?) easier than updating the semantic equality. +// We often see this in places like RBAC and CRD. Logically it can happen generically too. 
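A minimal caller-side sketch, not part of this patch, of the generic helper combined with a custom defaulting function of the kind described below. The assumption that the API server defaults .spec.replicas to 1 is hypothetical; the function simply mirrors that default before comparison. A nil cache and a nil equality checker fall back to the package defaults (no-op cache and equality.Semantic).

package example

import (
	"context"

	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

var alertmanagersGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "alertmanagers"}

// mimicReplicasDefault mirrors a (hypothetical) server-side default of .spec.replicas=1,
// so a sparse manifest with replicas omitted compares equal to the defaulted live object.
func mimicReplicasDefault(obj *unstructured.Unstructured) {
	if _, found, _ := unstructured.NestedFieldNoCopy(obj.Object, "spec", "replicas"); !found {
		_ = unstructured.SetNestedField(obj.Object, int64(1), "spec", "replicas")
	}
}

func applyAlertmanagerSparse(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
	// nil cache and nil equality checker select the defaults documented above.
	return resourceapply.ApplyUnstructuredResourceImproved(ctx, client, recorder, required, nil, alertmanagersGVR, mimicReplicasDefault, nil)
}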
+type mimicDefaultingFunc func(obj *unstructured.Unstructured) + +func noDefaulting(*unstructured.Unstructured) {} + +// equalityChecker allows for custom equality comparisons. This can be used to allow equality checks to skip certain +// operator managed fields. This capability allows something like .spec.scale to be specified or changed by a component +// like HPA. Use this capability sparingly. Most places ought to just use `equality.Semantic` +type equalityChecker interface { + DeepEqual(existing, required interface{}) bool } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go index 1adb01aee..b17e484a8 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go @@ -4,10 +4,11 @@ import ( "context" "fmt" - "github.com/openshift/library-go/pkg/operator/events" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + + "github.com/openshift/library-go/pkg/operator/events" ) // ApplyKnownUnstructured applies few selected Unstructured types, where it semantic knowledge @@ -20,6 +21,10 @@ func ApplyKnownUnstructured(ctx context.Context, client dynamic.Interface, recor return ApplyPrometheusRule(ctx, client, recorder, obj) case schema.GroupKind{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshotClass"}: return ApplyVolumeSnapshotClass(ctx, client, recorder, obj) + case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "Alertmanager"}: + return ApplyAlertmanager(ctx, client, recorder, obj) + case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "Prometheus"}: + return ApplyPrometheus(ctx, client, recorder, obj) } @@ -35,6 +40,10 @@ func DeleteKnownUnstructured(ctx context.Context, client dynamic.Interface, reco return DeletePrometheusRule(ctx, client, recorder, obj) case schema.GroupKind{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshotClass"}: return DeleteVolumeSnapshotClass(ctx, client, recorder, obj) + case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "Alertmanager"}: + return DeleteAlertmanager(ctx, client, recorder, obj) + case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "Prometheus"}: + return DeletePrometheus(ctx, client, recorder, obj) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go index 43ea9111c..e89974790 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go @@ -63,6 +63,9 @@ func FormatResourceForCLI(obj runtime.Object) string { // GuessObjectGroupVersionKind returns a human readable for the passed runtime object. 
func GuessObjectGroupVersionKind(object runtime.Object) schema.GroupVersionKind { + if object == nil { + return schema.GroupVersionKind{Kind: ""} + } if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 { return gvk } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go index 4c5dcacaa..20e19a78f 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go @@ -1,10 +1,14 @@ package resourcemerge import ( + errorsstdlib "errors" + "fmt" "reflect" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -18,6 +22,48 @@ func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required meta MergeOwnerRefs(modified, &existing.OwnerReferences, required.OwnerReferences) } +func EnsureObjectMetaForUnstructured(modified *bool, existing *unstructured.Unstructured, required *unstructured.Unstructured) error { + + // Ensure metadata field is present on the object. + existingObjectMeta, found, err := unstructured.NestedMap(existing.Object, "metadata") + if err != nil { + return err + } + if !found { + return errorsstdlib.New(fmt.Sprintf("metadata not found in the existing object: %s/%s", existing.GetNamespace(), existing.GetName())) + } + var requiredObjectMeta map[string]interface{} + requiredObjectMeta, found, err = unstructured.NestedMap(required.Object, "metadata") + if err != nil { + return err + } + if !found { + return errorsstdlib.New(fmt.Sprintf("metadata not found in the required object: %s/%s", required.GetNamespace(), required.GetName())) + } + + // Cast the metadata to the correct type. + var existingObjectMetaTyped, requiredObjectMetaTyped metav1.ObjectMeta + err = runtime.DefaultUnstructuredConverter.FromUnstructured(existingObjectMeta, &existingObjectMetaTyped) + if err != nil { + return err + } + err = runtime.DefaultUnstructuredConverter.FromUnstructured(requiredObjectMeta, &requiredObjectMetaTyped) + if err != nil { + return err + } + + // Check if the metadata objects differ. This only checks for selective fields (excluding the resource version, among others). 
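A small illustrative sketch, under the assumption that only labels differ between the two objects, of what the new EnsureObjectMetaForUnstructured helper merges and reports at the call site above; resourceVersion is among the fields it deliberately leaves untouched.

package example

import (
	"fmt"

	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func demoEnsureObjectMeta() error {
	existing := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "monitoring.coreos.com/v1",
		"kind":       "ServiceMonitor",
		"metadata": map[string]interface{}{
			"name":            "example",
			"namespace":       "openshift-example",
			"resourceVersion": "42", // not part of the selective comparison
		},
	}}
	required := existing.DeepCopy()
	required.SetLabels(map[string]string{"app.kubernetes.io/part-of": "example-operator"})

	modified := false
	if err := resourcemerge.EnsureObjectMetaForUnstructured(&modified, existing, required); err != nil {
		return err
	}
	// modified is now true and the label has been merged onto `existing`.
	fmt.Println(modified, existing.GetLabels())
	return nil
}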
+ EnsureObjectMeta(modified, &existingObjectMetaTyped, requiredObjectMetaTyped) + if *modified { + existing.Object["metadata"], err = runtime.DefaultUnstructuredConverter.ToUnstructured(&existingObjectMetaTyped) + if err != nil { + return err + } + } + + return nil +} + // WithCleanLabelsAndAnnotations cleans the metadata off the removal annotations/labels/ownerrefs // (those that end with trailing "-") func WithCleanLabelsAndAnnotations(obj metav1.Object) metav1.Object { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go index 572b915bc..11326c89d 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go @@ -53,3 +53,21 @@ func ReadValidatingAdmissionPolicyBindingV1beta1OrDie(objBytes []byte) *admissio return requiredObj.(*admissionv1beta1.ValidatingAdmissionPolicyBinding) } + +func ReadValidatingAdmissionPolicyV1OrDie(objBytes []byte) *admissionv1.ValidatingAdmissionPolicy { + requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + + return requiredObj.(*admissionv1.ValidatingAdmissionPolicy) +} + +func ReadValidatingAdmissionPolicyBindingV1OrDie(objBytes []byte) *admissionv1.ValidatingAdmissionPolicyBinding { + requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + + return requiredObj.(*admissionv1.ValidatingAdmissionPolicyBinding) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go index 030fce823..cbfc2dd5f 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" "net/http" "sort" "strings" @@ -28,7 +29,7 @@ import ( // ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations. // It will also mirror deletions by deleting destinations. type ResourceSyncController struct { - name string + controllerInstanceName string // syncRuleLock is used to ensure we avoid races on changes to syncing rules syncRuleLock sync.RWMutex // configMapSyncRules is a map from destination location to source location @@ -53,6 +54,7 @@ var _ factory.Controller = &ResourceSyncController{} // NewResourceSyncController creates ResourceSyncController. 
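A usage sketch for the two new v1 readers added above. The embedded ValidatingAdmissionPolicy manifest is a made-up example, not an asset from this repository; the reader panics on malformed input, matching the *OrDie convention used for compiled-in assets.

package example

import (
	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
	admissionv1 "k8s.io/api/admissionregistration/v1"
)

var policyYAML = []byte(`
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
  name: example-policy
spec:
  failurePolicy: Fail
  matchConstraints:
    resourceRules:
    - apiGroups: ["apps"]
      apiVersions: ["v1"]
      operations: ["CREATE", "UPDATE"]
      resources: ["deployments"]
  validations:
  - expression: "object.spec.replicas <= 5"
`)

// loadPolicy decodes a compiled-in manifest into the typed v1 object.
func loadPolicy() *admissionv1.ValidatingAdmissionPolicy {
	return resourceread.ReadValidatingAdmissionPolicyV1OrDie(policyYAML)
}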
func NewResourceSyncController( + instanceName string, operatorConfigClient v1helpers.OperatorClient, kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, secretsGetter corev1client.SecretsGetter, @@ -60,8 +62,8 @@ func NewResourceSyncController( eventRecorder events.Recorder, ) *ResourceSyncController { c := &ResourceSyncController{ - name: "ResourceSyncController", - operatorConfigClient: operatorConfigClient, + controllerInstanceName: factory.ControllerInstanceName(instanceName, "ResourceSync"), + operatorConfigClient: operatorConfigClient, configMapSyncRules: syncRules{}, secretSyncRules: syncRules{}, @@ -85,7 +87,15 @@ func NewResourceSyncController( informers = append(informers, informer.Core().V1().Secrets().Informer()) } - f := factory.New().WithSync(c.Sync).WithSyncContext(c.syncCtx).WithInformers(informers...).ResyncEvery(time.Minute).ToController(c.name, eventRecorder.WithComponentSuffix("resource-sync-controller")) + f := factory.New(). + WithSync(c.Sync). + WithSyncContext(c.syncCtx). + WithInformers(informers...). + ResyncEvery(time.Minute). + ToController( + instanceName, // don't change what is passed here unless you also remove the old FooDegraded condition + eventRecorder.WithComponentSuffix("resource-sync-controller"), + ) c.runFn = f.Run return c @@ -96,7 +106,7 @@ func (c *ResourceSyncController) Run(ctx context.Context, workers int) { } func (c *ResourceSyncController) Name() string { - return c.name + return c.controllerInstanceName } func (c *ResourceSyncController) SyncConfigMap(destination, source ResourceLocation) error { @@ -250,24 +260,26 @@ func (c *ResourceSyncController) Sync(ctx context.Context, syncCtx factory.SyncC } if len(errors) > 0 { - cond := operatorv1.OperatorCondition{ - Type: condition.ResourceSyncControllerDegradedConditionType, - Status: operatorv1.ConditionTrue, - Reason: "Error", - Message: v1helpers.NewMultiLineAggregate(errors).Error(), - } - if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { - return updateError + condition := applyoperatorv1.OperatorStatus(). + WithConditions(applyoperatorv1.OperatorCondition(). + WithType(condition.ResourceSyncControllerDegradedConditionType). + WithStatus(operatorv1.ConditionTrue). + WithReason("Error"). + WithMessage(v1helpers.NewMultiLineAggregate(errors).Error())) + updateErr := c.operatorConfigClient.ApplyOperatorStatus(ctx, c.controllerInstanceName, condition) + if updateErr != nil { + return updateErr } return nil } - cond := operatorv1.OperatorCondition{ - Type: condition.ResourceSyncControllerDegradedConditionType, - Status: operatorv1.ConditionFalse, - } - if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { - return updateError + condition := applyoperatorv1.OperatorStatus(). + WithConditions(applyoperatorv1.OperatorCondition(). + WithType(condition.ResourceSyncControllerDegradedConditionType). 
+ WithStatus(operatorv1.ConditionFalse)) + updateErr := c.operatorConfigClient.ApplyOperatorStatus(ctx, c.controllerInstanceName, condition) + if updateErr != nil { + return updateErr } return nil } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/canonicalize.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/canonicalize.go new file mode 100644 index 000000000..b6a59e243 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/canonicalize.go @@ -0,0 +1,109 @@ +package v1helpers + +import ( + "fmt" + "slices" + "strings" + + operatorv1 "github.com/openshift/api/operator/v1" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/utils/clock" + "k8s.io/utils/ptr" +) + +// ToStaticPodOperator returns the equivalent typed kind for the applyconfiguration. Due to differences in serialization like +// omitempty on strings versus pointers, the returned values can be slightly different. This is an expensive way to diff the +// result, but it is an effective one. +func ToStaticPodOperator(in *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) (*operatorv1.StaticPodOperatorStatus, error) { + if in == nil { + return nil, nil + } + jsonBytes, err := json.Marshal(in) + if err != nil { + return nil, fmt.Errorf("unable to serialize: %w", err) + } + + ret := &operatorv1.StaticPodOperatorStatus{} + if err := json.Unmarshal(jsonBytes, ret); err != nil { + return nil, fmt.Errorf("unable to deserialize: %w", err) + } + + return ret, nil +} + +func SetApplyConditionsLastTransitionTime(clock clock.PassiveClock, newConditions *[]applyoperatorv1.OperatorConditionApplyConfiguration, oldConditions []applyoperatorv1.OperatorConditionApplyConfiguration) { + if newConditions == nil { + return + } + + now := metav1.NewTime(clock.Now()) + for i := range *newConditions { + newCondition := (*newConditions)[i] + + // if the condition status is the same, then the lastTransitionTime doesn't change + if existingCondition := FindApplyCondition(oldConditions, newCondition.Type); existingCondition != nil && ptr.Equal(existingCondition.Status, newCondition.Status) { + newCondition.LastTransitionTime = existingCondition.LastTransitionTime + } + + // backstop to handle upgrade case too. 
If the newCondition doesn't have a lastTransitionTime it needs something + if newCondition.LastTransitionTime == nil { + newCondition.LastTransitionTime = &now + } + + (*newConditions)[i] = newCondition + } +} + +func FindApplyCondition(haystack []applyoperatorv1.OperatorConditionApplyConfiguration, conditionType *string) *applyoperatorv1.OperatorConditionApplyConfiguration { + for i := range haystack { + curr := haystack[i] + if ptr.Equal(curr.Type, conditionType) { + return &curr + } + } + + return nil +} + +func CanonicalizeStaticPodOperatorStatus(obj *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) { + if obj == nil { + return + } + CanonicalizeOperatorStatus(&obj.OperatorStatusApplyConfiguration) + slices.SortStableFunc(obj.NodeStatuses, CompareNodeStatusByNode) +} + +func CanonicalizeOperatorStatus(obj *applyoperatorv1.OperatorStatusApplyConfiguration) { + if obj == nil { + return + } + slices.SortStableFunc(obj.Conditions, CompareOperatorConditionByType) + slices.SortStableFunc(obj.Generations, CompareGenerationStatusByKeys) +} + +func CompareOperatorConditionByType(a, b applyoperatorv1.OperatorConditionApplyConfiguration) int { + return strings.Compare(ptr.Deref(a.Type, ""), ptr.Deref(b.Type, "")) +} + +func CompareGenerationStatusByKeys(a, b applyoperatorv1.GenerationStatusApplyConfiguration) int { + if c := strings.Compare(ptr.Deref(a.Group, ""), ptr.Deref(b.Group, "")); c != 0 { + return c + } + if c := strings.Compare(ptr.Deref(a.Resource, ""), ptr.Deref(b.Resource, "")); c != 0 { + return c + } + if c := strings.Compare(ptr.Deref(a.Namespace, ""), ptr.Deref(b.Namespace, "")); c != 0 { + return c + } + if c := strings.Compare(ptr.Deref(a.Name, ""), ptr.Deref(b.Name, "")); c != 0 { + return c + } + + return 0 +} + +func CompareNodeStatusByNode(a, b applyoperatorv1.NodeStatusApplyConfiguration) int { + return strings.Compare(ptr.Deref(a.NodeName, ""), ptr.Deref(b.NodeName, "")) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go index 1dd17e759..cd3103124 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go @@ -2,6 +2,7 @@ package v1helpers import ( "fmt" + "reflect" "time" corev1 "k8s.io/api/core/v1" @@ -18,6 +19,10 @@ type KubeInformersForNamespaces interface { InformersFor(namespace string) informers.SharedInformerFactory Namespaces() sets.Set[string] + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[string]map[reflect.Type]bool + ConfigMapLister() corev1listers.ConfigMapLister SecretLister() corev1listers.SecretLister @@ -42,6 +47,16 @@ func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces . type kubeInformersForNamespaces map[string]informers.SharedInformerFactory +// WaitForCacheSync waits for all started informers' cache were synced. 
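A brief sketch, not taken from this patch, of how the new canonicalize helpers are intended to be used: conditions built in arbitrary order are sorted by type, so semantically equal apply configurations serialize identically.

package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	"github.com/openshift/library-go/pkg/operator/v1helpers"
)

func canonicalStatus() *applyoperatorv1.OperatorStatusApplyConfiguration {
	status := applyoperatorv1.OperatorStatus().
		WithConditions(
			applyoperatorv1.OperatorCondition().
				WithType("FooDegraded").
				WithStatus(operatorv1.ConditionFalse),
			applyoperatorv1.OperatorCondition().
				WithType("BarAvailable").
				WithStatus(operatorv1.ConditionTrue),
		)

	// Sorts Conditions by Type (and Generations by group/resource/namespace/name),
	// giving a stable serialization regardless of insertion order.
	v1helpers.CanonicalizeOperatorStatus(status)
	return status
}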
+func (i kubeInformersForNamespaces) WaitForCacheSync(stopCh <-chan struct{}) map[string]map[reflect.Type]bool { + ret := map[string]map[reflect.Type]bool{} + for namespace, informerFactory := range i { + ret[namespace] = informerFactory.WaitForCacheSync(stopCh) + } + + return ret +} + func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) { for _, informer := range i { informer.Start(stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go index 1dd9c641a..50bfae945 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go @@ -4,7 +4,11 @@ import ( "context" operatorv1 "github.com/openshift/api/operator/v1" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + "github.com/openshift/library-go/pkg/apiserver/jsonpatch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/cache" ) @@ -20,6 +24,11 @@ type OperatorClient interface { UpdateOperatorSpec(ctx context.Context, oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error) // UpdateOperatorStatus updates the status of the operator, assuming the given resource version. UpdateOperatorStatus(ctx context.Context, oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error) + + ApplyOperatorSpec(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorSpecApplyConfiguration) (err error) + ApplyOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) (err error) + + PatchOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) } type StaticPodOperatorClient interface { @@ -34,6 +43,11 @@ type StaticPodOperatorClient interface { UpdateStaticPodOperatorStatus(ctx context.Context, resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error) // UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version. UpdateStaticPodOperatorSpec(ctx context.Context, resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error) + + ApplyStaticPodOperatorSpec(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.StaticPodOperatorSpecApplyConfiguration) (err error) + ApplyStaticPodOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) (err error) + + PatchStaticOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) } type OperatorClientWithFinalizers interface { @@ -43,3 +57,11 @@ type OperatorClientWithFinalizers interface { // RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise. 
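A short caller-side sketch, with assumed namespaces, of the new WaitForCacheSync method on KubeInformersForNamespaces being used to gate a controller's sync loop on informer cache sync.

package example

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/operator/v1helpers"
)

func startInformers(ctx context.Context, kubeClient kubernetes.Interface) (v1helpers.KubeInformersForNamespaces, error) {
	informers := v1helpers.NewKubeInformersForNamespaces(kubeClient, "", "openshift-example")
	informers.Start(ctx.Done())

	// Block until every informer in every namespace has synced, or ctx is cancelled.
	for namespace, synced := range informers.WaitForCacheSync(ctx.Done()) {
		for informerType, ok := range synced {
			if !ok {
				return nil, fmt.Errorf("informer %v in namespace %q failed to sync", informerType, namespace)
			}
		}
	}
	return informers, nil
}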
RemoveFinalizer(ctx context.Context, finalizer string) error } + +type Foo interface { + ExtractOperatorSpec(fieldManager string) (*applyoperatorv1.OperatorSpecApplyConfiguration, error) + ExtractOperatorStatus(fieldManager string) (*applyoperatorv1.OperatorStatusApplyConfiguration, error) +} + +type OperatorSpecExtractor func(obj *unstructured.Unstructured, fieldManager string) (*applyoperatorv1.OperatorSpecApplyConfiguration, error) +type OperatorStatusExtractor func(obj *unstructured.Unstructured, fieldManager string) (*applyoperatorv1.OperatorStatusApplyConfiguration, error) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go index 7fa64719f..68694179a 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -14,8 +14,12 @@ import ( "k8s.io/client-go/kubernetes" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/utils/ptr" operatorv1 "github.com/openshift/api/operator/v1" + v1 "github.com/openshift/api/operator/v1" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + "github.com/openshift/library-go/pkg/apiserver/jsonpatch" ) // NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests. @@ -81,7 +85,7 @@ func (fakeSharedIndexInformer) SetTransform(f cache.TransformFunc) error { func NewFakeStaticPodOperatorClient( staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, - triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) *fakeStaticPodOperatorClient { return &fakeStaticPodOperatorClient{ fakeStaticPodOperatorSpec: staticPodSpec, fakeStaticPodOperatorStatus: staticPodStatus, @@ -97,6 +101,8 @@ type fakeStaticPodOperatorClient struct { resourceVersion string triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error + + patchedOperatorStatus *jsonpatch.PatchSet } func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { @@ -155,6 +161,54 @@ func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(ctx context.Co return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil } +func (c *fakeStaticPodOperatorClient) ApplyOperatorSpec(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorSpecApplyConfiguration) (err error) { + return nil +} + +func (c *fakeStaticPodOperatorClient) ApplyOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) (err error) { + if c.triggerStatusUpdateError != nil { + operatorStatus := &operatorv1.StaticPodOperatorStatus{OperatorStatus: *mergeOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration)} + if err := c.triggerStatusUpdateError("", operatorStatus); err != nil { + return err + } + } + c.fakeStaticPodOperatorStatus = &operatorv1.StaticPodOperatorStatus{ + OperatorStatus: 
*mergeOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration), + } + return nil +} + +func (c *fakeStaticPodOperatorClient) ApplyStaticPodOperatorSpec(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.StaticPodOperatorSpecApplyConfiguration) (err error) { + return nil +} + +func (c *fakeStaticPodOperatorClient) ApplyStaticPodOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) (err error) { + if c.triggerStatusUpdateError != nil { + operatorStatus := mergeStaticPodOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration) + if err := c.triggerStatusUpdateError("", operatorStatus); err != nil { + return err + } + } + c.fakeStaticPodOperatorStatus = mergeStaticPodOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration) + return nil +} + +func (c *fakeStaticPodOperatorClient) PatchOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) { + return nil +} + +func (c *fakeStaticPodOperatorClient) PatchStaticOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) { + if c.triggerStatusUpdateError != nil { + return c.triggerStatusUpdateError("", nil) + } + c.patchedOperatorStatus = jsonPatch + return nil +} + +func (c *fakeStaticPodOperatorClient) GetPatchedOperatorStatus() *jsonpatch.PatchSet { + return c.patchedOperatorStatus +} + func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, c.resourceVersion, nil } @@ -210,11 +264,11 @@ func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { } // NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
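A hedged, test-style sketch of the new Apply path on the fake static pod operator client; the controller name and condition type are illustrative only.

package example

import (
	"context"
	"testing"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	"github.com/openshift/library-go/pkg/operator/v1helpers"
)

func TestApplyOperatorStatusMergesConditions(t *testing.T) {
	client := v1helpers.NewFakeStaticPodOperatorClient(
		&operatorv1.StaticPodOperatorSpec{},
		&operatorv1.StaticPodOperatorStatus{},
		nil, nil,
	)

	desired := applyoperatorv1.OperatorStatus().
		WithConditions(applyoperatorv1.OperatorCondition().
			WithType("ExampleDegraded").
			WithStatus(operatorv1.ConditionFalse))

	if err := client.ApplyOperatorStatus(context.Background(), "ExampleController", desired); err != nil {
		t.Fatal(err)
	}

	_, status, _, err := client.GetOperatorState()
	if err != nil {
		t.Fatal(err)
	}
	if len(status.Conditions) != 1 || status.Conditions[0].Type != "ExampleDegraded" {
		t.Fatalf("unexpected conditions: %+v", status.Conditions)
	}
}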
-func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { +func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) *fakeOperatorClient { return NewFakeOperatorClientWithObjectMeta(nil, spec, status, triggerErr) } -func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { +func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) *fakeOperatorClient { return &fakeOperatorClient{ fakeOperatorSpec: spec, fakeOperatorStatus: status, @@ -230,6 +284,8 @@ type fakeOperatorClient struct { fakeObjectMeta *metav1.ObjectMeta resourceVersion string triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error + + patchedOperatorStatus *jsonpatch.PatchSet } func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { @@ -283,6 +339,27 @@ func (c *fakeOperatorClient) UpdateOperatorSpec(ctx context.Context, resourceVer return c.fakeOperatorSpec, c.resourceVersion, nil } +func (c *fakeOperatorClient) ApplyOperatorSpec(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorSpecApplyConfiguration) (err error) { + return nil +} + +func (c *fakeOperatorClient) ApplyOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) (err error) { + c.fakeOperatorStatus = mergeOperatorStatusApplyConfiguration(c.fakeOperatorStatus, applyConfiguration) + return nil +} + +func (c *fakeOperatorClient) PatchOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) { + if c.triggerStatusUpdateError != nil { + return c.triggerStatusUpdateError("", nil) + } + c.patchedOperatorStatus = jsonPatch + return nil +} + +func (c *fakeOperatorClient) GetPatchedOperatorStatus() *jsonpatch.PatchSet { + return c.patchedOperatorStatus +} + func (c *fakeOperatorClient) EnsureFinalizer(ctx context.Context, finalizer string) error { if c.fakeObjectMeta == nil { c.fakeObjectMeta = &metav1.ObjectMeta{} @@ -311,3 +388,93 @@ func (c *fakeOperatorClient) RemoveFinalizer(ctx context.Context, finalizer stri func (c *fakeOperatorClient) SetObjectMeta(meta *metav1.ObjectMeta) { c.fakeObjectMeta = meta } + +func mergeOperatorStatusApplyConfiguration(currentOperatorStatus *v1.OperatorStatus, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) *v1.OperatorStatus { + status := &v1.OperatorStatus{ + ObservedGeneration: ptr.Deref(applyConfiguration.ObservedGeneration, currentOperatorStatus.ObservedGeneration), + Version: ptr.Deref(applyConfiguration.Version, currentOperatorStatus.Version), + ReadyReplicas: ptr.Deref(applyConfiguration.ReadyReplicas, currentOperatorStatus.ReadyReplicas), + LatestAvailableRevision: ptr.Deref(applyConfiguration.LatestAvailableRevision, currentOperatorStatus.LatestAvailableRevision), + } + + for _, condition := range applyConfiguration.Conditions { + newCondition := operatorv1.OperatorCondition{ + Type: ptr.Deref(condition.Type, ""), + Status: ptr.Deref(condition.Status, ""), + Reason: ptr.Deref(condition.Reason, ""), 
+ Message: ptr.Deref(condition.Message, ""), + } + status.Conditions = append(status.Conditions, newCondition) + } + var existingConditions []v1.OperatorCondition + for _, condition := range currentOperatorStatus.Conditions { + var foundCondition bool + for _, statusCondition := range status.Conditions { + if condition.Type == statusCondition.Type { + foundCondition = true + break + } + } + if !foundCondition { + existingConditions = append(existingConditions, condition) + } + } + status.Conditions = append(status.Conditions, existingConditions...) + + for _, generation := range applyConfiguration.Generations { + newGeneration := operatorv1.GenerationStatus{ + Group: ptr.Deref(generation.Group, ""), + Resource: ptr.Deref(generation.Resource, ""), + Namespace: ptr.Deref(generation.Namespace, ""), + Name: ptr.Deref(generation.Name, ""), + LastGeneration: ptr.Deref(generation.LastGeneration, 0), + Hash: ptr.Deref(generation.Hash, ""), + } + status.Generations = append(status.Generations, newGeneration) + } + var existingGenerations []v1.GenerationStatus + for _, generation := range currentOperatorStatus.Generations { + var foundGeneration bool + for _, statusGeneration := range status.Generations { + if generation.Namespace == statusGeneration.Namespace && generation.Name == statusGeneration.Name { + foundGeneration = true + break + } + } + if !foundGeneration { + existingGenerations = append(existingGenerations, generation) + } + } + status.Generations = append(status.Generations, existingGenerations...) + + return status +} + +func mergeStaticPodOperatorStatusApplyConfiguration(currentOperatorStatus *v1.OperatorStatus, applyConfiguration *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) *v1.StaticPodOperatorStatus { + status := &v1.StaticPodOperatorStatus{ + OperatorStatus: *mergeOperatorStatusApplyConfiguration(currentOperatorStatus, &applyConfiguration.OperatorStatusApplyConfiguration), + } + + for _, nodeStatus := range applyConfiguration.NodeStatuses { + newNodeStatus := operatorv1.NodeStatus{ + NodeName: ptr.Deref(nodeStatus.NodeName, ""), + CurrentRevision: ptr.Deref(nodeStatus.CurrentRevision, 0), + TargetRevision: ptr.Deref(nodeStatus.TargetRevision, 0), + LastFailedRevision: ptr.Deref(nodeStatus.LastFailedRevision, 0), + LastFailedTime: nil, + LastFailedReason: ptr.Deref(nodeStatus.LastFailedReason, ""), + LastFailedCount: ptr.Deref(nodeStatus.LastFailedCount, 0), + LastFallbackCount: ptr.Deref(nodeStatus.LastFallbackCount, 0), + LastFailedRevisionErrors: nil, + } + if nodeStatus.LastFailedTime != nil { + newNodeStatus.LastFailedTime = nodeStatus.LastFailedTime + } + for _, curr := range nodeStatus.LastFailedRevisionErrors { + newNodeStatus.LastFailedRevisionErrors = append(newNodeStatus.LastFailedRevisionErrors, curr) + } + status.NodeStatuses = append(status.NodeStatuses, newNodeStatus) + } + + return status +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go index a9914fb1a..6f4298483 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go @@ -14,6 +14,12 @@ package monitoring -const ( +// GroupName is set to var instead of const, since this provides the ability for clients importing the module - +// github.com/prometheus-operator/prometheus-operator/pkg/apis to manage the operator's 
objects in a different +// API group +// +// Use `ldflags` in the client side, e.g.: +// go run -ldflags="-s -X github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring.GroupName=monitoring.example.com" ./example/client/. +var ( GroupName = "monitoring.coreos.com" ) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go index 78815919f..f99dedf43 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -83,17 +83,15 @@ type AlertmanagerSpec struct { Version string `json:"version,omitempty"` // Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. - // Deprecated: use 'image' instead. The image tag can be specified - // as part of the image URL. + // Deprecated: use 'image' instead. The image tag can be specified as part of the image URL. Tag string `json:"tag,omitempty"` // SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. - // Deprecated: use 'image' instead. The image digest can be specified - // as part of the image URL. + // Deprecated: use 'image' instead. The image digest can be specified as part of the image URL. SHA string `json:"sha,omitempty"` // Base image that is used to deploy pods, without tag. - // Deprecated: use 'image' instead + // Deprecated: use 'image' instead. BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries @@ -208,6 +206,9 @@ type AlertmanagerSpec struct { ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"` // Interval between gossip attempts. ClusterGossipInterval GoDuration `json:"clusterGossipInterval,omitempty"` + // Defines the identifier that uniquely identifies the Alertmanager cluster. + // You should only set it when the Alertmanager cluster includes Alertmanager instances which are external to this Alertmanager resource. In practice, the addresses of the external instances are provided via the `.spec.additionalPeers` field. + ClusterLabel *string `json:"clusterLabel,omitempty"` // Interval between pushpull attempts. ClusterPushpullInterval GoDuration `json:"clusterPushpullInterval,omitempty"` // Timeout for cluster peering. @@ -239,14 +240,27 @@ type AlertmanagerSpec struct { HostAliases []HostAlias `json:"hostAliases,omitempty"` // Defines the web command line flags when starting Alertmanager. Web *AlertmanagerWebSpec `json:"web,omitempty"` - // EXPERIMENTAL: alertmanagerConfiguration specifies the configuration of Alertmanager. + // alertmanagerConfiguration specifies the configuration of Alertmanager. + // // If defined, it takes precedence over the `configSecret` field. - // This field may change in future releases. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. 
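Since GroupName is now a variable, downstream code that builds GVRs for these CRDs can derive the group from it instead of hard-coding the string; a minimal sketch assuming the ldflags override shown above.

package example

import (
	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// prometheusGVR follows whatever group was selected at build time via
// -ldflags "-X .../pkg/apis/monitoring.GroupName=monitoring.example.com".
var prometheusGVR = schema.GroupVersionResource{
	Group:    monitoring.GroupName,
	Version:  "v1",
	Resource: "prometheuses",
}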
+ // + //+optional AlertmanagerConfiguration *AlertmanagerConfiguration `json:"alertmanagerConfiguration,omitempty"` // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod. // If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials. // +optional AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + // Enable access to Alertmanager feature flags. By default, no features are enabled. + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // It requires Alertmanager >= 0.27.0. + // +optional + EnableFeatures []string `json:"enableFeatures,omitempty"` } // AlertmanagerConfigMatcherStrategy defines the strategy used by AlertmanagerConfig objects to match alerts. diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go index ee03102d0..aa0217501 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go @@ -48,30 +48,72 @@ func (l *PodMonitor) DeepCopyObject() runtime.Object { // +k8s:openapi-gen=true type PodMonitorSpec struct { // The label to use to retrieve the job name from. + // `jobLabel` selects the label from the associated Kubernetes `Pod` + // object which will be used as the `job` label for all metrics. + // + // For example if `jobLabel` is set to `foo` and the Kubernetes `Pod` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics. + // + // If the value of this field is empty, the `job` label of the metrics + // defaults to the namespace and name of the PodMonitor object (e.g. `/`). JobLabel string `json:"jobLabel,omitempty"` - // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. + + // `podTargetLabels` defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // PodTargetLabels []string `json:"podTargetLabels,omitempty"` - // A list of endpoints allowed as part of this PodMonitor. + + // List of endpoints part of this PodMonitor. + // + // +optional PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` - // Selector to select Pod objects. + + // Label selector to select the Kubernetes `Pod` objects. Selector metav1.LabelSelector `json:"selector"` - // Selector to select which namespaces the Endpoints objects are discovered from. + // Selector to select which namespaces the Kubernetes `Pods` objects + // are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` - // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + + // `sampleLimit` defines a per-scrape limit on the number of scraped samples + // that will be accepted. + // + // +optional SampleLimit *uint64 `json:"sampleLimit,omitempty"` - // TargetLimit defines a limit on the number of scraped targets that will be accepted. + + // `targetLimit` defines a limit on the number of scraped targets that will + // be accepted. 
+ // // +optional TargetLimit *uint64 `json:"targetLimit,omitempty"` + + // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // Per-scrape limit on number of labels that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. + // + // It requires Prometheus >= v2.27.0. + // // +optional LabelLimit *uint64 `json:"labelLimit,omitempty"` // Per-scrape limit on length of labels name that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. + // + // It requires Prometheus >= v2.27.0. + // // +optional LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` // Per-scrape limit on length of labels value that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. + // + // It requires Prometheus >= v2.27.0. + // // +optional LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` // Per-scrape limit on the number of targets dropped by relabeling @@ -81,9 +123,27 @@ type PodMonitorSpec struct { // // +optional KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` - // Attaches node metadata to discovered targets. - // Requires Prometheus v2.35.0 and above. + + // `attachMetadata` defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.37.0. + // + // +optional AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // When defined, bodySizeLimit specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` } // PodMonitorList is a list of PodMonitors. @@ -102,66 +162,151 @@ func (l *PodMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } -// PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics. +// PodMetricsEndpoint defines an endpoint serving Prometheus metrics to be scraped by +// Prometheus. +// // +k8s:openapi-gen=true type PodMetricsEndpoint struct { - // Name of the pod port this endpoint refers to. Mutually exclusive with targetPort. + // Name of the Pod port which this endpoint refers to. + // + // It takes precedence over `targetPort`. Port string `json:"port,omitempty"` - // Deprecated: Use 'port' instead. + + // Name or number of the target port of the `Pod` object behind the Service, the + // port must be specified with container port property. + // + // Deprecated: use 'port' instead. TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` - // HTTP path to scrape for metrics. + + // HTTP path from which to scrape for metrics. + // // If empty, Prometheus uses the default value (e.g. `/metrics`). Path string `json:"path,omitempty"` + // HTTP scheme to use for scraping. - // `http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. 
+ // + // `http` and `https` are the expected values unless you rewrite the + // `__scheme__` label via relabeling. + // // If empty, Prometheus uses the default value `http`. + // // +kubebuilder:validation:Enum=http;https Scheme string `json:"scheme,omitempty"` - // Optional HTTP URL parameters + + // `params` define optional HTTP URL parameters. Params map[string][]string `json:"params,omitempty"` - // Interval at which metrics should be scraped - // If not specified Prometheus' global scrape interval is used. + + // Interval at which Prometheus scrapes the metrics from the target. + // + // If empty, Prometheus uses the global scrape interval. Interval Duration `json:"interval,omitempty"` - // Timeout after which the scrape is ended - // If not specified, the Prometheus global scrape interval is used. + + // Timeout after which Prometheus considers the scrape to be failed. + // + // If empty, Prometheus uses the global scrape timeout unless it is less + // than the target's scrape interval value in which the latter is used. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` - // TLS configuration to use when scraping the endpoint. - TLSConfig *PodMetricsEndpointTLSConfig `json:"tlsConfig,omitempty"` - // Secret to mount to read bearer token for scraping targets. The secret - // needs to be in the same namespace as the pod monitor and accessible by - // the Prometheus Operator. + + // TLS configuration to use when scraping the target. + // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // `bearerTokenSecret` specifies a key of a Secret containing the bearer + // token for scraping targets. The secret needs to be in the same namespace + // as the PodMonitor object and readable by the Prometheus Operator. + // + // +optional + // + // Deprecated: use `authorization` instead. BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` - // HonorLabels chooses the metric's labels on collisions with target labels. + + // When true, `honorLabels` preserves the metric's labels when they collide + // with the target's labels. HonorLabels bool `json:"honorLabels,omitempty"` - // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + + // `honorTimestamps` controls whether Prometheus preserves the timestamps + // when exposed by the target. + // + // +optional HonorTimestamps *bool `json:"honorTimestamps,omitempty"` - // BasicAuth allow an endpoint to authenticate over basic authentication. - // More info: https://prometheus.io/docs/operating/configuration/#endpoint + + // `trackTimestampsStaleness` defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + + // `basicAuth` configures the Basic Authentication credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `authorization`, or `oauth2`. + // + // +optional BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + + // `oauth2` configures the OAuth2 settings to use when scraping the target. + // + // It requires Prometheus >= 2.27.0. + // + // Cannot be set at the same time as `authorization`, or `basicAuth`. 
+ // + // +optional OAuth2 *OAuth2 `json:"oauth2,omitempty"` - // Authorization section for this endpoint + + // `authorization` configures the Authorization header credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // + // +optional Authorization *SafeAuthorization `json:"authorization,omitempty"` - // MetricRelabelConfigs to apply to samples before ingestion. - MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` - // RelabelConfigs to apply to samples before scraping. - // Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields. + + // `metricRelabelings` configures the relabeling rules to apply to the + // samples before ingestion. + // + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + + // `relabelings` configures the relabeling rules to apply the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` - // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // `proxyURL` configures the HTTP Proxy URL (e.g. + // "http://proxyserver:2195") to go through when scraping the target. + // + // +optional ProxyURL *string `json:"proxyUrl,omitempty"` - // FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. + + // `followRedirects` defines whether the scrape requests should follow HTTP + // 3xx redirects. + // + // +optional FollowRedirects *bool `json:"followRedirects,omitempty"` - // Whether to enable HTTP2. + + // `enableHttp2` can be used to disable HTTP2 when scraping the target. + // + // +optional EnableHttp2 *bool `json:"enableHttp2,omitempty"` - // Drop pods that are not running. (Failed, Succeeded). Enabled by default. + + // When true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional FilterRunning *bool `json:"filterRunning,omitempty"` } - -// PodMetricsEndpointTLSConfig specifies TLS configuration parameters. -// +k8s:openapi-gen=true -type PodMetricsEndpointTLSConfig struct { - SafeTLSConfig `json:",inline"` -} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go index 59b85ae6a..4e8427c6c 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go @@ -64,7 +64,7 @@ type ProbeSpec struct { // If not specified, the Prometheus global scrape timeout is used. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` // TLS configuration to use when scraping the endpoint. 
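A hedged migration sketch for consumers of these vendored types after the bump to monitoring v0.74.0: the relabeling fields now hold values rather than pointers, and the endpoint-specific TLS wrapper types are replaced by the shared SafeTLSConfig. The field values below are purely illustrative.

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func exampleEndpoint() monitoringv1.PodMetricsEndpoint {
	return monitoringv1.PodMetricsEndpoint{
		Port: "metrics",
		Path: "/metrics",
		// Previously []*monitoringv1.RelabelConfig{{...}}; the elements are now plain values.
		MetricRelabelConfigs: []monitoringv1.RelabelConfig{
			{Action: "labeldrop", Regex: "tmp_.*"},
		},
		// Previously *monitoringv1.PodMetricsEndpointTLSConfig, which only wrapped
		// SafeTLSConfig and has been removed upstream.
		TLSConfig: &monitoringv1.SafeTLSConfig{},
	}
}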
- TLSConfig *ProbeTLSConfig `json:"tlsConfig,omitempty"` + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` // Secret to mount to read bearer token for scraping targets. The secret // needs to be in the same namespace as the probe and accessible by // the Prometheus Operator. @@ -75,7 +75,7 @@ type ProbeSpec struct { // OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. OAuth2 *OAuth2 `json:"oauth2,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. - MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` // Authorization section for this endpoint Authorization *SafeAuthorization `json:"authorization,omitempty"` // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. @@ -84,6 +84,16 @@ type ProbeSpec struct { // TargetLimit defines a limit on the number of scraped targets that will be accepted. // +optional TargetLimit *uint64 `json:"targetLimit,omitempty"` + // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` // Per-scrape limit on number of labels that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. // +optional @@ -103,6 +113,11 @@ type ProbeSpec struct { // // +optional KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` } // ProbeTargets defines how to discover the probed targets. @@ -151,7 +166,7 @@ type ProbeTargetStaticConfig struct { // RelabelConfigs to apply to the label set of the targets before it gets // scraped. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"` + RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"` } // ProbeTargetIngress defines the set of Ingress objects considered for probing. @@ -169,7 +184,7 @@ type ProbeTargetIngress struct { // probed URL. // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"` + RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"` } // ProberSpec contains specification parameters for the Prober used for probing. @@ -205,9 +220,3 @@ type ProbeList struct { func (l *ProbeList) DeepCopyObject() runtime.Object { return l.DeepCopy() } - -// ProbeTLSConfig specifies TLS configuration parameters for the prober. 
-// +k8s:openapi-gen=true -type ProbeTLSConfig struct { - SafeTLSConfig `json:",inline"` -} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go index fff9bf06d..c8aed40be 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -17,10 +17,12 @@ package v1 import ( "strings" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -30,16 +32,29 @@ const ( PrometheusKindKey = "prometheus" ) +// ScrapeProtocol represents a protocol used by Prometheus for scraping metrics. +// Supported values are: +// * `OpenMetricsText0.0.1` +// * `OpenMetricsText1.0.0` +// * `PrometheusProto` +// * `PrometheusText0.0.4` +// +kubebuilder:validation:Enum=PrometheusProto;OpenMetricsText0.0.1;OpenMetricsText1.0.0;PrometheusText0.0.4 +type ScrapeProtocol string + // PrometheusInterface is used by Prometheus and PrometheusAgent to share common methods, e.g. config generation. // +k8s:deepcopy-gen=false type PrometheusInterface interface { metav1.ObjectMetaAccessor - GetTypeMeta() metav1.TypeMeta + schema.ObjectKind + GetCommonPrometheusFields() CommonPrometheusFields SetCommonPrometheusFields(CommonPrometheusFields) + GetStatus() PrometheusStatus } +var _ = PrometheusInterface(&Prometheus{}) + func (l *Prometheus) GetCommonPrometheusFields() CommonPrometheusFields { return l.Spec.CommonPrometheusFields } @@ -48,14 +63,34 @@ func (l *Prometheus) SetCommonPrometheusFields(f CommonPrometheusFields) { l.Spec.CommonPrometheusFields = f } -func (l *Prometheus) GetTypeMeta() metav1.TypeMeta { - return l.TypeMeta -} - func (l *Prometheus) GetStatus() PrometheusStatus { return l.Status } +// +kubebuilder:validation:Enum=OnResource;OnShard +type AdditionalLabelSelectors string + +const ( + // Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards). + ResourceNameLabelSelector AdditionalLabelSelectors = "OnResource" + + // Automatically add a label selector that will select all pods matching the same shard. + ShardAndResourceNameLabelSelector AdditionalLabelSelectors = "OnShard" +) + +type CoreV1TopologySpreadConstraint v1.TopologySpreadConstraint + +type TopologySpreadConstraint struct { + CoreV1TopologySpreadConstraint `json:",inline"` + + //+optional + // Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint. + AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"` +} + +// +kubebuilder:validation:MinLength:=1 +type EnableFeature string + // CommonPrometheusFields are the options available to both the Prometheus server and agent. // +k8s:deepcopy-gen=true type CommonPrometheusFields struct { @@ -89,9 +124,8 @@ type CommonPrometheusFields struct { // namespace only. ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` - // *Experimental* PodMonitors to be selected for target discovery. An empty - // label selector matches all objects. 
A null label selector matches no - // objects. + // PodMonitors to be selected for target discovery. An empty label selector + // matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. @@ -107,9 +141,8 @@ type CommonPrometheusFields struct { // namespace only. PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` - // *Experimental* Probes to be selected for target discovery. An empty - // label selector matches all objects. A null label selector matches no - // objects. + // Probes to be selected for target discovery. An empty label selector + // matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. @@ -120,14 +153,13 @@ type CommonPrometheusFields struct { // of the custom resource definition. It is recommended to use // `spec.additionalScrapeConfigs` instead. ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"` - // *Experimental* Namespaces to match for Probe discovery. An empty label + // Namespaces to match for Probe discovery. An empty label // selector matches all namespaces. A null label selector matches the // current namespace only. ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"` - // *Experimental* ScrapeConfigs to be selected for target discovery. An - // empty label selector matches all objects. A null label selector matches - // no objects. + // ScrapeConfigs to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. @@ -137,10 +169,18 @@ type CommonPrometheusFields struct { // This behavior is *deprecated* and will be removed in the next major version // of the custom resource definition. It is recommended to use // `spec.additionalScrapeConfigs` instead. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional ScrapeConfigSelector *metav1.LabelSelector `json:"scrapeConfigSelector,omitempty"` // Namespaces to match for ScrapeConfig discovery. An empty label selector // matches all namespaces. A null label selector matches the current - // current namespace only. + // namespace only. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional ScrapeConfigNamespaceSelector *metav1.LabelSelector `json:"scrapeConfigNamespaceSelector,omitempty"` // Version of Prometheus being deployed. The operator uses this information @@ -183,7 +223,7 @@ type CommonPrometheusFields struct { // Default: 1 // +optional Replicas *int32 `json:"replicas,omitempty"` - // EXPERIMENTAL: Number of shards to distribute targets onto. `spec.replicas` + // Number of shards to distribute targets onto. `spec.replicas` // multiplied by `spec.shards` is the total number of Pods created. // // Note that scaling down shards will not reshard data onto remaining @@ -229,6 +269,17 @@ type CommonPrometheusFields struct { // Number of seconds to wait until a scrape request times out. 
ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + // The protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // The labels to add to any time series or alerts when communicating with // external systems (federation, remote storage, Alertmanager). // Labels defined by `spec.replicaExternalLabelName` and @@ -254,7 +305,10 @@ type CommonPrometheusFields struct { // that this behaviour may break at any time without notice. // // For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/ - EnableFeatures []string `json:"enableFeatures,omitempty"` + // + // +listType:=set + // +optional + EnableFeatures []EnableFeature `json:"enableFeatures,omitempty"` // The external URL under which the Prometheus service is externally // available. This is necessary to generate correct URLs (for instance if @@ -281,6 +335,14 @@ type CommonPrometheusFields struct { // container, that are generated as a result of StorageSpec objects. VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + // The field controls if and how PVCs are deleted during the lifecycle of a StatefulSet. + // The default behavior is all PVCs are retained. + // This is an alpha field from kubernetes 1.23 until 1.26 and a beta field from 1.26. + // It requires enabling the StatefulSetAutoDeletePVC feature gate. + // + // +optional + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + // Defines the configuration of the Prometheus web server. Web *PrometheusWebSpec `json:"web,omitempty"` @@ -294,6 +356,14 @@ type CommonPrometheusFields struct { // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod. + // If the field isn't set, the operator mounts the service account token by default. + // + // **Warning:** be aware that by default, Prometheus requires the service account token for Kubernetes service discovery. + // It is possible to use strategic merge patch to project the service account token into the 'prometheus' container. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + // Secrets is a list of Secrets in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // Each Secret is added to the StatefulSet definition as a volume named `secret-`. @@ -311,9 +381,10 @@ type CommonPrometheusFields struct { // Defines the Pods' tolerations if specified. // +optional Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // Defines the pod's topology spread constraints if specified. - // +optional - TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + //+optional + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` // Defines the list of remote write configurations. // +optional @@ -420,7 +491,7 @@ type CommonPrometheusFields struct { // object. 
IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` - // When not empty, a label will be added to + // When not empty, a label will be added to: // // 1. All metrics scraped from `ServiceMonitor`, `PodMonitor`, `Probe` and `ScrapeConfig` objects. // 2. All metrics generated from recording rules defined in `PrometheusRule` objects. @@ -431,13 +502,13 @@ type CommonPrometheusFields struct { // // The label's name is this field's value. // The label's value is the namespace of the `ServiceMonitor`, - // `PodMonitor`, `Probe` or `PrometheusRule` object. + // `PodMonitor`, `Probe`, `PrometheusRule` or `ScrapeConfig` object. EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` // When defined, enforcedSampleLimit specifies a global limit on the number // of scraped samples that will be accepted. This overrides any // `spec.sampleLimit` set by ServiceMonitor, PodMonitor, Probe objects - // unless `spec.sampleLimit` is greater than zero and less than than + // unless `spec.sampleLimit` is greater than zero and less than // `spec.enforcedSampleLimit`. // // It is meant to be used by admins to keep the overall number of @@ -564,9 +635,10 @@ type CommonPrometheusFields struct { // +optional PodTargetLabels []string `json:"podTargetLabels,omitempty"` - // EXPERIMENTAL: TracingConfig configures tracing in Prometheus. This is an - // experimental feature, it may change in any upcoming release in a - // breaking way. + // TracingConfig configures tracing in Prometheus. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. // // +optional TracingConfig *PrometheusTracingConfig `json:"tracingConfig,omitempty"` @@ -607,6 +679,54 @@ type CommonPrometheusFields struct { // // +optional KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // Defines the strategy used to reload the Prometheus configuration. + // If not specified, the configuration is reloaded using the /-/reload HTTP endpoint. + // +optional + ReloadStrategy *ReloadStrategyType `json:"reloadStrategy,omitempty"` + + // Defines the maximum time that the `prometheus` container's startup probe will wait before being considered failed. The startup probe will return success after the WAL replay is complete. + // If set, the value should be greater than 60 (seconds). Otherwise it will be equal to 600 seconds (15 minutes). + // +optional + // +kubebuilder:validation:Minimum=60 + MaximumStartupDurationSeconds *int32 `json:"maximumStartupDurationSeconds,omitempty"` + + // List of scrape classes to expose to scraping objects such as + // PodMonitors, ServiceMonitors, Probes and ScrapeConfigs. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + // +listType=map + // +listMapKey=name + ScrapeClasses []ScrapeClass `json:"scrapeClasses,omitempty"` +} + +// +kubebuilder:validation:Enum=HTTP;ProcessSignal +type ReloadStrategyType string + +const ( + // HTTPReloadStrategyType reloads the configuration using the /-/reload HTTP endpoint. + HTTPReloadStrategyType ReloadStrategyType = "HTTP" + + // ProcessSignalReloadStrategyType reloads the configuration by sending a SIGHUP signal to the process. 
+ ProcessSignalReloadStrategyType ReloadStrategyType = "ProcessSignal" +) + +func (cpf *CommonPrometheusFields) PrometheusURIScheme() string { + if cpf.Web != nil && cpf.Web.TLSConfig != nil { + return "https" + } + + return "http" +} + +func (cpf *CommonPrometheusFields) WebRoutePrefix() string { + if cpf.RoutePrefix != "" { + return cpf.RoutePrefix + } + + return "/" } // +genclient @@ -620,6 +740,9 @@ type CommonPrometheusFields struct { // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 // +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.shards,statuspath=.status.shards,selectorpath=.status.selector +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // Prometheus defines a Prometheus deployment. type Prometheus struct { @@ -661,13 +784,11 @@ func (l *PrometheusList) DeepCopyObject() runtime.Object { type PrometheusSpec struct { CommonPrometheusFields `json:",inline"` - // *Deprecated: use 'spec.image' instead.* + // Deprecated: use 'spec.image' instead. BaseImage string `json:"baseImage,omitempty"` - // *Deprecated: use 'spec.image' instead. The image's tag can be specified - // as part of the image name.* + // Deprecated: use 'spec.image' instead. The image's tag can be specified as part of the image name. Tag string `json:"tag,omitempty"` - // *Deprecated: use 'spec.image' instead. The image's digest can be - // specified as part of the image name.* + // Deprecated: use 'spec.image' instead. The image's digest can be specified as part of the image name. SHA string `json:"sha,omitempty"` // How long to retain the Prometheus data. @@ -685,8 +806,8 @@ type PrometheusSpec struct { // Defines the list of PrometheusRule objects to which the namespace label // enforcement doesn't apply. // This is only relevant when `spec.enforcedNamespaceLabel` is set to true. - // *Deprecated: use `spec.excludedFromEnforcement` instead.* // +optional + // Deprecated: use `spec.excludedFromEnforcement` instead. PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` // PrometheusRule objects to be selected for rule evaluation. An empty // label selector matches all objects. A null label selector matches no @@ -745,8 +866,6 @@ type PrometheusSpec struct { // Defines the configuration of the optional Thanos sidecar. // - // This section is experimental, it may change significantly without - // deprecation notice in any release. // +optional Thanos *ThanosSpec `json:"thanos,omitempty"` @@ -766,7 +885,7 @@ type PrometheusSpec struct { // AllowOverlappingBlocks enables vertical compaction and vertical query // merge in Prometheus. // - // *Deprecated: this flag has no effect for Prometheus >= 2.39.0 where overlapping blocks are enabled by default.* + // Deprecated: this flag has no effect for Prometheus >= 2.39.0 where overlapping blocks are enabled by default. AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"` // Exemplars related settings that are runtime reloadable. 
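For reference, the `PrometheusURIScheme()` and `WebRoutePrefix()` helpers introduced in the hunk above can be combined to derive the base URL of a Prometheus instance. The sketch below is illustrative only; `buildBaseURL`, `host` and `port` are placeholders and not part of the vendored package.

package example

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// buildBaseURL is a hypothetical helper that combines the two new methods
// shown in the hunk above: the scheme is "https" when spec.web.tlsConfig is
// set, and the route prefix falls back to "/" when spec.routePrefix is empty.
func buildBaseURL(p *monitoringv1.Prometheus, host string, port int) string {
	cpf := p.GetCommonPrometheusFields()
	return fmt.Sprintf("%s://%s:%d%s", cpf.PrometheusURIScheme(), host, port, cpf.WebRoutePrefix())
}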
@@ -861,6 +980,10 @@ type PrometheusStatus struct { // +listMapKey=shardID // +optional ShardStatuses []ShardStatus `json:"shardStatuses,omitempty"` + // Shards is the most recently observed number of shards. + Shards int32 `json:"shards,omitempty"` + // The selector used to match the pods targeted by this Prometheus resource. + Selector string `json:"selector,omitempty"` } // AlertingSpec defines parameters for alerting configuration of Prometheus servers. @@ -880,7 +1003,7 @@ type AlertingSpec struct { // // +k8s:openapi-gen=true type StorageSpec struct { - // *Deprecated: subPath usage will be removed in a future release.* + // Deprecated: subPath usage will be removed in a future release. DisableMountSubPath bool `json:"disableMountSubPath,omitempty"` // EmptyDirVolumeSource to be used by the StatefulSet. // If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`. @@ -960,16 +1083,14 @@ type ThanosSpec struct { // +optional Version *string `json:"version,omitempty"` - // *Deprecated: use 'image' instead. The image's tag can be specified as - // part of the image name.* // +optional + // Deprecated: use 'image' instead. The image's tag can be specified as as part of the image name. Tag *string `json:"tag,omitempty"` - // *Deprecated: use 'image' instead. The image digest can be specified - // as part of the image name.* // +optional + // Deprecated: use 'image' instead. The image digest can be specified as part of the image name. SHA *string `json:"sha,omitempty"` - // *Deprecated: use 'image' instead.* // +optional + // Deprecated: use 'image' instead. BaseImage *string `json:"baseImage,omitempty"` // Defines the resources requests and limits of the Thanos sidecar. @@ -990,7 +1111,7 @@ type ThanosSpec struct { // +optional ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"` - // *Deprecated: use `grpcListenLocal` and `httpListenLocal` instead.* + // Deprecated: use `grpcListenLocal` and `httpListenLocal` instead. ListenLocal bool `json:"listenLocal,omitempty"` // When true, the Thanos sidecar listens on the loopback interface instead @@ -1007,22 +1128,23 @@ type ThanosSpec struct { // Defines the tracing configuration for the Thanos sidecar. // + // `tracingConfigFile` takes precedence over this field. + // // More info: https://thanos.io/tip/thanos/tracing.md/ // - // This is an experimental feature, it may change in any upcoming release + // This is an *experimental feature*, it may change in any upcoming release // in a breaking way. // - // tracingConfigFile takes precedence over this field. // +optional TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` // Defines the tracing configuration file for the Thanos sidecar. // + // This field takes precedence over `tracingConfig`. + // // More info: https://thanos.io/tip/thanos/tracing.md/ // - // This is an experimental feature, it may change in any upcoming release + // This is an *experimental feature*, it may change in any upcoming release // in a breaking way. - // - // This field takes precedence over tracingConfig. TracingConfigFile string `json:"tracingConfigFile,omitempty"` // Configures the TLS parameters for the gRPC server providing the StoreAPI. @@ -1142,7 +1264,7 @@ type RemoteWriteSpec struct { BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // File from which to read bearer token for the URL. // - // *Deprecated: this will be removed in a future release. Prefer using `authorization`.* + // Deprecated: this will be removed in a future release. 
Prefer using `authorization`. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Authorization section for the URL. // @@ -1173,7 +1295,7 @@ type RemoteWriteSpec struct { // *Warning: this field shouldn't be used because the token value appears // in clear-text. Prefer using `authorization`.* // - // *Deprecated: this will be removed in a future release.* + // Deprecated: this will be removed in a future release. BearerToken string `json:"bearerToken,omitempty"` // TLS Config to use for the URL. @@ -1190,6 +1312,10 @@ type RemoteWriteSpec struct { // MetadataConfig configures the sending of series metadata to the remote storage. // +optional MetadataConfig *MetadataConfig `json:"metadataConfig,omitempty"` + + // Whether to enable HTTP2. + // +optional + EnableHttp2 *bool `json:"enableHTTP2,omitempty"` } // QueueConfig allows the tuning of remote write's queue_config parameters. @@ -1206,16 +1332,26 @@ type QueueConfig struct { // MaxSamplesPerSend is the maximum number of samples per send. MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"` // BatchSendDeadline is the maximum time a sample will wait in buffer. - BatchSendDeadline string `json:"batchSendDeadline,omitempty"` + // +optional + BatchSendDeadline *Duration `json:"batchSendDeadline,omitempty"` // MaxRetries is the maximum number of times to retry a batch on recoverable errors. MaxRetries int `json:"maxRetries,omitempty"` // MinBackoff is the initial retry delay. Gets doubled for every retry. - MinBackoff string `json:"minBackoff,omitempty"` + // +optional + MinBackoff *Duration `json:"minBackoff,omitempty"` // MaxBackoff is the maximum retry delay. - MaxBackoff string `json:"maxBackoff,omitempty"` + // +optional + MaxBackoff *Duration `json:"maxBackoff,omitempty"` // Retry upon receiving a 429 status code from the remote-write storage. - // This is experimental feature and might change in the future. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. RetryOnRateLimit bool `json:"retryOnRateLimit,omitempty"` + // SampleAgeLimit drops samples older than the limit. + // It requires Prometheus >= v2.50.0. + // + // +optional + SampleAgeLimit *Duration `json:"sampleAgeLimit,omitempty"` } // Sigv4 optionally configures AWS's Signature Verification 4 signing process to @@ -1246,8 +1382,40 @@ type AzureAD struct { // +optional Cloud *string `json:"cloud,omitempty"` // ManagedIdentity defines the Azure User-assigned Managed identity. + // Cannot be set at the same time as `oauth` or `sdk`. + // +optional + ManagedIdentity *ManagedIdentity `json:"managedIdentity,omitempty"` + // OAuth defines the oauth config that is being used to authenticate. + // Cannot be set at the same time as `managedIdentity` or `sdk`. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + OAuth *AzureOAuth `json:"oauth,omitempty"` + // SDK defines the Azure SDK config that is being used to authenticate. + // See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication + // Cannot be set at the same time as `oauth` or `managedIdentity`. + // + // It requires Prometheus >= 2.52.0. + // +optional + SDK *AzureSDK `json:"sdk,omitempty"` +} + +// AzureOAuth defines the Azure OAuth settings. +// +k8s:openapi-gen=true +type AzureOAuth struct { + // `clientID` is the clientId of the Azure Active Directory application that is being used to authenticate. 
+ // +required + // +kubebuilder:validation:MinLength=1 + ClientID string `json:"clientId"` + // `clientSecret` specifies a key of a Secret containing the client secret of the Azure Active Directory application that is being used to authenticate. // +required - ManagedIdentity ManagedIdentity `json:"managedIdentity"` + ClientSecret v1.SecretKeySelector `json:"clientSecret"` + // `tenantId` is the tenant ID of the Azure Active Directory application that is being used to authenticate. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern:=^[0-9a-zA-Z-.]+$ + TenantID string `json:"tenantId"` } // ManagedIdentity defines the Azure User-assigned Managed identity. @@ -1258,6 +1426,14 @@ type ManagedIdentity struct { ClientID string `json:"clientId"` } +// AzureSDK is used to store azure SDK config values. +type AzureSDK struct { + // `tenantId` is the tenant ID of the azure active directory application that is being used to authenticate. + // +optional + // +kubebuilder:validation:Pattern:=^[0-9a-zA-Z-.]+$ + TenantID *string `json:"tenantId,omitempty"` +} + // RemoteReadSpec defines the configuration for Prometheus to read back samples // from a remote endpoint. // +k8s:openapi-gen=true @@ -1307,7 +1483,7 @@ type RemoteReadSpec struct { BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // File from which to read the bearer token for the URL. // - // *Deprecated: this will be removed in a future release. Prefer using `authorization`.* + // Deprecated: this will be removed in a future release. Prefer using `authorization`. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Authorization section for the URL. // @@ -1321,7 +1497,7 @@ type RemoteReadSpec struct { // *Warning: this field shouldn't be used because the token value appears // in clear-text. Prefer using `authorization`.* // - // *Deprecated: this will be removed in a future release.* + // Deprecated: this will be removed in a future release. BearerToken string `json:"bearerToken,omitempty"` // TLS Config to use for the URL. @@ -1361,7 +1537,7 @@ type RelabelConfig struct { SourceLabels []LabelName `json:"sourceLabels,omitempty"` // Separator is the string between concatenated SourceLabels. - Separator string `json:"separator,omitempty"` + Separator *string `json:"separator,omitempty"` // Label to which the resulting string is written in a replacement. // @@ -1383,7 +1559,9 @@ type RelabelConfig struct { // regular expression matches. // // Regex capture groups are available. - Replacement string `json:"replacement,omitempty"` + // + //+optional + Replacement *string `json:"replacement,omitempty"` // Action to perform based on the regex matching. // @@ -1419,7 +1597,7 @@ type APIServerConfig struct { // // Cannot be set at the same time as `basicAuth`, `authorization`, or `bearerToken`. // - // *Deprecated: this will be removed in a future release. Prefer using `authorization`.* + // Deprecated: this will be removed in a future release. Prefer using `authorization`. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for the API server. @@ -1438,7 +1616,7 @@ type APIServerConfig struct { // *Warning: this field shouldn't be used because the token value appears // in clear-text. Prefer using `authorization`.* // - // *Deprecated: this will be removed in a future release.* + // Deprecated: this will be removed in a future release. 
BearerToken string `json:"bearerToken,omitempty"` } @@ -1476,7 +1654,7 @@ type AlertmanagerEndpoints struct { // // Cannot be set at the same time as `basicAuth`, `authorization`, or `sigv4`. // - // *Deprecated: this will be removed in a future release. Prefer using `authorization`.* + // Deprecated: this will be removed in a future release. Prefer using `authorization`. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Authorization section for Alertmanager. @@ -1508,6 +1686,17 @@ type AlertmanagerEndpoints struct { // // +optional EnableHttp2 *bool `json:"enableHttp2,omitempty"` + + // Relabel configuration applied to the discovered Alertmanagers. + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // Relabeling configs applied before sending alerts to a specific Alertmanager. + // It requires Prometheus >= v2.51.0. + // + // +optional + AlertRelabelConfigs []RelabelConfig `json:"alertRelabelings,omitempty"` } // +k8s:openapi-gen=true @@ -1569,7 +1758,8 @@ type TSDBSpec struct { // An out-of-order/out-of-bounds sample is ingested into the TSDB as long as // the timestamp of the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). // - // Out of order ingestion is an experimental feature. + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. // // It requires Prometheus >= v2.39.0. OutOfOrderTimeWindow Duration `json:"outOfOrderTimeWindow,omitempty"` @@ -1648,3 +1838,40 @@ type AuthorizationValidationError struct { func (e *AuthorizationValidationError) Error() string { return e.err } + +type ScrapeClass struct { + // Name of the scrape class. + // + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // Default indicates that the scrape applies to all scrape objects that + // don't configure an explicit scrape class name. + // + // Only one scrape class can be set as the default. + // + // +optional + Default *bool `json:"default,omitempty"` + + // TLSConfig defines the TLS settings to use for the scrape. When the + // scrape objects define their own CA, certificate and/or key, they take + // precedence over the corresponding scrape class fields. + // + // For now only the `caFile`, `certFile` and `keyFile` fields are supported. + // + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` + + // Relabelings configures the relabeling rules to apply to all scrape targets. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields + // like `__meta_kubernetes_namespace` and `__meta_kubernetes_service_name`. + // Then the Operator adds the scrape class relabelings defined here. + // Then the Operator adds the target-specific relabelings defined in the scrape object. 
+ // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional + Relabelings []RelabelConfig `json:"relabelings,omitempty"` +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go index 9a0890dc9..8002a1328 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/servicemonitor_types.go @@ -43,43 +43,82 @@ func (l *ServiceMonitor) DeepCopyObject() runtime.Object { return l.DeepCopy() } -// ServiceMonitorSpec contains specification parameters for a ServiceMonitor. +// ServiceMonitorSpec defines the specification parameters for a ServiceMonitor. // +k8s:openapi-gen=true type ServiceMonitorSpec struct { - // JobLabel selects the label from the associated Kubernetes service which will be used as the `job` label for all metrics. + // `jobLabel` selects the label from the associated Kubernetes `Service` + // object which will be used as the `job` label for all metrics. // - // For example: - // If in `ServiceMonitor.spec.jobLabel: foo` and in `Service.metadata.labels.foo: bar`, - // then the `job="bar"` label is added to all metrics. + // For example if `jobLabel` is set to `foo` and the Kubernetes `Service` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics. // - // If the value of this field is empty or if the label doesn't exist for the given Service, the `job` label of the metrics defaults to the name of the Kubernetes Service. + // If the value of this field is empty or if the label doesn't exist for + // the given Service, the `job` label of the metrics defaults to the name + // of the associated Kubernetes `Service`. JobLabel string `json:"jobLabel,omitempty"` - // TargetLabels transfers labels from the Kubernetes `Service` onto the created metrics. + + // `targetLabels` defines the labels which are transferred from the + // associated Kubernetes `Service` object onto the ingested metrics. + // + // +optional TargetLabels []string `json:"targetLabels,omitempty"` - // PodTargetLabels transfers labels on the Kubernetes `Pod` onto the created metrics. + // `podTargetLabels` defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // + // +optional PodTargetLabels []string `json:"podTargetLabels,omitempty"` - // A list of endpoints allowed as part of this ServiceMonitor. + + // List of endpoints part of this ServiceMonitor. + // + // +optional Endpoints []Endpoint `json:"endpoints"` - // Selector to select Endpoints objects. + + // Label selector to select the Kubernetes `Endpoints` objects. Selector metav1.LabelSelector `json:"selector"` - // Selector to select which namespaces the Kubernetes Endpoints objects are discovered from. + // Selector to select which namespaces the Kubernetes `Endpoints` objects + // are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` - // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + + // `sampleLimit` defines a per-scrape limit on the number of scraped samples + // that will be accepted. 
+ // // +optional SampleLimit *uint64 `json:"sampleLimit,omitempty"` - // TargetLimit defines a limit on the number of scraped targets that will be accepted. + + // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + + // `targetLimit` defines a limit on the number of scraped targets that will + // be accepted. + // // +optional TargetLimit *uint64 `json:"targetLimit,omitempty"` + // Per-scrape limit on number of labels that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. + // + // It requires Prometheus >= v2.27.0. + // // +optional LabelLimit *uint64 `json:"labelLimit,omitempty"` // Per-scrape limit on length of labels name that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. + // + // It requires Prometheus >= v2.27.0. + // // +optional LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` // Per-scrape limit on length of labels value that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. + // + // It requires Prometheus >= v2.27.0. + // // +optional LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` // Per-scrape limit on the number of targets dropped by relabeling @@ -89,9 +128,27 @@ type ServiceMonitorSpec struct { // // +optional KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` - // Attaches node metadata to discovered targets. - // Requires Prometheus v2.37.0 and above. + + // `attachMetadata` defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.37.0. + // + // +optional AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // When defined, bodySizeLimit specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` } // ServiceMonitorList is a list of ServiceMonitors. diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go index c155df068..82c569ee1 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -197,10 +197,23 @@ type ThanosRulerSpec struct { // of what the maintainers will support and by doing so, you accept that this behaviour may break // at any time without notice. InitContainers []v1.Container `json:"initContainers,omitempty"` - // TracingConfig configures tracing in Thanos. This is an experimental feature, it may change in any upcoming release in a breaking way. + // TracingConfig configures tracing in Thanos. + // + // `tracingConfigFile` takes precedence over this field. 
+ // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + //+optional TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` // TracingConfig specifies the path of the tracing configuration file. - // When used alongside with TracingConfig, TracingConfigFile takes precedence. + // + // This field takes precedence over `tracingConfig`. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + //+optional TracingConfigFile string `json:"tracingConfigFile,omitempty"` // Labels configure the external label pairs to ThanosRuler. A default replica label // `thanos_ruler_replica` will be always added as a label with the value of the pod's name and it will be dropped in the alerts. @@ -250,6 +263,14 @@ type ThanosRulerSpec struct { // operator itself) or when providing an invalid argument the reconciliation will // fail and an error will be logged. AdditionalArgs []Argument `json:"additionalArgs,omitempty"` + // Defines the configuration of the ThanosRuler web server. + Web *ThanosRulerWebSpec `json:"web,omitempty"` +} + +// ThanosRulerWebSpec defines the configuration of the ThanosRuler web server. +// +k8s:openapi-gen=true +type ThanosRulerWebSpec struct { + WebConfigFileFields `json:",inline"` } // ThanosRulerStatus is the most recent observed status of the ThanosRuler. Read-only. diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go index 2d379f64f..a49b87d66 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -75,6 +75,35 @@ type PrometheusRuleExcludeConfig struct { RuleName string `json:"ruleName"` } +type ProxyConfig struct { + // `proxyURL` defines the HTTP proxy server to use. + // + // It requires Prometheus >= v2.43.0. + // +kubebuilder:validation:Pattern:="^http(s)?://.+$" + // +optional + ProxyURL *string `json:"proxyUrl,omitempty"` + // `noProxy` is a comma-separated string that can contain IPs, CIDR notation, domain names + // that should be excluded from proxying. IP and domain names can + // contain port numbers. + // + // It requires Prometheus >= v2.43.0. + // +optional + NoProxy *string `json:"noProxy,omitempty"` + // Whether to use the proxy configuration defined by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY). + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.43.0. + // +optional + ProxyFromEnvironment *bool `json:"proxyFromEnvironment,omitempty"` + // ProxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. + // + // It requires Prometheus >= v2.43.0. + // +optional + // +mapType:=atomic + ProxyConnectHeader map[string][]v1.SecretKeySelector `json:"proxyConnectHeader,omitempty"` +} + // ObjectReference references a PodMonitor, ServiceMonitor, Probe or PrometheusRule object. type ObjectReference struct { // Group of the referent. When not specified, it defaults to `monitoring.coreos.com` @@ -200,8 +229,8 @@ type EmbeddedPersistentVolumeClaim struct { // +optional Spec v1.PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // *Deprecated: this field is never set.* // +optional + // Deprecated: this field is never set. 
Status v1.PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -350,90 +379,200 @@ func (c *WebTLSConfig) Validate() error { return nil } -// LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores. +// LabelName is a valid Prometheus label name which may only contain ASCII +// letters, numbers, as well as underscores. +// // +kubebuilder:validation:Pattern:="^[a-zA-Z_][a-zA-Z0-9_]*$" type LabelName string -// Endpoint defines a scrapeable endpoint serving Prometheus metrics. +// Endpoint defines an endpoint serving Prometheus metrics to be scraped by +// Prometheus. +// // +k8s:openapi-gen=true type Endpoint struct { - // Name of the service port this endpoint refers to. Mutually exclusive with targetPort. + // Name of the Service port which this endpoint refers to. + // + // It takes precedence over `targetPort`. Port string `json:"port,omitempty"` - // Name or number of the target port of the Pod behind the Service, the port must be specified with container port property. Mutually exclusive with port. + + // Name or number of the target port of the `Pod` object behind the + // Service. The port must be specified with the container's port property. + // + // +optional TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` - // HTTP path to scrape for metrics. + + // HTTP path from which to scrape for metrics. + // // If empty, Prometheus uses the default value (e.g. `/metrics`). Path string `json:"path,omitempty"` + // HTTP scheme to use for scraping. - // `http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. + // + // `http` and `https` are the expected values unless you rewrite the + // `__scheme__` label via relabeling. + // // If empty, Prometheus uses the default value `http`. + // // +kubebuilder:validation:Enum=http;https Scheme string `json:"scheme,omitempty"` - // Optional HTTP URL parameters + + // params define optional HTTP URL parameters. Params map[string][]string `json:"params,omitempty"` - // Interval at which metrics should be scraped - // If not specified Prometheus' global scrape interval is used. + + // Interval at which Prometheus scrapes the metrics from the target. + // + // If empty, Prometheus uses the global scrape interval. Interval Duration `json:"interval,omitempty"` - // Timeout after which the scrape is ended - // If not specified, the Prometheus global scrape timeout is used unless it is less than `Interval` in which the latter is used. + + // Timeout after which Prometheus considers the scrape to be failed. + // + // If empty, Prometheus uses the global scrape timeout unless it is less + // than the target's scrape interval value in which the latter is used. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` - // TLS configuration to use when scraping the endpoint + + // TLS configuration to use when scraping the target. + // + // +optional TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` - // File to read bearer token for scraping targets. + + // File to read bearer token for scraping the target. + // + // Deprecated: use `authorization` instead. BearerTokenFile string `json:"bearerTokenFile,omitempty"` - // Secret to mount to read bearer token for scraping targets. The secret - // needs to be in the same namespace as the service monitor and accessible by - // the Prometheus Operator. 
- //+ optional + + // `bearerTokenSecret` specifies a key of a Secret containing the bearer + // token for scraping targets. The secret needs to be in the same namespace + // as the ServiceMonitor object and readable by the Prometheus Operator. + // + // +optional + // + // Deprecated: use `authorization` instead. BearerTokenSecret *v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` - // Authorization section for this endpoint + + // `authorization` configures the Authorization header credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // + // +optional Authorization *SafeAuthorization `json:"authorization,omitempty"` - // HonorLabels chooses the metric's labels on collisions with target labels. + + // When true, `honorLabels` preserves the metric's labels when they collide + // with the target's labels. HonorLabels bool `json:"honorLabels,omitempty"` - // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + + // `honorTimestamps` controls whether Prometheus preserves the timestamps + // when exposed by the target. + // + // +optional HonorTimestamps *bool `json:"honorTimestamps,omitempty"` - // BasicAuth allow an endpoint to authenticate over basic authentication - // More info: https://prometheus.io/docs/operating/configuration/#endpoints + + // `trackTimestampsStaleness` defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + + // `basicAuth` configures the Basic Authentication credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `authorization`, or `oauth2`. + // + // +optional BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + + // `oauth2` configures the OAuth2 settings to use when scraping the target. + // + // It requires Prometheus >= 2.27.0. + // + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // + // +optional OAuth2 *OAuth2 `json:"oauth2,omitempty"` - // MetricRelabelConfigs to apply to samples before ingestion. - MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` - // RelabelConfigs to apply to samples before scraping. - // Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields. + + // `metricRelabelings` configures the relabeling rules to apply to the + // samples before ingestion. + // + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + + // `relabelings` configures the relabeling rules to apply the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` - // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // `proxyURL` configures the HTTP Proxy URL (e.g. 
+ // "http://proxyserver:2195") to go through when scraping the target. + // + // +optional ProxyURL *string `json:"proxyUrl,omitempty"` - // FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. + + // `followRedirects` defines whether the scrape requests should follow HTTP + // 3xx redirects. + // + // +optional FollowRedirects *bool `json:"followRedirects,omitempty"` - // Whether to enable HTTP2. + + // `enableHttp2` can be used to disable HTTP2 when scraping the target. + // + // +optional EnableHttp2 *bool `json:"enableHttp2,omitempty"` - // Drop pods that are not running. (Failed, Succeeded). Enabled by default. + + // When true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional FilterRunning *bool `json:"filterRunning,omitempty"` } type AttachMetadata struct { - // When set to true, Prometheus must have permissions to get Nodes. - Node bool `json:"node,omitempty"` + // When set to true, Prometheus must have the `get` permission on the + // `Nodes` objects. + // + // +optional + Node *bool `json:"node,omitempty"` } -// OAuth2 allows an endpoint to authenticate with OAuth2. -// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#oauth2 +// OAuth2 configures OAuth2 settings. +// // +k8s:openapi-gen=true type OAuth2 struct { - // The secret or configmap containing the OAuth2 client id + // `clientId` specifies a key of a Secret or ConfigMap containing the + // OAuth2 client's ID. ClientID SecretOrConfigMap `json:"clientId"` - // The secret containing the OAuth2 client secret + + // `clientSecret` specifies a key of a Secret containing the OAuth2 + // client's secret. ClientSecret v1.SecretKeySelector `json:"clientSecret"` - // The URL to fetch the token from + + // `tokenURL` configures the URL to fetch the token from. + // // +kubebuilder:validation:MinLength=1 TokenURL string `json:"tokenUrl"` - // OAuth2 scopes used for the token request + + // `scopes` defines the OAuth2 scopes used for the token request. + // + // +optional. Scopes []string `json:"scopes,omitempty"` - // Parameters to append to the token URL + + // `endpointParams` configures the HTTP parameters to append to the token + // URL. + // + // +optional EndpointParams map[string]string `json:"endpointParams,omitempty"` } @@ -463,15 +602,16 @@ func (o *OAuth2) Validate() error { return nil } -// BasicAuth allow an endpoint to authenticate over basic authentication -// More info: https://prometheus.io/docs/operating/configuration/#endpoints +// BasicAuth configures HTTP Basic Authentication settings. +// // +k8s:openapi-gen=true type BasicAuth struct { - // The secret in the service monitor namespace that contains the username - // for authentication. + // `username` specifies a key of a Secret containing the username for + // authentication. Username v1.SecretKeySelector `json:"username,omitempty"` - // The secret in the service monitor namespace that contains the password - // for authentication. + + // `password` specifies a key of a Secret containing the password for + // authentication. 
Password v1.SecretKeySelector `json:"password,omitempty"` } @@ -483,61 +623,75 @@ type SecretOrConfigMap struct { ConfigMap *v1.ConfigMapKeySelector `json:"configMap,omitempty"` } -// SecretOrConfigMapValidationError is returned by SecretOrConfigMap.Validate() -// on semantically invalid configurations. -// +k8s:openapi-gen=false -type SecretOrConfigMapValidationError struct { - err string -} - -func (e *SecretOrConfigMapValidationError) Error() string { - return e.err -} - -// Validate semantically validates the given TLSConfig. +// Validate semantically validates the given SecretOrConfigMap. func (c *SecretOrConfigMap) Validate() error { + if c == nil { + return nil + } + if c.Secret != nil && c.ConfigMap != nil { - return &SecretOrConfigMapValidationError{"SecretOrConfigMap can not specify both Secret and ConfigMap"} + return fmt.Errorf("cannot specify both Secret and ConfigMap") } return nil } +func (c *SecretOrConfigMap) String() string { + if c == nil { + return "" + } + + switch { + case c.Secret != nil: + return fmt.Sprintf("", c.Secret.LocalObjectReference.Name, c.Secret.Key) + case c.ConfigMap != nil: + return fmt.Sprintf("", c.ConfigMap.LocalObjectReference.Name, c.ConfigMap.Key) + } + + return "" +} + // SafeTLSConfig specifies safe TLS configuration parameters. // +k8s:openapi-gen=true type SafeTLSConfig struct { // Certificate authority used when verifying server certificates. CA SecretOrConfigMap `json:"ca,omitempty"` + // Client certificate to present when doing client-authentication. Cert SecretOrConfigMap `json:"cert,omitempty"` + // Secret containing the client key file for the targets. KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` + // Used to verify the hostname for the targets. - ServerName string `json:"serverName,omitempty"` + //+optional + ServerName *string `json:"serverName,omitempty"` + // Disable target certificate validation. - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` + //+optional + InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` } // Validate semantically validates the given SafeTLSConfig. func (c *SafeTLSConfig) Validate() error { if c.CA != (SecretOrConfigMap{}) { if err := c.CA.Validate(); err != nil { - return err + return fmt.Errorf("ca %s: %w", c.CA.String(), err) } } if c.Cert != (SecretOrConfigMap{}) { if err := c.Cert.Validate(); err != nil { - return err + return fmt.Errorf("cert %s: %w", c.Cert.String(), err) } } if c.Cert != (SecretOrConfigMap{}) && c.KeySecret == nil { - return &TLSConfigValidationError{"client cert specified without client key"} + return fmt.Errorf("client cert specified without client key") } if c.KeySecret != nil && c.Cert == (SecretOrConfigMap{}) { - return &TLSConfigValidationError{"client key specified without client cert"} + return fmt.Errorf("client key specified without client cert") } return nil @@ -555,50 +709,39 @@ type TLSConfig struct { KeyFile string `json:"keyFile,omitempty"` } -// TLSConfigValidationError is returned by TLSConfig.Validate() on semantically -// invalid tls configurations. -// +k8s:openapi-gen=false -type TLSConfigValidationError struct { - err string -} - -func (e *TLSConfigValidationError) Error() string { - return e.err -} - // Validate semantically validates the given TLSConfig. 
func (c *TLSConfig) Validate() error { if c.CA != (SecretOrConfigMap{}) { if c.CAFile != "" { - return &TLSConfigValidationError{"tls config can not both specify CAFile and CA"} + return fmt.Errorf("cannot specify both caFile and ca") } if err := c.CA.Validate(); err != nil { - return &TLSConfigValidationError{"tls config CA is invalid"} + return fmt.Errorf("SecretOrConfigMap ca: %w", err) } } if c.Cert != (SecretOrConfigMap{}) { if c.CertFile != "" { - return &TLSConfigValidationError{"tls config can not both specify CertFile and Cert"} + return fmt.Errorf("cannot specify both certFile and cert") } if err := c.Cert.Validate(); err != nil { - return &TLSConfigValidationError{"tls config Cert is invalid"} + return fmt.Errorf("SecretOrConfigMap cert: %w", err) } } if c.KeyFile != "" && c.KeySecret != nil { - return &TLSConfigValidationError{"tls config can not both specify KeyFile and KeySecret"} + return fmt.Errorf("cannot specify both keyFile and keySecret") } hasCert := c.CertFile != "" || c.Cert != (SecretOrConfigMap{}) hasKey := c.KeyFile != "" || c.KeySecret != nil if hasCert && !hasKey { - return &TLSConfigValidationError{"tls config can not specify client cert without client key"} + return fmt.Errorf("cannot specify client cert without client key") } if hasKey && !hasCert { - return &TLSConfigValidationError{"tls config can not specify client key without client cert"} + return fmt.Errorf("cannot specify client key without client cert") } return nil @@ -631,3 +774,13 @@ type Argument struct { // Argument value, e.g. 30s. Can be empty for name-only arguments (e.g. --storage.tsdb.no-lockfile) Value string `json:"value,omitempty"` } + +// The valid options for Role. +const ( + RoleNode = "node" + RolePod = "pod" + RoleService = "service" + RoleEndpoint = "endpoints" + RoleEndpointSlice = "endpointslice" + RoleIngress = "ingress" +) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go index 101879d8e..b1e63c9ce 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Copyright The prometheus-operator Authors // @@ -20,6 +19,7 @@ package v1 import ( + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -172,6 +172,20 @@ func (in *AlertmanagerEndpoints) DeepCopyInto(out *AlertmanagerEndpoints) { *out = new(bool) **out = **in } + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AlertRelabelConfigs != nil { + in, out := &in.AlertRelabelConfigs, &out.AlertRelabelConfigs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerEndpoints. 
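As a usage note for the new `relabelings` and `alertRelabelings` fields on AlertmanagerEndpoints (whose generated deepcopy support appears in the hunk above), they could be populated roughly as sketched below; the label name, regex values and actions are placeholders rather than values taken from this repository.

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// alertmanagerEndpointsExample sketches how the two new relabeling fields
// might be filled in. RelabelConfigs applies to the discovered Alertmanager
// targets; AlertRelabelConfigs applies to alerts before they are sent and,
// per the field documentation, requires Prometheus >= v2.51.0.
func alertmanagerEndpointsExample() monitoringv1.AlertmanagerEndpoints {
	var ep monitoringv1.AlertmanagerEndpoints
	ep.RelabelConfigs = []monitoringv1.RelabelConfig{{
		SourceLabels: []monitoringv1.LabelName{"__meta_kubernetes_pod_name"},
		Regex:        "alertmanager-main-.*",
		Action:       "keep",
	}}
	ep.AlertRelabelConfigs = []monitoringv1.RelabelConfig{{
		Action: "labeldrop",
		Regex:  "placeholder_label",
	}}
	return ep
}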
@@ -356,6 +370,11 @@ func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.ClusterLabel != nil { + in, out := &in.ClusterLabel, &out.ClusterLabel + *out = new(string) + **out = **in + } if in.AlertmanagerConfigSelector != nil { in, out := &in.AlertmanagerConfigSelector, &out.AlertmanagerConfigSelector *out = new(metav1.LabelSelector) @@ -394,6 +413,11 @@ func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { *out = new(bool) **out = **in } + if in.EnableFeatures != nil { + in, out := &in.EnableFeatures, &out.EnableFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerSpec. @@ -487,6 +511,11 @@ func (in *Argument) DeepCopy() *Argument { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AttachMetadata) DeepCopyInto(out *AttachMetadata) { *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachMetadata. @@ -538,7 +567,21 @@ func (in *AzureAD) DeepCopyInto(out *AzureAD) { *out = new(string) **out = **in } - out.ManagedIdentity = in.ManagedIdentity + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(ManagedIdentity) + **out = **in + } + if in.OAuth != nil { + in, out := &in.OAuth, &out.OAuth + *out = new(AzureOAuth) + (*in).DeepCopyInto(*out) + } + if in.SDK != nil { + in, out := &in.SDK, &out.SDK + *out = new(AzureSDK) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAD. @@ -551,6 +594,42 @@ func (in *AzureAD) DeepCopy() *AzureAD { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureOAuth) DeepCopyInto(out *AzureOAuth) { + *out = *in + in.ClientSecret.DeepCopyInto(&out.ClientSecret) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureOAuth. +func (in *AzureOAuth) DeepCopy() *AzureOAuth { + if in == nil { + return nil + } + out := new(AzureOAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureSDK) DeepCopyInto(out *AzureSDK) { + *out = *in + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSDK. +func (in *AzureSDK) DeepCopy() *AzureSDK { + if in == nil { + return nil + } + out := new(AzureSDK) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { *out = *in @@ -646,6 +725,11 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { *out = new(string) **out = **in } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } if in.ExternalLabels != nil { in, out := &in.ExternalLabels, &out.ExternalLabels *out = make(map[string]string, len(*in)) @@ -655,7 +739,7 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { } if in.EnableFeatures != nil { in, out := &in.EnableFeatures, &out.EnableFeatures - *out = make([]string, len(*in)) + *out = make([]EnableFeature, len(*in)) copy(*out, *in) } if in.Storage != nil { @@ -677,6 +761,11 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PersistentVolumeClaimRetentionPolicy != nil { + in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy + *out = new(appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy) + **out = **in + } if in.Web != nil { in, out := &in.Web, &out.Web *out = new(PrometheusWebSpec) @@ -690,6 +779,11 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { (*out)[key] = val } } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } if in.Secrets != nil { in, out := &in.Secrets, &out.Secrets *out = make([]string, len(*in)) @@ -714,7 +808,7 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]corev1.TopologySpreadConstraint, len(*in)) + *out = make([]TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -858,6 +952,23 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) { *out = new(uint64) **out = **in } + if in.ReloadStrategy != nil { + in, out := &in.ReloadStrategy, &out.ReloadStrategy + *out = new(ReloadStrategyType) + **out = **in + } + if in.MaximumStartupDurationSeconds != nil { + in, out := &in.MaximumStartupDurationSeconds, &out.MaximumStartupDurationSeconds + *out = new(int32) + **out = **in + } + if in.ScrapeClasses != nil { + in, out := &in.ScrapeClasses, &out.ScrapeClasses + *out = make([]ScrapeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonPrometheusFields. @@ -886,6 +997,46 @@ func (in *Condition) DeepCopy() *Condition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CoreV1TopologySpreadConstraint) DeepCopyInto(out *CoreV1TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.MinDomains != nil { + in, out := &in.MinDomains, &out.MinDomains + *out = new(int32) + **out = **in + } + if in.NodeAffinityPolicy != nil { + in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy + *out = new(corev1.NodeInclusionPolicy) + **out = **in + } + if in.NodeTaintsPolicy != nil { + in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy + *out = new(corev1.NodeInclusionPolicy) + **out = **in + } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreV1TopologySpreadConstraint. +func (in *CoreV1TopologySpreadConstraint) DeepCopy() *CoreV1TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(CoreV1TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) { *out = *in @@ -950,7 +1101,8 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { if val == nil { (*out)[key] = nil } else { - in, out := &val, &outVal + inVal := (*in)[key] + in, out := &inVal, &outVal *out = make([]string, len(*in)) copy(*out, *in) } @@ -977,6 +1129,11 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = new(bool) **out = **in } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, &out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } if in.BasicAuth != nil { in, out := &in.BasicAuth, &out.BasicAuth *out = new(BasicAuth) @@ -989,24 +1146,16 @@ func (in *Endpoint) DeepCopyInto(out *Endpoint) { } if in.MetricRelabelConfigs != nil { in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs - *out = make([]*RelabelConfig, len(*in)) + *out = make([]RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.RelabelConfigs != nil { in, out := &in.RelabelConfigs, &out.RelabelConfigs - *out = make([]*RelabelConfig, len(*in)) + *out = make([]RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.ProxyURL != nil { @@ -1321,7 +1470,8 @@ func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { if val == nil { (*out)[key] = nil } else { - in, out := &val, &outVal + inVal := (*in)[key] + in, out := &inVal, &outVal *out = make([]string, len(*in)) copy(*out, *in) } @@ -1330,7 +1480,7 @@ func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { } if in.TLSConfig != nil { in, out := &in.TLSConfig, &out.TLSConfig - *out = new(PodMetricsEndpointTLSConfig) + *out = new(SafeTLSConfig) (*in).DeepCopyInto(*out) } in.BearerTokenSecret.DeepCopyInto(&out.BearerTokenSecret) @@ -1339,6 +1489,11 @@ func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { *out = new(bool) **out = **in } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, 
&out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } if in.BasicAuth != nil { in, out := &in.BasicAuth, &out.BasicAuth *out = new(BasicAuth) @@ -1356,24 +1511,16 @@ func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { } if in.MetricRelabelConfigs != nil { in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs - *out = make([]*RelabelConfig, len(*in)) + *out = make([]RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.RelabelConfigs != nil { in, out := &in.RelabelConfigs, &out.RelabelConfigs - *out = make([]*RelabelConfig, len(*in)) + *out = make([]RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.ProxyURL != nil { @@ -1408,22 +1555,6 @@ func (in *PodMetricsEndpoint) DeepCopy() *PodMetricsEndpoint { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMetricsEndpointTLSConfig) DeepCopyInto(out *PodMetricsEndpointTLSConfig) { - *out = *in - in.SafeTLSConfig.DeepCopyInto(&out.SafeTLSConfig) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsEndpointTLSConfig. -func (in *PodMetricsEndpointTLSConfig) DeepCopy() *PodMetricsEndpointTLSConfig { - if in == nil { - return nil - } - out := new(PodMetricsEndpointTLSConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodMonitor) DeepCopyInto(out *PodMonitor) { *out = *in @@ -1497,6 +1628,11 @@ func (in *PodMonitorSpec) DeepCopyInto(out *PodMonitorSpec) { *out = new(uint64) **out = **in } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } if in.LabelLimit != nil { in, out := &in.LabelLimit, &out.LabelLimit *out = new(uint64) @@ -1520,6 +1656,16 @@ func (in *PodMonitorSpec) DeepCopyInto(out *PodMonitorSpec) { if in.AttachMetadata != nil { in, out := &in.AttachMetadata, &out.AttachMetadata *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) **out = **in } } @@ -1587,7 +1733,7 @@ func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { in.Targets.DeepCopyInto(&out.Targets) if in.TLSConfig != nil { in, out := &in.TLSConfig, &out.TLSConfig - *out = new(ProbeTLSConfig) + *out = new(SafeTLSConfig) (*in).DeepCopyInto(*out) } in.BearerTokenSecret.DeepCopyInto(&out.BearerTokenSecret) @@ -1603,13 +1749,9 @@ func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { } if in.MetricRelabelConfigs != nil { in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs - *out = make([]*RelabelConfig, len(*in)) + *out = make([]RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Authorization != nil { @@ -1627,6 +1769,11 @@ func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { *out = new(uint64) **out = **in } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } if in.LabelLimit != nil { in, out := &in.LabelLimit, &out.LabelLimit *out = new(uint64) @@ -1647,6 +1794,11 @@ func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { *out = new(uint64) **out = **in } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. @@ -1659,22 +1811,6 @@ func (in *ProbeSpec) DeepCopy() *ProbeSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProbeTLSConfig) DeepCopyInto(out *ProbeTLSConfig) { - *out = *in - in.SafeTLSConfig.DeepCopyInto(&out.SafeTLSConfig) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTLSConfig. -func (in *ProbeTLSConfig) DeepCopy() *ProbeTLSConfig { - if in == nil { - return nil - } - out := new(ProbeTLSConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ProbeTargetIngress) DeepCopyInto(out *ProbeTargetIngress) { *out = *in @@ -1682,13 +1818,9 @@ func (in *ProbeTargetIngress) DeepCopyInto(out *ProbeTargetIngress) { in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) if in.RelabelConfigs != nil { in, out := &in.RelabelConfigs, &out.RelabelConfigs - *out = make([]*RelabelConfig, len(*in)) + *out = make([]RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } } @@ -1720,13 +1852,9 @@ func (in *ProbeTargetStaticConfig) DeepCopyInto(out *ProbeTargetStaticConfig) { } if in.RelabelConfigs != nil { in, out := &in.RelabelConfigs, &out.RelabelConfigs - *out = make([]*RelabelConfig, len(*in)) + *out = make([]RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } } @@ -2101,6 +2229,54 @@ func (in *PrometheusWebSpec) DeepCopy() *PrometheusWebSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyURL != nil { + in, out := &in.ProxyURL, &out.ProxyURL + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + if in.ProxyFromEnvironment != nil { + in, out := &in.ProxyFromEnvironment, &out.ProxyFromEnvironment + *out = new(bool) + **out = **in + } + if in.ProxyConnectHeader != nil { + in, out := &in.ProxyConnectHeader, &out.ProxyConnectHeader + *out = make(map[string][]corev1.SecretKeySelector, len(*in)) + for key, val := range *in { + var outVal []corev1.SecretKeySelector + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]corev1.SecretKeySelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *QuerySpec) DeepCopyInto(out *QuerySpec) { *out = *in @@ -2139,6 +2315,26 @@ func (in *QuerySpec) DeepCopy() *QuerySpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *QueueConfig) DeepCopyInto(out *QueueConfig) { *out = *in + if in.BatchSendDeadline != nil { + in, out := &in.BatchSendDeadline, &out.BatchSendDeadline + *out = new(Duration) + **out = **in + } + if in.MinBackoff != nil { + in, out := &in.MinBackoff, &out.MinBackoff + *out = new(Duration) + **out = **in + } + if in.MaxBackoff != nil { + in, out := &in.MaxBackoff, &out.MaxBackoff + *out = new(Duration) + **out = **in + } + if in.SampleAgeLimit != nil { + in, out := &in.SampleAgeLimit, &out.SampleAgeLimit + *out = new(Duration) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfig. 
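(Illustrative note, not part of the vendored diff: the generated ProxyConfig deepcopy above clones the ProxyConnectHeader map, so copies do not alias the original. A small sketch under that assumption; the proxy URL, secret name, and header key are invented.)

package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	proxyURL := "http://proxy.internal:3128"
	in := monitoringv1.ProxyConfig{
		ProxyURL: &proxyURL,
		ProxyConnectHeader: map[string][]corev1.SecretKeySelector{
			"Proxy-Authorization": {{
				LocalObjectReference: corev1.LocalObjectReference{Name: "proxy-creds"},
				Key:                  "token",
			}},
		},
	}
	out := in.DeepCopy()
	out.ProxyConnectHeader["Proxy-Authorization"] = nil // mutate the copy only
	fmt.Println(len(in.ProxyConnectHeader["Proxy-Authorization"]))  // still 1
}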
@@ -2159,6 +2355,16 @@ func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) { *out = make([]LabelName, len(*in)) copy(*out, *in) } + if in.Separator != nil { + in, out := &in.Separator, &out.Separator + *out = new(string) + **out = **in + } + if in.Replacement != nil { + in, out := &in.Replacement, &out.Replacement + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig. @@ -2290,13 +2496,18 @@ func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) { if in.QueueConfig != nil { in, out := &in.QueueConfig, &out.QueueConfig *out = new(QueueConfig) - **out = **in + (*in).DeepCopyInto(*out) } if in.MetadataConfig != nil { in, out := &in.MetadataConfig, &out.MetadataConfig *out = new(MetadataConfig) **out = **in } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteSpec. @@ -2442,6 +2653,16 @@ func (in *SafeTLSConfig) DeepCopyInto(out *SafeTLSConfig) { *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + if in.InsecureSkipVerify != nil { + in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SafeTLSConfig. @@ -2455,41 +2676,58 @@ func (in *SafeTLSConfig) DeepCopy() *SafeTLSConfig { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretOrConfigMap) DeepCopyInto(out *SecretOrConfigMap) { +func (in *ScrapeClass) DeepCopyInto(out *ScrapeClass) { *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(corev1.ConfigMapKeySelector) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(TLSConfig) (*in).DeepCopyInto(*out) } + if in.Relabelings != nil { + in, out := &in.Relabelings, &out.Relabelings + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOrConfigMap. -func (in *SecretOrConfigMap) DeepCopy() *SecretOrConfigMap { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeClass. +func (in *ScrapeClass) DeepCopy() *ScrapeClass { if in == nil { return nil } - out := new(SecretOrConfigMap) + out := new(ScrapeClass) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecretOrConfigMapValidationError) DeepCopyInto(out *SecretOrConfigMapValidationError) { +func (in *SecretOrConfigMap) DeepCopyInto(out *SecretOrConfigMap) { *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOrConfigMapValidationError. -func (in *SecretOrConfigMapValidationError) DeepCopy() *SecretOrConfigMapValidationError { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretOrConfigMap. +func (in *SecretOrConfigMap) DeepCopy() *SecretOrConfigMap { if in == nil { return nil } - out := new(SecretOrConfigMapValidationError) + out := new(SecretOrConfigMap) in.DeepCopyInto(out) return out } @@ -2567,6 +2805,11 @@ func (in *ServiceMonitorSpec) DeepCopyInto(out *ServiceMonitorSpec) { *out = new(uint64) **out = **in } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]ScrapeProtocol, len(*in)) + copy(*out, *in) + } if in.TargetLimit != nil { in, out := &in.TargetLimit, &out.TargetLimit *out = new(uint64) @@ -2595,6 +2838,16 @@ func (in *ServiceMonitorSpec) DeepCopyInto(out *ServiceMonitorSpec) { if in.AttachMetadata != nil { in, out := &in.AttachMetadata, &out.AttachMetadata *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } + if in.BodySizeLimit != nil { + in, out := &in.BodySizeLimit, &out.BodySizeLimit + *out = new(ByteSize) **out = **in } } @@ -2691,21 +2944,6 @@ func (in *TLSConfig) DeepCopy() *TLSConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TLSConfigValidationError) DeepCopyInto(out *TLSConfigValidationError) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfigValidationError. -func (in *TLSConfigValidationError) DeepCopy() *TLSConfigValidationError { - if in == nil { - return nil - } - out := new(TLSConfigValidationError) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TSDBSpec) DeepCopyInto(out *TSDBSpec) { *out = *in @@ -2950,6 +3188,11 @@ func (in *ThanosRulerSpec) DeepCopyInto(out *ThanosRulerSpec) { *out = make([]Argument, len(*in)) copy(*out, *in) } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(ThanosRulerWebSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerSpec. @@ -2984,6 +3227,22 @@ func (in *ThanosRulerStatus) DeepCopy() *ThanosRulerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThanosRulerWebSpec) DeepCopyInto(out *ThanosRulerWebSpec) { + *out = *in + in.WebConfigFileFields.DeepCopyInto(&out.WebConfigFileFields) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosRulerWebSpec. 
+func (in *ThanosRulerWebSpec) DeepCopy() *ThanosRulerWebSpec { + if in == nil { + return nil + } + out := new(ThanosRulerWebSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ThanosSpec) DeepCopyInto(out *ThanosSpec) { *out = *in @@ -3057,6 +3316,27 @@ func (in *ThanosSpec) DeepCopy() *ThanosSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + in.CoreV1TopologySpreadConstraint.DeepCopyInto(&out.CoreV1TopologySpreadConstraint) + if in.AdditionalLabelSelectors != nil { + in, out := &in.AdditionalLabelSelectors, &out.AdditionalLabelSelectors + *out = new(AdditionalLabelSelectors) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebConfigFileFields) DeepCopyInto(out *WebConfigFileFields) { *out = *in diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_types.go index 2b10b74dc..dc7986829 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/alertmanager_config_types.go @@ -18,6 +18,8 @@ import ( "encoding/json" "errors" "fmt" + "html/template" + "regexp" "strings" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -561,17 +563,31 @@ type OpsGenieConfigResponder struct { Username string `json:"username,omitempty"` // Type of responder. // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Enum=team;teams;user;escalation;schedule Type string `json:"type"` } +const opsgenieValidTypesRe = `^(team|teams|user|escalation|schedule)$` + +var opsgenieTypeMatcher = regexp.MustCompile(opsgenieValidTypesRe) + // Validate ensures OpsGenieConfigResponder is valid. func (r *OpsGenieConfigResponder) Validate() error { if r.ID == "" && r.Name == "" && r.Username == "" { return errors.New("responder must have at least an ID, a Name or an Username defined") } - return nil + if strings.Contains(r.Type, "{{") { + _, err := template.New("").Parse(r.Type) + if err != nil { + return fmt.Errorf("responder %v type is not a valid template: %w", r, err) + } + return nil + } + + if opsgenieTypeMatcher.MatchString(strings.ToLower(r.Type)) { + return nil + } + return fmt.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) } // HTTPConfig defines a client HTTP configuration. 
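(Illustrative note, not part of the vendored diff: the hunk above replaces the kubebuilder enum on the OpsGenie responder type with runtime validation, which accepts either a Go template or one of team/teams/user/escalation/schedule, case-insensitively. A hedged sketch; the responder values are invented.)

package main

import (
	"fmt"

	monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

func main() {
	responders := []monitoringv1alpha1.OpsGenieConfigResponder{
		{Name: "sre", Type: "Team"},                        // keyword match, case-insensitive
		{Name: "oncall", Type: "{{ .CommonLabels.team }}"}, // parsed as a template, accepted if it compiles
		{Name: "bogus", Type: "squad"},                     // neither a template nor a known keyword
	}
	for _, r := range responders {
		if err := r.Validate(); err != nil {
			fmt.Printf("responder %q rejected: %v\n", r.Name, err)
		}
	}
}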
@@ -792,6 +808,9 @@ type PushoverConfig struct { // A title for supplementary URL, otherwise just the URL is shown // +optional URLTitle string `json:"urlTitle,omitempty"` + // The name of a device to send the notification to + // +optional + Device *string `json:"device,omitempty"` // The name of one of the sounds supported by device clients to override the user's default sound choice // +optional Sound string `json:"sound,omitempty"` @@ -910,6 +929,10 @@ type MSTeamsConfig struct { // Message title template. // +optional Title *string `json:"title,omitempty"` + // Message summary template. + // It requires Alertmanager >= 0.27.0. + // +optional + Summary *string `json:"summary,omitempty"` // Message body template. // +optional Text *string `json:"text,omitempty"` @@ -957,8 +980,8 @@ type Matcher struct { // +optional MatchType MatchType `json:"matchType,omitempty"` // Whether to match on equality (false) or regular-expression (true). - // Deprecated as of AlertManager >= v0.22.0 where a user should use MatchType instead. // +optional + // Deprecated: for AlertManager >= v0.22.0, `matchType` should be used instead. Regex bool `json:"regex,omitempty"` } @@ -1093,7 +1116,7 @@ type DayOfMonthRange struct { // MonthRange is an inclusive range of months of the year beginning in January // Months can be specified by name (e.g 'January') by numerical month (e.g '1') or as an inclusive range (e.g 'January:March', '1:3', '1:March') -// +kubebuilder:validation:Pattern=`^((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12]))$)|$)` +// +kubebuilder:validation:Pattern=`^((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9]))$)|$)` type MonthRange string // YearRange is an inclusive range of years diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/prometheusagent_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/prometheusagent_types.go index 1495360ff..f3335b85f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/prometheusagent_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/prometheusagent_types.go @@ -34,10 +34,6 @@ func (l *PrometheusAgent) SetCommonPrometheusFields(f monitoringv1.CommonPrometh l.Spec.CommonPrometheusFields = f } -func (l *PrometheusAgent) GetTypeMeta() metav1.TypeMeta { - return l.TypeMeta -} - func (l *PrometheusAgent) GetStatus() monitoringv1.PrometheusStatus { return l.Status } @@ -53,6 +49,9 @@ func (l *PrometheusAgent) GetStatus() monitoringv1.PrometheusStatus { // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 // +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.shards,statuspath=.status.shards,selectorpath=.status.selector +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// 
+genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // PrometheusAgent defines a Prometheus agent deployment. type PrometheusAgent struct { diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/scrapeconfig_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/scrapeconfig_types.go index 001df7dc1..1be3fc8a7 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/scrapeconfig_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/scrapeconfig_types.go @@ -34,23 +34,50 @@ type Target string // +kubebuilder:validation:Pattern=`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$` type SDFile string +// NamespaceDiscovery is the configuration for discovering +// Kubernetes namespaces. +type NamespaceDiscovery struct { + // Includes the namespace in which the Prometheus pod exists to the list of watched namesapces. + // +optional + IncludeOwnNamespace *bool `json:"ownNamespace,omitempty"` + // List of namespaces where to watch for resources. + // If empty and `ownNamespace` isn't true, Prometheus watches for resources in all namespaces. + // +optional + Names []string `json:"names,omitempty"` +} + +type AttachMetadata struct { + // Attaches node metadata to discovered targets. + // When set to true, Prometheus must have the `get` permission on the + // `Nodes` objects. + // Only valid for Pod, Endpoint and Endpointslice roles. + // + // +optional + Node *bool `json:"node,omitempty"` +} + // EC2Filter is the configuration for filtering EC2 instances. type EC2Filter struct { Name string `json:"name"` Values []string `json:"values"` } -// K8SRole is role of the service in Kubernetes. -// Currently the only supported role is "Node". -// +kubebuilder:validation:Enum=Node;node -type K8SRole string +// DockerFilter is the configuration to limit the discovery process to a subset of available resources. +type DockerFilter struct { + Name string `json:"name"` + Values []string `json:"values"` +} + +// Role is role of the service in Kubernetes. +// +kubebuilder:validation:Enum=Node;node;Service;service;Pod;pod;Endpoints;endpoints;EndpointSlice;endpointslice;Ingress;ingress +type Role string // K8SSelectorConfig is Kubernetes Selector Config type K8SSelectorConfig struct { // +kubebuilder:validation:Required - Role K8SRole `json:"role"` - Label string `json:"label,omitempty"` - Field string `json:"field,omitempty"` + Role Role `json:"role"` + Label string `json:"label,omitempty"` + Field string `json:"field,omitempty"` } // +genclient @@ -112,12 +139,39 @@ type ScrapeConfigSpec struct { // EC2SDConfigs defines a list of EC2 service discovery configurations. // +optional EC2SDConfigs []EC2SDConfig `json:"ec2SDConfigs,omitempty"` + // AzureSDConfigs defines a list of Azure service discovery configurations. + // +optional + AzureSDConfigs []AzureSDConfig `json:"azureSDConfigs,omitempty"` + // GCESDConfigs defines a list of GCE service discovery configurations. + // +optional + GCESDConfigs []GCESDConfig `json:"gceSDConfigs,omitempty"` + // OpenStackSDConfigs defines a list of OpenStack service discovery configurations. + // +optional + OpenStackSDConfigs []OpenStackSDConfig `json:"openstackSDConfigs,omitempty"` + // DigitalOceanSDConfigs defines a list of DigitalOcean service discovery configurations. 
+ // +optional + DigitalOceanSDConfigs []DigitalOceanSDConfig `json:"digitalOceanSDConfigs,omitempty"` + // KumaSDConfigs defines a list of Kuma service discovery configurations. + // +optional + KumaSDConfigs []KumaSDConfig `json:"kumaSDConfigs,omitempty"` + // EurekaSDConfigs defines a list of Eureka service discovery configurations. + // +optional + EurekaSDConfigs []EurekaSDConfig `json:"eurekaSDConfigs,omitempty"` + // DockerSDConfigs defines a list of Docker service discovery configurations. + // +optional + DockerSDConfigs []DockerSDConfig `json:"dockerSDConfigs,omitempty"` + // HetznerSDConfigs defines a list of Hetzner service discovery configurations. + // +optional + HetznerSDConfigs []HetznerSDConfig `json:"hetznerSDConfigs,omitempty"` + // NomadSDConfigs defines a list of Nomad service discovery configurations. + // +optional + NomadSDConfigs []NomadSDConfig `json:"NomadSDConfigs,omitempty"` // RelabelConfigs defines how to rewrite the target's labels before scraping. // Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields. // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config // +optional - RelabelConfigs []*v1.RelabelConfig `json:"relabelings,omitempty"` + RelabelConfigs []v1.RelabelConfig `json:"relabelings,omitempty"` // MetricsPath HTTP path to scrape for metrics. If empty, Prometheus uses the default value (e.g. /metrics). // +optional MetricsPath *string `json:"metricsPath,omitempty"` @@ -127,9 +181,26 @@ type ScrapeConfigSpec struct { // ScrapeTimeout is the number of seconds to wait until a scrape request times out. // +optional ScrapeTimeout *v1.Duration `json:"scrapeTimeout,omitempty"` + // The protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []v1.ScrapeProtocol `json:"scrapeProtocols,omitempty"` // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. // +optional HonorTimestamps *bool `json:"honorTimestamps,omitempty"` + // TrackTimestampsStaleness whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` // HonorLabels chooses the metric's labels on collisions with target labels. // +optional HonorLabels *bool `json:"honorLabels,omitempty"` @@ -142,6 +213,13 @@ type ScrapeConfigSpec struct { // +kubebuilder:validation:Enum=HTTP;HTTPS // +optional Scheme *string `json:"scheme,omitempty"` + // When false, Prometheus will request uncompressed response from the scraped target. + // + // It requires Prometheus >= v2.49.0. + // + // If unset, Prometheus uses true by default. + // +optional + EnableCompression *bool `json:"enableCompression,omitempty"` // BasicAuth information to use on every scrape request. // +optional BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` @@ -178,7 +256,15 @@ type ScrapeConfigSpec struct { KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. 
// +optional - MetricRelabelConfigs []*v1.RelabelConfig `json:"metricRelabelings,omitempty"` + MetricRelabelConfigs []v1.RelabelConfig `json:"metricRelabelings,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` } // StaticConfig defines a Prometheus static configuration. @@ -231,15 +317,57 @@ type HTTPSDConfig struct { // TLS configuration applying to the target HTTP endpoint. // +optional TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` } // KubernetesSDConfig allows retrieving scrape targets from Kubernetes' REST API. // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config // +k8s:openapi-gen=true type KubernetesSDConfig struct { + // The API server address consisting of a hostname or IP address followed + // by an optional port number. + // If left empty, Prometheus is assumed to run inside + // of the cluster. It will discover API servers automatically and use the pod's + // CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + // +optional + APIServer *string `json:"apiServer,omitempty"` // Role of the Kubernetes entities that should be discovered. // +required - Role K8SRole `json:"role"` + Role Role `json:"role"` + // BasicAuth information to use on every scrape request. + // Cannot be set at the same time as `authorization`, or `oauth2`. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // Authorization header to use on every scrape request. + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // Optional OAuth 2.0 configuration. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // Configure whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + // Whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` + // TLS configuration to use on every scrape request. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // Optional namespace discovery. If omitted, Prometheus discovers targets across all namespaces. + // +optional + Namespaces *NamespaceDiscovery `json:"namespaces,omitempty"` + // Optional metadata to attach to discovered targets. + // It requires Prometheus >= v2.35.0 for `pod` role and + // Prometheus >= v2.37.0 for `endpoints` and `endpointslice` roles. + // +optional + AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` // Selector to select objects. // +optional // +listType=map @@ -282,20 +410,20 @@ type ConsulSDConfig struct { // The string by which Consul tags are joined into the tag label. // If unset, Prometheus uses its default value. // +optional - TagSeparator *string `json:"tag_separator,omitempty"` + TagSeparator *string `json:"tagSeparator,omitempty"` // Node metadata key/value pairs to filter nodes for a given service. 
// +mapType:=atomic // +optional - NodeMeta map[string]string `json:"node_meta,omitempty"` + NodeMeta map[string]string `json:"nodeMeta,omitempty"` // Allow stale Consul results (see https://www.consul.io/api/features/consistency.html). Will reduce load on Consul. // If unset, Prometheus uses its default value. // +optional - AllowStale *bool `json:"allow_stale,omitempty"` + AllowStale *bool `json:"allowStale,omitempty"` // The time after which the provided names are refreshed. // On large setup it might be a good idea to increase this value because the catalog will change all the time. // If unset, Prometheus uses its default value. // +optional - RefreshInterval *v1.Duration `json:"refresh_interval,omitempty"` + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` // BasicAuth information to authenticate against the Consul Server. // More info: https://prometheus.io/docs/operating/configuration/#endpoints // +optional @@ -306,30 +434,17 @@ type ConsulSDConfig struct { // Optional OAuth 2.0 configuration. // +optional Oauth2 *v1.OAuth2 `json:"oauth2,omitempty"` - // Optional proxy URL. - // +optional - ProxyUrl *string `json:"proxy_url,omitempty"` - // Comma-separated string that can contain IPs, CIDR notation, domain names - // that should be excluded from proxying. IP and domain names can - // contain port numbers. - // +optional - NoProxy *string `json:"no_proxy,omitempty"` - // Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) - // If unset, Prometheus uses its default value. - // +optional - ProxyFromEnvironment *bool `json:"proxy_from_environment,omitempty"` - // Specifies headers to send to proxies during CONNECT requests. - // +mapType:=atomic + // ProxyConfig allows customizing the proxy behaviour for this scrape config. // +optional - ProxyConnectHeader map[string]corev1.SecretKeySelector `json:"proxy_connect_header,omitempty"` + v1.ProxyConfig `json:",inline"` // Configure whether HTTP requests follow HTTP 3xx redirects. // If unset, Prometheus uses its default value. // +optional - FollowRedirects *bool `json:"follow_redirects,omitempty"` + FollowRedirects *bool `json:"followRedirects,omitempty"` // Whether to enable HTTP2. // If unset, Prometheus uses its default value. // +optional - EnableHttp2 *bool `json:"enable_http2,omitempty"` + EnableHttp2 *bool `json:"enableHTTP2,omitempty"` // TLS Config // +optional TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` @@ -347,9 +462,12 @@ type DNSSDConfig struct { // If not set, Prometheus uses its default value. // +optional RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` - // The type of DNS query to perform. One of SRV, A, AAAA or MX. + // The type of DNS query to perform. One of SRV, A, AAAA, MX or NS. // If not set, Prometheus uses its default value. - // +kubebuilder:validation:Enum=SRV;A;AAAA;MX + // + // When set to NS, It requires Prometheus >= 2.49.0. + // + // +kubebuilder:validation:Enum=SRV;A;AAAA;MX;NS // +optional Type *string `json:"type"` // The port number used if the query type is not SRV @@ -390,3 +508,401 @@ type EC2SDConfig struct { // +optional Filters []*EC2Filter `json:"filters"` } + +// AzureSDConfig allow retrieving scrape targets from Azure VMs. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#azure_sd_config +// +k8s:openapi-gen=true +type AzureSDConfig struct { + // The Azure environment. 
+ // +optional + Environment *string `json:"environment,omitempty"` + // # The authentication method, either `OAuth` or `ManagedIdentity` or `SDK`. + // See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview + // SDK authentication method uses environment variables by default. + // See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication + // +kubebuilder:validation:Enum=OAuth;ManagedIdentity;SDK + // +optional + AuthenticationMethod *string `json:"authenticationMethod,omitempty"` + // The subscription ID. Always required. + // +kubebuilder:validation:MinLength=1 + // +required + SubscriptionID string `json:"subscriptionID"` + // Optional tenant ID. Only required with the OAuth authentication method. + // +optional + TenantID *string `json:"tenantID,omitempty"` + // Optional client ID. Only required with the OAuth authentication method. + // +optional + ClientID *string `json:"clientID,omitempty"` + // Optional client secret. Only required with the OAuth authentication method. + // +optional + ClientSecret *corev1.SecretKeySelector `json:"clientSecret,omitempty"` + // Optional resource group name. Limits discovery to this resource group. + // +optional + ResourceGroup *string `json:"resourceGroup,omitempty"` + // RefreshInterval configures the refresh interval at which Prometheus will re-read the instance list. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // The port to scrape metrics from. If using the public IP address, this must + // instead be specified in the relabeling rule. + // +optional + Port *int `json:"port"` +} + +// GCESDConfig configures scrape targets from GCP GCE instances. +// The private IP address is used by default, but may be changed to +// the public IP address with relabeling. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config +// +// The GCE service discovery will load the Google Cloud credentials +// from the file specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable. +// See https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform +// +// A pre-requisite for using GCESDConfig is that a Secret containing valid +// Google Cloud credentials is mounted into the Prometheus or PrometheusAgent +// pod via the `.spec.secrets` field and that the GOOGLE_APPLICATION_CREDENTIALS +// environment variable is set to /etc/prometheus/secrets//. +// +k8s:openapi-gen=true +type GCESDConfig struct { + // The Google Cloud Project ID + // +kubebuilder:validation:MinLength:=1 + // +required + Project string `json:"project"` + // The zone of the scrape targets. If you need multiple zones use multiple GCESDConfigs. + // +kubebuilder:validation:MinLength:=1 + // +required + Zone string `json:"zone"` + // Filter can be used optionally to filter the instance list by other criteria + // Syntax of this filter is described in the filter query parameter section: + // https://cloud.google.com/compute/docs/reference/latest/instances/list + // +optional + Filter *string `json:"filter,omitempty"` + // RefreshInterval configures the refresh interval at which Prometheus will re-read the instance list. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // The port to scrape metrics from. If using the public IP address, this must + // instead be specified in the relabeling rule. 
+ // +optional + Port *int `json:"port"` + // The tag separator is used to separate the tags on concatenation + // +optional + TagSeparator *string `json:"tagSeparator,omitempty"` +} + +// OpenStackSDConfig allow retrieving scrape targets from OpenStack Nova instances. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config +// +k8s:openapi-gen=true +type OpenStackSDConfig struct { + // The OpenStack role of entities that should be discovered. + // +kubebuilder:validation:Enum=Instance;instance;Hypervisor;hypervisor + // +required + Role string `json:"role"` + // The OpenStack Region. + // +kubebuilder:validation:MinLength:=1 + // +required + Region string `json:"region"` + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. + // +optional + IdentityEndpoint *string `json:"identityEndpoint,omitempty"` + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. + // In Identity V3, either userid or a combination of username + // and domainId or domainName are needed + // +optional + Username *string `json:"username,omitempty"` + // UserID + // +optional + UserID *string `json:"userid,omitempty"` + // Password for the Identity V2 and V3 APIs. Consult with your provider's + // control panel to discover your account's preferred method of authentication. + // +optional + Password *corev1.SecretKeySelector `json:"password,omitempty"` + // At most one of domainId and domainName must be provided if using username + // with Identity V3. Otherwise, either are optional. + // +optional + DomainName *string `json:"domainName,omitempty"` + // DomainID + // +optional + DomainID *string `json:"domainID,omitempty"` + // The ProjectId and ProjectName fields are optional for the Identity V2 API. + // Some providers allow you to specify a ProjectName instead of the ProjectId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + // +optional + ProjectName *string `json:"projectName,omitempty"` + // ProjectID + // +optional + ProjectID *string `json:"projectID,omitempty"` + // The ApplicationCredentialID or ApplicationCredentialName fields are + // required if using an application credential to authenticate. Some providers + // allow you to create an application credential to authenticate rather than a + // password. + // +optional + ApplicationCredentialName *string `json:"applicationCredentialName,omitempty"` + // ApplicationCredentialID + // +optional + ApplicationCredentialID *string `json:"applicationCredentialId,omitempty"` + // The applicationCredentialSecret field is required if using an application + // credential to authenticate. + // +optional + ApplicationCredentialSecret *corev1.SecretKeySelector `json:"applicationCredentialSecret,omitempty"` + // Whether the service discovery should list all instances for all projects. + // It is only relevant for the 'instance' role and usually requires admin permissions. + // +optional + AllTenants *bool `json:"allTenants,omitempty"` + // Refresh interval to re-read the instance list. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // The port to scrape metrics from. If using the public IP address, this must + // instead be specified in the relabeling rule. + // +optional + Port *int `json:"port"` + // Availability of the endpoint to connect to. 
+ // +kubebuilder:validation:Enum=Public;public;Admin;admin;Internal;internal + // +optional + Availability *string `json:"availability,omitempty"` + // TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` +} + +// DigitalOceanSDConfig allow retrieving scrape targets from DigitalOcean's Droplets API. +// This service discovery uses the public IPv4 address by default, by that can be changed with relabeling +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config +// +k8s:openapi-gen=true +type DigitalOceanSDConfig struct { + // Authorization header configuration to authenticate against the DigitalOcean API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // Optional OAuth 2.0 configuration. + // Cannot be set at the same time as `authorization`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // Configure whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + // Whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` + // TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // The port to scrape metrics from. + // +optional + Port *int `json:"port,omitempty"` + // Refresh interval to re-read the instance list. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` +} + +// KumaSDConfig allow retrieving scrape targets from Kuma's control plane. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kuma_sd_config +// +k8s:openapi-gen=true +type KumaSDConfig struct { + // Address of the Kuma Control Plane's MADS xDS server. + // +kubebuilder:validation:MinLength=1 + // +required + Server string `json:"server"` + // Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend. + // +optional + ClientID *string `json:"clientID,omitempty"` + // The time to wait between polling update requests. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // The time after which the monitoring assignments are refreshed. + // +optional + FetchTimeout *v1.Duration `json:"fetchTimeout,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // TLS configuration to use on every scrape request + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // BasicAuth information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // Authorization header to use on every scrape request. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // Optional OAuth 2.0 configuration. + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // Configure whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + // Whether to enable HTTP2. 
+ // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` +} + +// Eureka SD configurations allow retrieving scrape targets using the Eureka REST API. +// Prometheus will periodically check the REST endpoint and create a target for every app instance. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config +// +k8s:openapi-gen=true +type EurekaSDConfig struct { + // The URL to connect to the Eureka server. + // +kubebuilder:validation:MinLength=1 + // +required + Server string `json:"server"` + // BasicAuth information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // Authorization header to use on every scrape request. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // Optional OAuth 2.0 configuration. + // Cannot be set at the same time as `authorization` or `basic_auth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // Configure whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + // Whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` + // Refresh interval to re-read the instance list. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` +} + +// Docker SD configurations allow retrieving scrape targets from Docker Engine hosts. +// This SD discovers "containers" and will create a target for each network IP and +// port the container is configured to expose. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#docker_sd_config +// +k8s:openapi-gen=true +type DockerSDConfig struct { + // Address of the docker daemon + // +kubebuilder:validation:MinLength=1 + // +required + Host string `json:"host"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // The port to scrape metrics from. + // +optional + Port *int `json:"port,omitempty"` + // The host to use if the container is in host networking mode. + // +optional + HostNetworkingHost *string `json:"hostNetworkingHost,omitempty"` + // Optional filters to limit the discovery process to a subset of the available resources. + // +optional + Filters *[]DockerFilter `json:"filters,omitempty"` + // Time after which the container is refreshed. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // BasicAuth information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // Authorization header configuration to authenticate against the Docker API. + // Cannot be set at the same time as `oauth2`. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // Optional OAuth 2.0 configuration. + // Cannot be set at the same time as `authorization`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // Configure whether HTTP requests follow HTTP 3xx redirects. 
+ // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + // Whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` +} + +// HetznerSDConfig allow retrieving scrape targets from Hetzner Cloud API and Robot API. +// This service discovery uses the public IPv4 address by default, but that can be changed with relabeling +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#hetzner_sd_config +// +k8s:openapi-gen=true +type HetznerSDConfig struct { + // The Hetzner role of entities that should be discovered. + // +kubebuilder:validation:Enum=hcloud;Hcloud;robot;Robot + // +required + Role string `json:"role"` + // BasicAuth information to use on every scrape request, required when role is robot. + // Role hcloud does not support basic auth. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // Authorization header configuration, required when role is hcloud. + // Role robot does not support bearer token authentication. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // Optional OAuth 2.0 configuration. + // Cannot be used at the same time as `basic_auth` or `authorization`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. + // +optional + v1.ProxyConfig `json:",inline"` + // Configure whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + // Whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` + // TLS configuration to use on every scrape request. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // The port to scrape metrics from. + // +optional + Port *int `json:"port,omitempty"` + // The time after which the servers are refreshed. + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` +} + +// NomadSDConfig configurations allow retrieving scrape targets from Nomad's Service API. +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#nomad_sd_config +// +k8s:openapi-gen=true +type NomadSDConfig struct { + // The information to access the Nomad API. It is to be defined + // as the Nomad documentation requires. + // +optional + AllowStale *bool `json:"allowStale,omitempty"` + // +optional + Namespace *string `json:"namespace,omitempty"` + // +optional + RefreshInterval *v1.Duration `json:"refreshInterval,omitempty"` + // +optional + Region *string `json:"region,omitempty"` + // +kubebuilder:validation:MinLength=1 + // +required + Server string `json:"server"` + // +optional + TagSeparator *string `json:"tagSeparator,omitempty"` + // BasicAuth information to use on every scrape request. + // +optional + BasicAuth *v1.BasicAuth `json:"basicAuth,omitempty"` + // Authorization header to use on every scrape request. + // +optional + Authorization *v1.SafeAuthorization `json:"authorization,omitempty"` + // Optional OAuth 2.0 configuration. + // Cannot be set at the same time as `authorization` or `basic_auth`. + // +optional + OAuth2 *v1.OAuth2 `json:"oauth2,omitempty"` + // TLS configuration applying to the target HTTP endpoint. + // +optional + TLSConfig *v1.SafeTLSConfig `json:"tlsConfig,omitempty"` + // ProxyConfig allows customizing the proxy behaviour for this scrape config. 
+ // +optional + v1.ProxyConfig `json:",inline"` + // Configure whether HTTP requests follow HTTP 3xx redirects. + // +optional + FollowRedirects *bool `json:"followRedirects,omitempty"` + // Whether to enable HTTP2. + // +optional + EnableHTTP2 *bool `json:"enableHTTP2,omitempty"` +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/validation.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/validation.go index 9a124a70d..995f4675f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/validation.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/validation.go @@ -183,12 +183,12 @@ func (yr YearRange) Parse() (*ParsedRange, error) { start, err := strconv.Atoi(startStr) if err != nil { - fmt.Errorf("start year cannot be %s parsed: %w", startStr, err) + return nil, fmt.Errorf("start year cannot be %s parsed: %w", startStr, err) } end, err := strconv.Atoi(endStr) if err != nil { - fmt.Errorf("end year cannot be %s parsed: %w", endStr, err) + return nil, fmt.Errorf("end year cannot be %s parsed: %w", endStr, err) } if start > end { @@ -221,16 +221,16 @@ func (w Weekday) Int() (int, error) { func (m Month) Int() (int, error) { normaliseMonth := Month(strings.ToLower(string(m))) - day, found := months[normaliseMonth] + month, found := months[normaliseMonth] if !found { i, err := strconv.Atoi(string(normaliseMonth)) - if err != nil { - return day, fmt.Errorf("%s is an invalid month", m) + if err != nil || i < 1 || i > 12 { + return month, fmt.Errorf("%s is an invalid month", m) } - day = i + month = i } - return day, nil + return month, nil } // Validate the DayOfMonthRange diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/zz_generated.deepcopy.go index fc14a49e3..25c12fc2f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Copyright The prometheus-operator Authors // @@ -112,6 +111,81 @@ func (in *AlertmanagerConfigSpec) DeepCopy() *AlertmanagerConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachMetadata) DeepCopyInto(out *AttachMetadata) { + *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachMetadata. +func (in *AttachMetadata) DeepCopy() *AttachMetadata { + if in == nil { + return nil + } + out := new(AttachMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
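
For orientation, the hunks above introduce the DigitalOcean, Kuma, Eureka, Docker, Hetzner and Nomad service-discovery types for the v1alpha1 ScrapeConfig API, and fix the v1alpha1 YearRange/Month validation so that parse errors are actually returned and months are bounded to 1-12. Below is a minimal Go sketch of what a consumer of the bumped API could now build; the field names come from this diff, while the surrounding ScrapeConfig object, the namespace, and the k8s.io/utils/ptr helper are assumptions made for the example, not part of the change itself.

package example

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// dropletScrapeConfig builds an illustrative ScrapeConfig that scrapes
// DigitalOcean droplets via the newly vendored DigitalOceanSDConfigs field.
func dropletScrapeConfig() monitoringv1alpha1.ScrapeConfig {
	return monitoringv1alpha1.ScrapeConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "droplets", Namespace: "monitoring"},
		Spec: monitoringv1alpha1.ScrapeConfigSpec{
			DigitalOceanSDConfigs: []monitoringv1alpha1.DigitalOceanSDConfig{{
				Port:            ptr.To(9100),                        // *int, as defined above
				RefreshInterval: ptr.To(monitoringv1.Duration("2m")), // *v1.Duration
				FollowRedirects: ptr.To(true),
			}},
			// TrackTimestampsStaleness is another field added by this bump; see the
			// ScrapeConfigSpec deepcopy changes further down.
			TrackTimestampsStaleness: ptr.To(true),
		},
	}
}
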
+func (in *AzureSDConfig) DeepCopyInto(out *AzureSDConfig) { + *out = *in + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = new(string) + **out = **in + } + if in.AuthenticationMethod != nil { + in, out := &in.AuthenticationMethod, &out.AuthenticationMethod + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroup != nil { + in, out := &in.ResourceGroup, &out.ResourceGroup + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSDConfig. +func (in *AzureSDConfig) DeepCopy() *AzureSDConfig { + if in == nil { + return nil + } + out := new(AzureSDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConsulSDConfig) DeepCopyInto(out *ConsulSDConfig) { *out = *in @@ -187,28 +261,7 @@ func (in *ConsulSDConfig) DeepCopyInto(out *ConsulSDConfig) { *out = new(monitoringv1.OAuth2) (*in).DeepCopyInto(*out) } - if in.ProxyUrl != nil { - in, out := &in.ProxyUrl, &out.ProxyUrl - *out = new(string) - **out = **in - } - if in.NoProxy != nil { - in, out := &in.NoProxy, &out.NoProxy - *out = new(string) - **out = **in - } - if in.ProxyFromEnvironment != nil { - in, out := &in.ProxyFromEnvironment, &out.ProxyFromEnvironment - *out = new(bool) - **out = **in - } - if in.ProxyConnectHeader != nil { - in, out := &in.ProxyConnectHeader, &out.ProxyConnectHeader - *out = make(map[string]corev1.SecretKeySelector, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) if in.FollowRedirects != nil { in, out := &in.FollowRedirects, &out.FollowRedirects *out = new(bool) @@ -286,6 +339,57 @@ func (in *DayOfMonthRange) DeepCopy() *DayOfMonthRange { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DigitalOceanSDConfig) DeepCopyInto(out *DigitalOceanSDConfig) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DigitalOceanSDConfig. +func (in *DigitalOceanSDConfig) DeepCopy() *DigitalOceanSDConfig { + if in == nil { + return nil + } + out := new(DigitalOceanSDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiscordConfig) DeepCopyInto(out *DiscordConfig) { *out = *in @@ -322,6 +426,98 @@ func (in *DiscordConfig) DeepCopy() *DiscordConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerFilter) DeepCopyInto(out *DockerFilter) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerFilter. +func (in *DockerFilter) DeepCopy() *DockerFilter { + if in == nil { + return nil + } + out := new(DockerFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerSDConfig) DeepCopyInto(out *DockerSDConfig) { + *out = *in + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } + if in.HostNetworkingHost != nil { + in, out := &in.HostNetworkingHost, &out.HostNetworkingHost + *out = new(string) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new([]DockerFilter) + if **in != nil { + in, out := *in, *out + *out = make([]DockerFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerSDConfig. +func (in *DockerSDConfig) DeepCopy() *DockerSDConfig { + if in == nil { + return nil + } + out := new(DockerSDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EC2Filter) DeepCopyInto(out *EC2Filter) { *out = *in @@ -453,6 +649,57 @@ func (in *EmailConfig) DeepCopy() *EmailConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EurekaSDConfig) DeepCopyInto(out *EurekaSDConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EurekaSDConfig. 
+func (in *EurekaSDConfig) DeepCopy() *EurekaSDConfig { + if in == nil { + return nil + } + out := new(EurekaSDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FileSDConfig) DeepCopyInto(out *FileSDConfig) { *out = *in @@ -478,6 +725,41 @@ func (in *FileSDConfig) DeepCopy() *FileSDConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCESDConfig) DeepCopyInto(out *GCESDConfig) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } + if in.TagSeparator != nil { + in, out := &in.TagSeparator, &out.TagSeparator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCESDConfig. +func (in *GCESDConfig) DeepCopy() *GCESDConfig { + if in == nil { + return nil + } + out := new(GCESDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPConfig) DeepCopyInto(out *HTTPConfig) { *out = *in @@ -546,6 +828,7 @@ func (in *HTTPSDConfig) DeepCopyInto(out *HTTPSDConfig) { *out = new(monitoringv1.SafeTLSConfig) (*in).DeepCopyInto(*out) } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSDConfig. @@ -558,6 +841,62 @@ func (in *HTTPSDConfig) DeepCopy() *HTTPSDConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HetznerSDConfig) DeepCopyInto(out *HetznerSDConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HetznerSDConfig. +func (in *HetznerSDConfig) DeepCopy() *HetznerSDConfig { + if in == nil { + return nil + } + out := new(HetznerSDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *InhibitRule) DeepCopyInto(out *InhibitRule) { *out = *in @@ -598,42 +937,149 @@ func (in *K8SSelectorConfig) DeepCopy() *K8SSelectorConfig { if in == nil { return nil } - out := new(K8SSelectorConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KeyValue) DeepCopyInto(out *KeyValue) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyValue. -func (in *KeyValue) DeepCopy() *KeyValue { - if in == nil { - return nil + out := new(K8SSelectorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyValue) DeepCopyInto(out *KeyValue) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyValue. +func (in *KeyValue) DeepCopy() *KeyValue { + if in == nil { + return nil + } + out := new(KeyValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesSDConfig) DeepCopyInto(out *KubernetesSDConfig) { + *out = *in + if in.APIServer != nil { + in, out := &in.APIServer, &out.APIServer + *out = new(string) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(NamespaceDiscovery) + (*in).DeepCopyInto(*out) + } + if in.AttachMetadata != nil { + in, out := &in.AttachMetadata, &out.AttachMetadata + *out = new(AttachMetadata) + (*in).DeepCopyInto(*out) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]K8SSelectorConfig, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSDConfig. +func (in *KubernetesSDConfig) DeepCopy() *KubernetesSDConfig { + if in == nil { + return nil + } + out := new(KubernetesSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KumaSDConfig) DeepCopyInto(out *KumaSDConfig) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.FetchTimeout != nil { + in, out := &in.FetchTimeout, &out.FetchTimeout + *out = new(monitoringv1.Duration) + **out = **in + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) } - out := new(KeyValue) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesSDConfig) DeepCopyInto(out *KubernetesSDConfig) { - *out = *in - if in.Selectors != nil { - in, out := &in.Selectors, &out.Selectors - *out = make([]K8SSelectorConfig, len(*in)) - copy(*out, *in) + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSDConfig. -func (in *KubernetesSDConfig) DeepCopy() *KubernetesSDConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KumaSDConfig. +func (in *KumaSDConfig) DeepCopy() *KumaSDConfig { if in == nil { return nil } - out := new(KubernetesSDConfig) + out := new(KumaSDConfig) in.DeepCopyInto(out) return out } @@ -652,6 +1098,11 @@ func (in *MSTeamsConfig) DeepCopyInto(out *MSTeamsConfig) { *out = new(string) **out = **in } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } if in.Text != nil { in, out := &in.Text, &out.Text *out = new(string) @@ -711,6 +1162,197 @@ func (in *MuteTimeInterval) DeepCopy() *MuteTimeInterval { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceDiscovery) DeepCopyInto(out *NamespaceDiscovery) { + *out = *in + if in.IncludeOwnNamespace != nil { + in, out := &in.IncludeOwnNamespace, &out.IncludeOwnNamespace + *out = new(bool) + **out = **in + } + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceDiscovery. +func (in *NamespaceDiscovery) DeepCopy() *NamespaceDiscovery { + if in == nil { + return nil + } + out := new(NamespaceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NomadSDConfig) DeepCopyInto(out *NomadSDConfig) { + *out = *in + if in.AllowStale != nil { + in, out := &in.AllowStale, &out.AllowStale + *out = new(bool) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TagSeparator != nil { + in, out := &in.TagSeparator, &out.TagSeparator + *out = new(string) + **out = **in + } + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(monitoringv1.BasicAuth) + (*in).DeepCopyInto(*out) + } + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(monitoringv1.SafeAuthorization) + (*in).DeepCopyInto(*out) + } + if in.OAuth2 != nil { + in, out := &in.OAuth2, &out.OAuth2 + *out = new(monitoringv1.OAuth2) + (*in).DeepCopyInto(*out) + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.FollowRedirects != nil { + in, out := &in.FollowRedirects, &out.FollowRedirects + *out = new(bool) + **out = **in + } + if in.EnableHTTP2 != nil { + in, out := &in.EnableHTTP2, &out.EnableHTTP2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NomadSDConfig. +func (in *NomadSDConfig) DeepCopy() *NomadSDConfig { + if in == nil { + return nil + } + out := new(NomadSDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackSDConfig) DeepCopyInto(out *OpenStackSDConfig) { + *out = *in + if in.IdentityEndpoint != nil { + in, out := &in.IdentityEndpoint, &out.IdentityEndpoint + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.ProjectID != nil { + in, out := &in.ProjectID, &out.ProjectID + *out = new(string) + **out = **in + } + if in.ApplicationCredentialName != nil { + in, out := &in.ApplicationCredentialName, &out.ApplicationCredentialName + *out = new(string) + **out = **in + } + if in.ApplicationCredentialID != nil { + in, out := &in.ApplicationCredentialID, &out.ApplicationCredentialID + *out = new(string) + **out = **in + } + if in.ApplicationCredentialSecret != nil { + in, out := &in.ApplicationCredentialSecret, &out.ApplicationCredentialSecret + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.AllTenants != nil { + in, out := &in.AllTenants, &out.AllTenants + *out = new(bool) + **out = **in + } + if in.RefreshInterval != nil { + in, out := &in.RefreshInterval, &out.RefreshInterval + *out = new(monitoringv1.Duration) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } + if in.Availability != nil { + in, out := &in.Availability, &out.Availability + *out = new(string) + **out = **in + } + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(monitoringv1.SafeTLSConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackSDConfig. +func (in *OpenStackSDConfig) DeepCopy() *OpenStackSDConfig { + if in == nil { + return nil + } + out := new(OpenStackSDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
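
Each of the new SD config types above also gains generated DeepCopy/DeepCopyInto helpers like the ones in this hunk; controllers rely on them to avoid mutating objects shared through informer caches. A short, hedged sketch of typical use follows (the ScrapeConfig wrapper and its generated DeepCopy are assumed; only the DockerSDConfigs and Port fields come from this diff):

package example

import (
	monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
)

// withDockerPort returns a copy of a cached ScrapeConfig with every Docker SD
// entry pointed at the given port, leaving the cached object untouched.
func withDockerPort(cached *monitoringv1alpha1.ScrapeConfig, port int) *monitoringv1alpha1.ScrapeConfig {
	out := cached.DeepCopy()
	for i := range out.Spec.DockerSDConfigs {
		out.Spec.DockerSDConfigs[i].Port = &port
	}
	return out
}
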
func (in *OpsGenieConfig) DeepCopyInto(out *OpsGenieConfig) { *out = *in @@ -942,6 +1584,11 @@ func (in *PushoverConfig) DeepCopyInto(out *PushoverConfig) { *out = new(string) **out = **in } + if in.Device != nil { + in, out := &in.Device, &out.Device + *out = new(string) + **out = **in + } if in.HTTPConfig != nil { in, out := &in.HTTPConfig, &out.HTTPConfig *out = new(HTTPConfig) @@ -1242,15 +1889,74 @@ func (in *ScrapeConfigSpec) DeepCopyInto(out *ScrapeConfigSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AzureSDConfigs != nil { + in, out := &in.AzureSDConfigs, &out.AzureSDConfigs + *out = make([]AzureSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GCESDConfigs != nil { + in, out := &in.GCESDConfigs, &out.GCESDConfigs + *out = make([]GCESDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OpenStackSDConfigs != nil { + in, out := &in.OpenStackSDConfigs, &out.OpenStackSDConfigs + *out = make([]OpenStackSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DigitalOceanSDConfigs != nil { + in, out := &in.DigitalOceanSDConfigs, &out.DigitalOceanSDConfigs + *out = make([]DigitalOceanSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KumaSDConfigs != nil { + in, out := &in.KumaSDConfigs, &out.KumaSDConfigs + *out = make([]KumaSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EurekaSDConfigs != nil { + in, out := &in.EurekaSDConfigs, &out.EurekaSDConfigs + *out = make([]EurekaSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerSDConfigs != nil { + in, out := &in.DockerSDConfigs, &out.DockerSDConfigs + *out = make([]DockerSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HetznerSDConfigs != nil { + in, out := &in.HetznerSDConfigs, &out.HetznerSDConfigs + *out = make([]HetznerSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NomadSDConfigs != nil { + in, out := &in.NomadSDConfigs, &out.NomadSDConfigs + *out = make([]NomadSDConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.RelabelConfigs != nil { in, out := &in.RelabelConfigs, &out.RelabelConfigs - *out = make([]*monitoringv1.RelabelConfig, len(*in)) + *out = make([]monitoringv1.RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(monitoringv1.RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.MetricsPath != nil { @@ -1268,11 +1974,21 @@ func (in *ScrapeConfigSpec) DeepCopyInto(out *ScrapeConfigSpec) { *out = new(monitoringv1.Duration) **out = **in } + if in.ScrapeProtocols != nil { + in, out := &in.ScrapeProtocols, &out.ScrapeProtocols + *out = make([]monitoringv1.ScrapeProtocol, len(*in)) + copy(*out, *in) + } if in.HonorTimestamps != nil { in, out := &in.HonorTimestamps, &out.HonorTimestamps *out = new(bool) **out = **in } + if in.TrackTimestampsStaleness != nil { + in, out := &in.TrackTimestampsStaleness, &out.TrackTimestampsStaleness + *out = new(bool) + **out = **in + } if in.HonorLabels != nil { in, out := &in.HonorLabels, &out.HonorLabels *out = new(bool) @@ -1286,7 +2002,8 @@ func (in *ScrapeConfigSpec) DeepCopyInto(out *ScrapeConfigSpec) { if val == nil { (*out)[key] = nil } else { - in, out := &val, &outVal + inVal := (*in)[key] + in, 
out := &inVal, &outVal *out = make([]string, len(*in)) copy(*out, *in) } @@ -1298,6 +2015,11 @@ func (in *ScrapeConfigSpec) DeepCopyInto(out *ScrapeConfigSpec) { *out = new(string) **out = **in } + if in.EnableCompression != nil { + in, out := &in.EnableCompression, &out.EnableCompression + *out = new(bool) + **out = **in + } if in.BasicAuth != nil { in, out := &in.BasicAuth, &out.BasicAuth *out = new(monitoringv1.BasicAuth) @@ -1345,15 +2067,17 @@ func (in *ScrapeConfigSpec) DeepCopyInto(out *ScrapeConfigSpec) { } if in.MetricRelabelConfigs != nil { in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs - *out = make([]*monitoringv1.RelabelConfig, len(*in)) + *out = make([]monitoringv1.RelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(monitoringv1.RelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + if in.ScrapeClassName != nil { + in, out := &in.ScrapeClassName, &out.ScrapeClassName + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScrapeConfigSpec. diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/alertmanager_config_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/alertmanager_config_types.go index dda3ac08c..16dbbf308 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/alertmanager_config_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/alertmanager_config_types.go @@ -18,6 +18,8 @@ import ( "encoding/json" "errors" "fmt" + "html/template" + "regexp" "strings" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -559,13 +561,28 @@ type OpsGenieConfigResponder struct { Type string `json:"type"` } +const opsgenieValidTypesRe = `^(team|teams|user|escalation|schedule)$` + +var opsgenieTypeMatcher = regexp.MustCompile(opsgenieValidTypesRe) + // Validate ensures OpsGenieConfigResponder is valid. func (r *OpsGenieConfigResponder) Validate() error { if r.ID == "" && r.Name == "" && r.Username == "" { return errors.New("responder must have at least an ID, a Name or an Username defined") } - return nil + if strings.Contains(r.Type, "{{") { + _, err := template.New("").Parse(r.Type) + if err != nil { + return fmt.Errorf("responder %v type is not a valid template: %w", r, err) + } + return nil + } + + if opsgenieTypeMatcher.MatchString(strings.ToLower(r.Type)) { + return nil + } + return fmt.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) } // HTTPConfig defines a client HTTP configuration. @@ -784,6 +801,9 @@ type PushoverConfig struct { // A title for supplementary URL, otherwise just the URL is shown // +optional URLTitle string `json:"urlTitle,omitempty"` + // The name of a device to send the notification to + // +optional + Device *string `json:"device,omitempty"` // The name of one of the sounds supported by device clients to override the user's default sound choice // +optional Sound string `json:"sound,omitempty"` @@ -902,6 +922,10 @@ type MSTeamsConfig struct { // Message title template. // +optional Title *string `json:"title,omitempty"` + // Message summary template. + // It requires Alertmanager >= 0.27.0. 
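
The v1beta1 hunk above tightens OpsGenieConfigResponder.Validate: a responder type must now either parse as a Go template or match team|teams|user|escalation|schedule case-insensitively (the MSTeamsConfig Summary field whose comment ends just above continues below). A hedged illustration of the resulting behaviour, with made-up values:

package example

import "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1"

func opsGenieResponderExamples() []error {
	cases := []v1beta1.OpsGenieConfigResponder{
		{Name: "sre", Type: "Team"},                      // accepted: matches the allowed set, case-insensitively
		{Name: "sre", Type: "{{ .CommonLabels.owner }}"}, // accepted: parses as a Go template
		{Name: "sre", Type: "group"},                     // rejected: neither a template nor a known responder type
		{},                                               // rejected: no ID, Name or Username set
	}
	errs := make([]error, 0, len(cases))
	for _, c := range cases {
		errs = append(errs, c.Validate()) // nil for the accepted cases, non-nil otherwise
	}
	return errs
}
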
+ // +optional + Summary *string `json:"summary,omitempty"` // Message body template. // +optional Text *string `json:"text,omitempty"` @@ -1093,7 +1117,7 @@ type DayOfMonthRange struct { // MonthRange is an inclusive range of months of the year beginning in January // Months can be specified by name (e.g 'January') by numerical month (e.g '1') or as an inclusive range (e.g 'January:March', '1:3', '1:March') -// +kubebuilder:validation:Pattern=`^((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12]))$)|$)` +// +kubebuilder:validation:Pattern=`^((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|1[0-2]|[1-9]))$)|$)` type MonthRange string // YearRange is an inclusive range of years diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_from.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_from.go index daba52948..964ad3878 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_from.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_from.go @@ -32,6 +32,7 @@ func convertRouteFrom(in *v1alpha1.Route) (*Route, error) { out := &Route{ Receiver: in.Receiver, + Continue: in.Continue, GroupBy: in.GroupBy, GroupWait: in.GroupWait, GroupInterval: in.GroupInterval, @@ -414,6 +415,7 @@ func convertPushoverConfigFrom(in v1alpha1.PushoverConfig) PushoverConfig { Message: in.Message, URL: in.URL, URLTitle: in.URLTitle, + Device: in.Device, Sound: in.Sound, Priority: in.Priority, Retry: in.Retry, @@ -457,6 +459,7 @@ func convertMSTeamsConfigFrom(in v1alpha1.MSTeamsConfig) MSTeamsConfig { SendResolved: in.SendResolved, WebhookURL: in.WebhookURL, Title: in.Title, + Summary: in.Summary, Text: in.Text, HTTPConfig: convertHTTPConfigFrom(in.HTTPConfig), } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_to.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_to.go index 1e96af965..739323b01 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_to.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/conversion_to.go @@ -32,6 +32,7 @@ func convertRouteTo(in *Route) (*v1alpha1.Route, error) { out := &v1alpha1.Route{ Receiver: in.Receiver, + Continue: in.Continue, GroupBy: in.GroupBy, GroupWait: in.GroupWait, GroupInterval: in.GroupInterval, @@ -410,6 +411,7 @@ func convertPushoverConfigTo(in PushoverConfig) v1alpha1.PushoverConfig { Message: in.Message, URL: in.URL, URLTitle: in.URLTitle, + Device: in.Device, Sound: in.Sound, Priority: in.Priority, Retry: in.Retry, @@ -454,6 +456,7 @@ func convertMSTeamsConfigTo(in MSTeamsConfig) v1alpha1.MSTeamsConfig { WebhookURL: in.WebhookURL, Title: in.Title, Text: in.Text, + Summary: in.Summary, HTTPConfig: convertHTTPConfigTo(in.HTTPConfig), } } diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/validation.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/validation.go index 
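
Also in this hunk, the MonthRange kubebuilder pattern swaps "[1-12]" for "1[0-2]|[1-9]". Inside a character class, "[1-12]" only matches the single characters "1" and "2", so numeric months 3 through 12 were rejected at the CRD level; the new alternation, together with the Month.Int bounds check in validation.go, accepts 1 through 12 and nothing else. A small self-contained check of the two fragments, illustrative only (the v1beta1 validation.go diff resumes below):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old fragment: a character class, effectively the set {'1', '2'}.
	old := regexp.MustCompile(`^[1-12]$`)
	// New fragment: a real numeric alternation covering 1..12.
	updated := regexp.MustCompile(`^(1[0-2]|[1-9])$`)

	for _, m := range []string{"1", "3", "10", "12", "13"} {
		fmt.Printf("%-3s old=%-5v updated=%v\n", m, old.MatchString(m), updated.MatchString(m))
	}
	// Output:
	// 1   old=true  updated=true
	// 3   old=false updated=true
	// 10  old=false updated=true
	// 12  old=false updated=true
	// 13  old=false updated=false
}
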
640ecee5d..01e0322f2 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/validation.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/validation.go @@ -183,12 +183,12 @@ func (yr YearRange) Parse() (*ParsedRange, error) { start, err := strconv.Atoi(startStr) if err != nil { - fmt.Errorf("start year cannot be %s parsed: %w", startStr, err) + return nil, fmt.Errorf("start year cannot be %s parsed: %w", startStr, err) } end, err := strconv.Atoi(endStr) if err != nil { - fmt.Errorf("end year cannot be %s parsed: %w", endStr, err) + return nil, fmt.Errorf("end year cannot be %s parsed: %w", endStr, err) } if start > end { @@ -221,16 +221,16 @@ func (w Weekday) Int() (int, error) { func (m Month) Int() (int, error) { normaliseMonth := Month(strings.ToLower(string(m))) - day, found := months[normaliseMonth] + month, found := months[normaliseMonth] if !found { i, err := strconv.Atoi(string(normaliseMonth)) - if err != nil { - return day, fmt.Errorf("%s is an invalid month", m) + if err != nil || i < 1 || i > 12 { + return month, fmt.Errorf("%s is an invalid month", m) } - day = i + month = i } - return day, nil + return month, nil } // Validate the DayOfMonthRange diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/zz_generated.deepcopy.go index ed2a3b1b9..0308df55f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1beta1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Copyright The prometheus-operator Authors // @@ -321,6 +320,11 @@ func (in *MSTeamsConfig) DeepCopyInto(out *MSTeamsConfig) { *out = new(string) **out = **in } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } if in.Text != nil { in, out := &in.Text, &out.Text *out = new(string) @@ -521,6 +525,11 @@ func (in *PushoverConfig) DeepCopyInto(out *PushoverConfig) { *out = new(string) **out = **in } + if in.Device != nil { + in, out := &in.Device, &out.Device + *out = new(string) + **out = **in + } if in.HTTPConfig != nil { in, out := &in.HTTPConfig, &out.HTTPConfig *out = new(HTTPConfig) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09eb..e14b766a3 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200ad..099867dee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. 
+// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. +func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8..a6a2d2fc2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac177..d73c4652e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a1..4a55a4005 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3..1ec2b1407 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( 
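
The Connectx wrapper and the CONNECT_*/SAE_* constants above are new darwin-only additions in this x/sys bump. A minimal, hedged sketch of a caller follows; the destination address is a placeholder and the flag choice is illustrative (the z/OS constants hunk resumes below):

//go:build darwin

package example

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func connectxExample() error {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	// No source interface or address is pinned (0 / nil); 192.0.2.1 is a
	// documentation address used purely as a placeholder.
	dst := &unix.SockaddrInet4{Port: 443, Addr: [4]byte{192, 0, 2, 1}}

	// Flags such as unix.CONNECT_RESUME_ON_READ_WRITE or unix.CONNECT_DATA_IDEMPOTENT
	// (added in this bump) could be passed here; 0 keeps plain connect semantics,
	// and nothing is enqueued because iov is nil.
	n, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil)
	if err != nil {
		return err
	}
	fmt.Println("bytes enqueued:", n)
	return nil
}
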
AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533ef..24b346e1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646ba..ebd213100 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f69..824b9c2d5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75d..4f178a229 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3..d003c3d43 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74..0d45a941a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3..51e13eb05 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee7..d002d8ef3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8..3f863d898 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go 
b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acf..61c729310 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c790..b5d17414f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index b102b95a0..9f2550dc3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2486,7 +2486,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3807,6 +3807,9 @@ const ( ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc0414..ad05b51a6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd17..5cee9a314 100644 --- 
a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 4d0c15745..7b97a154c 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -2031,6 +2032,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3e..4c2e1bdc0 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = 
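The four //sys lines above surface the kernel32 code-page calls (GetConsoleCP, GetConsoleOutputCP, SetConsoleCP, SetConsoleOutputCP) that this x/sys bump vendors in; the generated zsyscall bodies follow further down. A small sketch of the typical use, switching console output to UTF-8 (code page 65001) and restoring the previous value on exit:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

const cpUTF8 = 65001 // CP_UTF8

func main() {
	// Save the current output code page so it can be restored on exit.
	old, err := windows.GetConsoleOutputCP()
	if err != nil {
		panic(err)
	}
	defer windows.SetConsoleOutputCP(old)

	// Switch the console to UTF-8 for the lifetime of this process.
	if err := windows.SetConsoleOutputCP(cpUTF8); err != nil {
		panic(err)
	}
	fmt.Println("console output code page is now", cpUTF8)
}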
modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f56060..df6bf948e 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := 
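The x/term hunk around this point ORs ENABLE_VIRTUAL_TERMINAL_INPUT into the raw-mode console flags, so with x/term v0.24.0 raw mode on Windows also delivers special keys as VT escape sequences. Nothing changes at the call site; a minimal sketch of the usual raw-mode pattern for reference:

//go:build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	oldState, err := term.MakeRaw(fd) // now also enables ENABLE_VIRTUAL_TERMINAL_INPUT
	if err != nil {
		panic(err)
	}
	defer term.Restore(fd, oldState)

	// Arrow keys and similar now arrive as VT escape sequences (e.g. ESC [ A)
	// instead of being consumed by the console's line input processing.
	buf := make([]byte, 16)
	n, _ := os.Stdin.Read(buf)
	fmt.Printf("\r\nread %d bytes: %q\r\n", n, buf[:n])
}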
windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go index 68eca0624..4ac01cc6f 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -1102,138 +1102,138 @@ func init() { } var fileDescriptor_66649ee9bbcd89d2 = []byte{ - // 2095 bytes of a gzipped FileDescriptorProto + // 2085 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, - 0xdd, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, - 0x74, 0xb7, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, + 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, + 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, - 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xc9, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, - 0xcf, 0x9d, 0xd9, 0xcc, 0x46, 0x91, 0x45, 0x6f, 0x9e, 0xdf, 0xf7, 0xfb, 0x7d, 0xbf, 0xb7, 0x86, - 0x8b, 0x87, 0x97, 0xbd, 0x86, 0x61, 0x37, 0x89, 0x63, 0x34, 0x5d, 0xea, 0xd9, 0xbe, 0xab, 0xd3, - 0xe6, 0xd1, 0x25, 0x62, 0x3a, 0x07, 0xe4, 0xcd, 0xe6, 0x3e, 0xb5, 0xa8, 0x4b, 0x18, 0x6d, 0x37, - 0x1c, 0xd7, 0x66, 0x36, 0x7a, 0x39, 0xa0, 0x6e, 0x10, 0xc7, 0x68, 0x84, 0xd4, 0x8d, 0x90, 0x7a, - 0xe5, 0xf5, 0x7d, 0x83, 0x1d, 0xf8, 0xbb, 0x0d, 0xdd, 0xee, 0x34, 0xf7, 0xed, 0x7d, 0xbb, 0x29, - 0x98, 0x76, 0xfd, 0x3d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x81, 0xb0, 0x15, 0x35, 0xa1, 0x5a, 0xb7, - 0x5d, 0xae, 0x36, 0xab, 0x70, 0xe5, 0xad, 0x98, 0xa6, 0x43, 0xf4, 0x03, 0xc3, 0xa2, 0xee, 0x71, - 0xd3, 0x39, 0xdc, 0x4f, 0xdb, 0x3b, 0x0c, 0x97, 0xd7, 0xec, 0x50, 0x46, 0xf2, 0x74, 0x35, 0x8b, - 0xb8, 0x5c, 0xdf, 0x62, 0x46, 0xa7, 0x5f, 0xcd, 0xdb, 0xcf, 0x62, 0xf0, 0xf4, 0x03, 0xda, 0x21, - 0x59, 0x3e, 0xf5, 0xbf, 0x0a, 0x2c, 0xac, 0x9b, 0xa6, 0xad, 0x13, 0x66, 0xd8, 0x16, 0xa6, 0x9e, - 0x6f, 0x32, 0xf4, 0x23, 0x98, 0x68, 0xd3, 0x23, 0x43, 0xa7, 0xde, 0xb2, 0x72, 0x4e, 0x59, 0x9d, - 0x5e, 0x7b, 0xab, 0x31, 0xc8, 0xd9, 0x8d, 0x4d, 0x41, 0x9c, 0x15, 0xa3, 0xcd, 0x3f, 0xec, 0xd6, - 0x47, 0x7a, 0xdd, 0xfa, 0x44, 0x80, 0xf7, 0x70, 0x28, 0x15, 0xdd, 0x81, 0x19, 0xcb, 0x6e, 0xd3, - 0x16, 0x35, 0xa9, 0xce, 0x6c, 0x77, 0xb9, 0x2a, 0xb4, 0x9c, 0x4b, 0x6a, 0xe1, 0x51, 0x68, 0x1c, - 0x5d, 0x6a, 0xdc, 0x48, 0xd0, 0x69, 0x0b, 0xbd, 0x6e, 0x7d, 0x26, 0x09, 0xc1, 0x29, 0x39, 0x68, - 0x0d, 0x40, 0xb7, 0x2d, 0xe6, 0xda, 0xa6, 0x49, 0xdd, 0xe5, 0xd1, 0x73, 0xca, 0xea, 0x94, 0x86, - 0xa4, 0x15, 0xb0, 0x11, 0x61, 0x70, 0x82, 0x4a, 0x7d, 0x5c, 0x85, 0x69, 0x8d, 0x78, 0x86, 0x1e, - 0x58, 0x89, 0x7e, 0x06, 0x40, 0x18, 0x73, 0x8d, 0x5d, 0x9f, 0x89, 0xf3, 0x57, 0x57, 0xa7, 0xd7, - 0xbe, 0x35, 0xf8, 0xfc, 0x09, 0xf6, 0xc6, 0x7a, 0xc4, 0xbb, 0x65, 0x31, 0xf7, 0x58, 0x7b, 0x25, - 0x54, 0x1f, 0x23, 0x7e, 0xfe, 0xaf, 0xfa, 0xec, 0x2d, 0x9f, 0x98, 0xc6, 0x9e, 0x41, 0xdb, 0x37, - 0x48, 0x87, 0xe2, 0x84, 0x46, 0x74, 0x04, 0x93, 0x3a, 0x71, 0x88, 0x6e, 0xb0, 0xe3, 0xe5, 0x8a, - 0xd0, 0xfe, 0x4e, 0x79, 0xed, 0x1b, 0x92, 0x33, 0xd0, 0x7d, 0x5e, 0xea, 0x9e, 0x0c, 0xc1, 0xfd, - 0x9a, 0x23, 0x5d, 0x2b, 0x26, 0xcc, 0x67, 0x6c, 0x47, 0x0b, 0x50, 0x3d, 0xa4, 0xc7, 
0x22, 0x07, - 0xa6, 0x30, 0xff, 0x13, 0x6d, 0xc0, 0xd8, 0x11, 0x31, 0x7d, 0xba, 0x5c, 0x11, 0x11, 0x7b, 0xbd, - 0x54, 0x5e, 0x84, 0x52, 0x71, 0xc0, 0xfb, 0x6e, 0xe5, 0xb2, 0xb2, 0x72, 0x08, 0xb3, 0x29, 0x5b, - 0x73, 0x74, 0x6d, 0xa6, 0x75, 0x35, 0x12, 0xba, 0xa2, 0x14, 0x6f, 0x38, 0x87, 0xfb, 0x69, 0xe5, - 0xb7, 0x7c, 0x62, 0x31, 0x83, 0x1d, 0x27, 0x94, 0xa9, 0x57, 0x60, 0x71, 0x63, 0xeb, 0x5a, 0x60, - 0x4d, 0x32, 0x57, 0xe8, 0x3d, 0xc7, 0xa5, 0x9e, 0x67, 0xd8, 0x56, 0xa0, 0x37, 0xce, 0x95, 0xad, - 0x08, 0x83, 0x13, 0x54, 0xea, 0x11, 0x8c, 0xcb, 0x2c, 0x39, 0x07, 0xa3, 0x16, 0xe9, 0x50, 0xc9, - 0x37, 0x23, 0xf9, 0x46, 0x85, 0x4f, 0x05, 0x06, 0x5d, 0x85, 0xb1, 0x5d, 0x1e, 0x19, 0x69, 0xfe, - 0x85, 0xd2, 0x41, 0xd4, 0xa6, 0x7a, 0xdd, 0xfa, 0x98, 0x00, 0xe0, 0x40, 0x84, 0xfa, 0xa0, 0x02, - 0x67, 0xb3, 0x45, 0xb6, 0x61, 0x5b, 0x7b, 0xc6, 0xbe, 0xef, 0x8a, 0x0f, 0xf4, 0x5d, 0x18, 0x0f, - 0x44, 0x4a, 0x8b, 0x56, 0xa5, 0x45, 0xe3, 0x2d, 0x01, 0x7d, 0xda, 0xad, 0x9f, 0xc9, 0xb2, 0x06, - 0x18, 0x2c, 0xf9, 0xd0, 0x2a, 0x4c, 0xba, 0xf4, 0x53, 0x9f, 0x7a, 0xcc, 0x13, 0x79, 0x37, 0xa5, - 0xcd, 0xf0, 0xd4, 0xc1, 0x12, 0x86, 0x23, 0x2c, 0xba, 0xaf, 0xc0, 0x52, 0x50, 0xc9, 0x29, 0x1b, - 0x64, 0x15, 0x5f, 0x2a, 0x93, 0x13, 0x29, 0x46, 0xed, 0xab, 0xd2, 0xd8, 0xa5, 0x1c, 0x24, 0xce, - 0x53, 0xa5, 0xfe, 0x47, 0x81, 0x33, 0xf9, 0x5d, 0x07, 0xed, 0xc1, 0x84, 0x2b, 0xfe, 0x0a, 0x8b, - 0xf7, 0xbd, 0x32, 0x06, 0xc9, 0x63, 0x16, 0xf7, 0xb0, 0xe0, 0xdb, 0xc3, 0xa1, 0x70, 0xa4, 0xc3, - 0xb8, 0x2e, 0x6c, 0x92, 0x55, 0xfa, 0xde, 0x70, 0x3d, 0x32, 0xed, 0x81, 0xb9, 0x30, 0x5c, 0x01, - 0x18, 0x4b, 0xd1, 0xea, 0x6f, 0x15, 0x98, 0xcf, 0x54, 0x11, 0xaa, 0x41, 0xd5, 0xb0, 0x98, 0x48, - 0xab, 0x6a, 0x10, 0xa3, 0x6d, 0x8b, 0xdd, 0xe1, 0xc9, 0x8e, 0x39, 0x02, 0x9d, 0x87, 0xd1, 0x5d, - 0xdb, 0x36, 0x45, 0x38, 0x26, 0xb5, 0xd9, 0x5e, 0xb7, 0x3e, 0xa5, 0xd9, 0xb6, 0x19, 0x50, 0x08, - 0x14, 0xfa, 0x06, 0x8c, 0x7b, 0xcc, 0x35, 0xac, 0x7d, 0xd9, 0x23, 0xe7, 0x7b, 0xdd, 0xfa, 0x74, - 0x4b, 0x40, 0x02, 0x32, 0x89, 0x46, 0xaf, 0xc2, 0xc4, 0x11, 0x75, 0x45, 0x85, 0x8c, 0x09, 0x4a, - 0xd1, 0x81, 0xef, 0x04, 0xa0, 0x80, 0x34, 0x24, 0x50, 0x7f, 0x5f, 0x81, 0x69, 0x19, 0x40, 0x93, - 0x18, 0x1d, 0x74, 0x37, 0x91, 0x50, 0x41, 0x24, 0x5e, 0x1b, 0x22, 0x12, 0xda, 0x42, 0xd8, 0xbc, - 0x72, 0x32, 0x90, 0xc2, 0xb4, 0x6e, 0x5b, 0x1e, 0x73, 0x89, 0x61, 0xc9, 0x74, 0x4d, 0x37, 0x88, - 0x41, 0x89, 0x27, 0xd9, 0xb4, 0x25, 0xa9, 0x60, 0x3a, 0x86, 0x79, 0x38, 0x29, 0x17, 0x7d, 0x1c, - 0x85, 0xb8, 0x2a, 0x34, 0xbc, 0x5d, 0x4a, 0x03, 0x3f, 0x7c, 0xb9, 0xe8, 0xfe, 0x4d, 0x81, 0xe5, - 0x22, 0xa6, 0x54, 0x3d, 0x2a, 0xcf, 0x55, 0x8f, 0x95, 0x93, 0xab, 0xc7, 0x3f, 0x2b, 0x89, 0xd8, - 0x7b, 0x1e, 0xfa, 0x04, 0x26, 0xf9, 0x6a, 0xd3, 0x26, 0x8c, 0xc8, 0x15, 0xe2, 0x8d, 0x41, 0xed, - 0xdb, 0x6b, 0x70, 0x6a, 0x3e, 0xee, 0x6f, 0xee, 0xfe, 0x98, 0xea, 0xec, 0x3a, 0x65, 0x24, 0x6e, - 0xc6, 0x31, 0x0c, 0x47, 0x52, 0xd1, 0x4d, 0x18, 0xf5, 0x1c, 0xaa, 0x0f, 0x33, 0x88, 0x84, 0x69, - 0x2d, 0x87, 0xea, 0x71, 0xbf, 0xe6, 0x5f, 0x58, 0x08, 0x52, 0x7f, 0x95, 0x0c, 0x86, 0xe7, 0xa5, - 0x83, 0x51, 0xe4, 0x62, 0xe5, 0xe4, 0x5c, 0xfc, 0x79, 0xd4, 0x0a, 0x84, 0x7d, 0xd7, 0x0c, 0x8f, - 0xa1, 0x8f, 0xfa, 0xdc, 0xdc, 0x28, 0xe7, 0x66, 0xce, 0x2d, 0x9c, 0x1c, 0x55, 0x59, 0x08, 0x49, - 0xb8, 0xf8, 0x06, 0x8c, 0x19, 0x8c, 0x76, 0xc2, 0xfa, 0xba, 0x50, 0xda, 0xc7, 0xda, 0xac, 0x94, - 0x3a, 0xb6, 0xcd, 0xf9, 0x71, 0x20, 0x46, 0xfd, 0x5d, 0x25, 0x75, 0x02, 0xee, 0x7b, 0xf4, 0x43, - 0x98, 0xf2, 0xe4, 0x44, 0x0e, 0xbb, 0xc4, 0xc5, 0x32, 0x7a, 0xa2, 0x95, 0x70, 0x51, 0xaa, 0x9a, - 0x0a, 0x21, 
0x1e, 0x8e, 0x25, 0x26, 0x2a, 0xb8, 0x32, 0x54, 0x05, 0x67, 0xe2, 0x5f, 0x54, 0xc1, - 0xe8, 0x2e, 0xcc, 0x7a, 0xbe, 0xc1, 0xc8, 0xae, 0x49, 0xf9, 0x5a, 0xea, 0x95, 0xde, 0x64, 0x17, - 0x7b, 0xdd, 0xfa, 0x6c, 0x2b, 0xc9, 0x8a, 0xd3, 0x92, 0x54, 0x17, 0xf2, 0x72, 0x03, 0xfd, 0x00, - 0xc6, 0x6d, 0x87, 0x7c, 0xea, 0x53, 0x19, 0xf0, 0x67, 0x2c, 0x87, 0x37, 0x05, 0x6d, 0x5e, 0x06, - 0x02, 0x3f, 0x4e, 0x80, 0xc6, 0x52, 0xa4, 0xfa, 0x40, 0x81, 0x85, 0x6c, 0x9f, 0x1c, 0xa2, 0x11, - 0xed, 0xc0, 0x5c, 0x87, 0x30, 0xfd, 0x20, 0x9a, 0x55, 0xa2, 0x3a, 0xa7, 0xb4, 0xd5, 0x5e, 0xb7, - 0x3e, 0x77, 0x3d, 0x85, 0x79, 0xda, 0xad, 0xa3, 0xf7, 0x7d, 0xd3, 0x3c, 0x4e, 0xaf, 0xa3, 0x19, - 0x7e, 0xf5, 0x17, 0x55, 0x98, 0x4d, 0x8d, 0x85, 0x12, 0x8b, 0xd7, 0x3a, 0xcc, 0xb7, 0xe3, 0x38, - 0x72, 0x84, 0x34, 0xe3, 0x2b, 0x92, 0x38, 0x99, 0x84, 0x82, 0x2f, 0x4b, 0x9f, 0xce, 0xca, 0xea, - 0x0b, 0xcf, 0xca, 0x3b, 0x30, 0x47, 0xa2, 0x45, 0xe0, 0xba, 0xdd, 0xa6, 0x72, 0x0c, 0x37, 0x24, - 0xd7, 0xdc, 0x7a, 0x0a, 0xfb, 0xb4, 0x5b, 0x3f, 0x95, 0x5d, 0x1f, 0x38, 0x1c, 0x67, 0xa4, 0xa0, - 0x57, 0x60, 0x4c, 0xb7, 0x7d, 0x8b, 0x89, 0x59, 0x5d, 0x8d, 0xab, 0x70, 0x83, 0x03, 0x71, 0x80, - 0x43, 0xdf, 0x84, 0x69, 0xd2, 0xee, 0x18, 0xd6, 0xba, 0xae, 0x53, 0xcf, 0x5b, 0x1e, 0x17, 0x5b, - 0x42, 0x34, 0x0b, 0xd7, 0x63, 0x14, 0x4e, 0xd2, 0xa9, 0x7f, 0x52, 0xc2, 0x15, 0xb4, 0x60, 0x55, - 0x42, 0x17, 0xf8, 0xe2, 0x25, 0x50, 0x32, 0x38, 0x89, 0xdd, 0x49, 0x80, 0x71, 0x88, 0x47, 0x5f, - 0x87, 0xf1, 0xb6, 0x6b, 0x1c, 0x51, 0x57, 0x46, 0x26, 0x2a, 0xaf, 0x4d, 0x01, 0xc5, 0x12, 0xcb, - 0x83, 0xed, 0x84, 0xab, 0x4c, 0x22, 0xd8, 0x3b, 0xb6, 0x6d, 0x62, 0x81, 0x11, 0x92, 0x84, 0x55, - 0xd2, 0x85, 0xb1, 0xa4, 0xc0, 0x56, 0x89, 0x55, 0x3f, 0x82, 0xb9, 0xcc, 0xfe, 0x7f, 0x15, 0xaa, - 0x3a, 0x35, 0x65, 0x15, 0x35, 0x07, 0x47, 0xb7, 0xef, 0xf6, 0xa0, 0x4d, 0xf4, 0xba, 0xf5, 0xea, - 0xc6, 0xd6, 0x35, 0xcc, 0x85, 0xa8, 0xbf, 0x51, 0xe0, 0xa5, 0xc2, 0x4a, 0x4b, 0x9c, 0x56, 0x19, - 0x78, 0x5a, 0x02, 0xe0, 0x10, 0x97, 0x74, 0x28, 0xa3, 0xae, 0x97, 0x33, 0xd8, 0xd2, 0xfd, 0x5c, - 0x5e, 0xec, 0x1b, 0x98, 0xfc, 0x64, 0xeb, 0x1e, 0xa3, 0x16, 0xdf, 0xc1, 0xe2, 0x99, 0xb9, 0x13, - 0x09, 0xc2, 0x09, 0xa1, 0xea, 0x1f, 0x2b, 0x70, 0x6a, 0xc7, 0x6e, 0xb7, 0xf4, 0x03, 0xda, 0xf6, - 0x4d, 0xc3, 0xda, 0xe7, 0x97, 0x62, 0x7a, 0x8f, 0x9d, 0xc0, 0xc0, 0xfe, 0x30, 0x35, 0xb0, 0x9f, - 0xd1, 0x88, 0xf3, 0x6c, 0x2c, 0x9a, 0xdc, 0xe8, 0x13, 0xbe, 0xcd, 0x12, 0xe6, 0x87, 0xdd, 0xf7, - 0xf2, 0x73, 0xc8, 0x16, 0xfc, 0x71, 0x64, 0x82, 0x6f, 0x2c, 0xe5, 0xaa, 0x7f, 0x57, 0x60, 0x39, - 0x8f, 0xed, 0x04, 0x86, 0xf0, 0xf7, 0xd2, 0x43, 0x78, 0x6d, 0xf8, 0xb3, 0x15, 0x4c, 0xe3, 0xcf, - 0x0a, 0xce, 0x24, 0xc6, 0xf2, 0x65, 0x98, 0x09, 0xda, 0x15, 0x6d, 0xf3, 0x69, 0x24, 0x13, 0xf7, - 0x94, 0x14, 0x34, 0xd3, 0x4a, 0xe0, 0x70, 0x8a, 0x12, 0xbd, 0x0b, 0x73, 0x8e, 0xcd, 0xa8, 0xc5, - 0x0c, 0x62, 0x06, 0x23, 0x31, 0xb8, 0x4c, 0x22, 0xde, 0xd7, 0x76, 0x52, 0x18, 0x9c, 0xa1, 0x54, - 0x7f, 0xa9, 0xc0, 0x4a, 0x71, 0x74, 0xd0, 0x4f, 0x61, 0x2e, 0x3c, 0xb1, 0xd8, 0x97, 0x4b, 0x5e, - 0xf0, 0x70, 0x92, 0x27, 0x96, 0x2d, 0x43, 0x7e, 0x26, 0xec, 0xb9, 0x29, 0x32, 0x0f, 0x67, 0x54, - 0xa9, 0xbf, 0xae, 0xc0, 0x6c, 0x8a, 0xe4, 0x04, 0x4a, 0xe6, 0x56, 0xaa, 0x64, 0x9a, 0xc3, 0x1c, - 0xb3, 0xa8, 0x56, 0xee, 0x66, 0x6a, 0xe5, 0xd2, 0x30, 0x42, 0x07, 0x17, 0x49, 0x4f, 0x81, 0x5a, - 0x8a, 0x9e, 0xef, 0x10, 0x7e, 0x87, 0xba, 0x98, 0xee, 0x51, 0x97, 0x5a, 0x3a, 0x45, 0x17, 0x61, - 0x92, 0x38, 0xc6, 0x15, 0xd7, 0xf6, 0x1d, 0x99, 0x52, 0x51, 0xea, 0xaf, 0xef, 0x6c, 0x0b, 0x38, - 0x8e, 0x28, 0x38, 0x75, 0x68, 0x91, 
0x9c, 0x00, 0x89, 0x3b, 0x61, 0x00, 0xc7, 0x11, 0x45, 0xb4, - 0x18, 0x8c, 0x16, 0x2e, 0x06, 0x1a, 0x54, 0x7d, 0xa3, 0x2d, 0x2f, 0xb2, 0x6f, 0x48, 0x82, 0xea, - 0xed, 0xed, 0xcd, 0xa7, 0xdd, 0xfa, 0xf9, 0xa2, 0xf7, 0x53, 0x76, 0xec, 0x50, 0xaf, 0x71, 0x7b, - 0x7b, 0x13, 0x73, 0x66, 0xf5, 0x2f, 0x0a, 0x2c, 0xa6, 0x0e, 0x79, 0x02, 0x2d, 0x60, 0x27, 0xdd, - 0x02, 0x5e, 0x1b, 0x22, 0x64, 0x05, 0xb5, 0x7f, 0x5f, 0x81, 0xb3, 0x03, 0xcb, 0xa2, 0xc4, 0x9a, - 0xf5, 0x1d, 0x98, 0xf7, 0xad, 0xf4, 0xf2, 0x1b, 0x54, 0xfa, 0x12, 0x5f, 0xb1, 0x6e, 0xa7, 0x51, - 0x38, 0x4b, 0xcb, 0xaf, 0x5b, 0x8b, 0x7d, 0x29, 0x8b, 0x3e, 0xc8, 0xbe, 0x3c, 0x5f, 0x28, 0x7d, - 0xe5, 0x1e, 0xf0, 0xdc, 0x9c, 0x7e, 0x16, 0xae, 0x94, 0x7a, 0x16, 0xfe, 0xbc, 0x02, 0x4b, 0x39, - 0xd9, 0x8f, 0x3e, 0x06, 0x88, 0xb7, 0xae, 0x9c, 0x60, 0xe7, 0x18, 0xd9, 0xf7, 0xa8, 0x34, 0x27, - 0xde, 0x83, 0x63, 0x68, 0x42, 0x22, 0xf2, 0x60, 0xda, 0xa5, 0x1e, 0x75, 0x8f, 0x68, 0xfb, 0x7d, - 0xdb, 0x95, 0x21, 0xff, 0xf6, 0x10, 0x21, 0xef, 0xab, 0xba, 0x78, 0xb9, 0xc3, 0xb1, 0x60, 0x9c, - 0xd4, 0x82, 0x5a, 0x70, 0xba, 0x4d, 0x49, 0xc2, 0x4c, 0xb1, 0xa6, 0xd1, 0xb6, 0x7c, 0x43, 0x3a, - 0x2b, 0x05, 0x9c, 0xde, 0xcc, 0x23, 0xc2, 0xf9, 0xbc, 0xea, 0x3f, 0x15, 0x38, 0x9d, 0xb2, 0xec, - 0x03, 0xda, 0x71, 0x4c, 0xc2, 0xe8, 0x09, 0x74, 0xce, 0xbb, 0xa9, 0xce, 0xf9, 0xce, 0x10, 0xee, - 0x0b, 0x8d, 0x2c, 0x7c, 0x27, 0xf8, 0x87, 0x02, 0x2f, 0xe5, 0x72, 0x9c, 0x40, 0x27, 0xf8, 0x30, - 0xdd, 0x09, 0xde, 0x7c, 0x8e, 0x73, 0x15, 0x74, 0x84, 0x47, 0x45, 0xa7, 0x6a, 0x05, 0x1b, 0xd6, - 0x97, 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x26, 0xa4, 0xe4, 0x37, 0x86, 0x12, 0x3d, 0x6d, 0x0d, - 0x40, 0xfe, 0x40, 0x16, 0xbe, 0x9f, 0x55, 0x63, 0xbb, 0xaf, 0x44, 0x18, 0x9c, 0xa0, 0x42, 0x57, - 0x01, 0x85, 0x16, 0xb6, 0x4c, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x55, 0xf0, 0xae, 0x48, 0x5e, 0x84, - 0xfb, 0x28, 0x70, 0x0e, 0x97, 0xfa, 0x57, 0x25, 0x5e, 0x32, 0x04, 0xf8, 0xff, 0xd5, 0xf3, 0xc2, - 0xb8, 0x42, 0xcf, 0x27, 0x87, 0xa4, 0xa0, 0x0c, 0x4b, 0xc3, 0x94, 0x29, 0xfd, 0xe2, 0x4a, 0x23, - 0x94, 0xf8, 0x9c, 0x43, 0x52, 0x58, 0x57, 0x50, 0x12, 0x0f, 0xaa, 0x99, 0x53, 0x88, 0x52, 0x28, - 0x7b, 0x99, 0xbb, 0x26, 0xaf, 0xae, 0x81, 0x5b, 0x5f, 0x2d, 0x67, 0x0e, 0x4f, 0xd3, 0xdc, 0x6b, - 0xee, 0x45, 0x98, 0xb4, 0xec, 0x36, 0x15, 0x8f, 0x19, 0x99, 0x55, 0xe8, 0x86, 0x84, 0xe3, 0x88, - 0xa2, 0xef, 0xe7, 0xd5, 0xd1, 0x17, 0xf4, 0xf3, 0x2a, 0x5f, 0xdf, 0x4c, 0xb9, 0xd5, 0x8f, 0x89, - 0xc9, 0x10, 0xaf, 0x6f, 0x12, 0x8e, 0x23, 0x0a, 0x74, 0x33, 0x9e, 0xe5, 0xe3, 0x22, 0x26, 0x5f, - 0x2b, 0x33, 0xcb, 0x8b, 0xc7, 0xb8, 0xa6, 0x3d, 0x7c, 0x52, 0x1b, 0x79, 0xf4, 0xa4, 0x36, 0xf2, - 0xc5, 0x93, 0xda, 0xc8, 0xfd, 0x5e, 0x4d, 0x79, 0xd8, 0xab, 0x29, 0x8f, 0x7a, 0x35, 0xe5, 0x8b, - 0x5e, 0x4d, 0x79, 0xdc, 0xab, 0x29, 0x9f, 0xfd, 0xbb, 0x36, 0xf2, 0xfd, 0x97, 0x07, 0xfd, 0x17, - 0xc1, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x8f, 0x9b, 0x77, 0x64, 0x20, 0x00, 0x00, + 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, + 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, + 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, + 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, + 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, + 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 
0x6a, 0x4a, 0xb5, 0xe1, 0x78, + 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, + 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, + 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, + 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, + 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, + 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, + 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, + 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, + 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, + 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, + 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a, + 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, + 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, + 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, + 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, + 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, + 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, + 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, + 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, + 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, + 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, + 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, + 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, + 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, + 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, + 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, + 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, + 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3, + 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, + 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, + 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, + 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, + 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6, + 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, + 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, + 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 
0x85, + 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, + 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, + 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, + 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, + 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, + 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, + 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, + 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, + 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, + 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, + 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, + 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30, + 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, + 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, + 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, + 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, + 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, + 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd, + 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, + 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, + 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, + 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, + 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, + 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, + 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, + 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, + 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, + 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12, + 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, + 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, + 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, + 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, + 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0, + 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, + 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, + 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, + 0x79, 0x1d, 0x73, 
0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, + 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, + 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, + 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65, + 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, + 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, + 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, + 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, + 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, + 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, + 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, + 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, + 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, + 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, + 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, + 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, + 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, + 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, + 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, + 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, + 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, + 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, + 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, + 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, + 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, + 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, + 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, + 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, + 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, + 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, + 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, + 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, + 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, + 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, + 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, + 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, + 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 
0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, + 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, + 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, + 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, + 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, + 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, + 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, + 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, + 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, + 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, + 0x20, 0x64, 0x20, 0x00, 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto index 35a7fbafa..b4428ad45 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -843,7 +843,7 @@ message ResourceSlice { message ResourceSliceList { // Standard list metadata // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta listMeta = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of resource ResourceSlices. repeated ResourceSlice items = 2; diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index 298d8d107..4efd2491d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -290,7 +290,7 @@ type ResourceSliceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // +optional - metav1.ListMeta `json:"listMeta" protobuf:"bytes,1,opt,name=listMeta"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of resource ResourceSlices. Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index 8154c99ce..1a44a971d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -379,7 +379,7 @@ func (ResourceSlice) SwaggerDoc() map[string]string { var map_ResourceSliceList = map[string]string{ "": "ResourceSliceList is a collection of ResourceSlices.", - "listMeta": "Standard list metadata", + "metadata": "Standard list metadata", "items": "Items is the list of resource ResourceSlices.", } diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go index b9993f4ca..dd286e1f2 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go @@ -20,6 +20,7 @@ limitations under the License. 
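The resource.k8s.io/v1alpha3 change above is the user-visible part of the 0.31.0 → 0.31.1 bump: ResourceSliceList's embedded ListMeta now serializes as "metadata" (previously "listMeta"), matching every other Kubernetes list type. A quick sketch to observe the effect:

package main

import (
	"encoding/json"
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	list := resourcev1alpha3.ResourceSliceList{
		TypeMeta: metav1.TypeMeta{Kind: "ResourceSliceList", APIVersion: "resource.k8s.io/v1alpha3"},
		ListMeta: metav1.ListMeta{ResourceVersion: "42"},
	}
	out, _ := json.MarshalIndent(list, "", "  ")
	// With k8s.io/api v0.31.1 the list metadata appears under "metadata";
	// v0.31.0 emitted it under "listMeta".
	fmt.Println(string(out))
}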
// +k8s:openapi-gen=true // +groupName=apiregistration.k8s.io // +k8s:defaulter-gen=TypeMeta +// +k8s:prerelease-lifecycle-gen=true // Package v1 contains the API Registration API, which is responsible for // registering an API `Group`/`Version` with another kubernetes like API server. diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto index 8413a158b..5571387ef 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto @@ -34,7 +34,7 @@ message APIService { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec contains information for locating and communicating with a server optional APIServiceSpec spec = 2; @@ -54,7 +54,7 @@ message APIServiceCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -70,7 +70,7 @@ message APIServiceList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of APIService repeated APIService items = 2; diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go index 14f71c704..fe5f64c0e 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go @@ -19,6 +19,7 @@ package v1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // APIServiceList is a list of APIService objects. type APIServiceList struct { @@ -145,6 +146,7 @@ type APIServiceStatus struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // APIService represents a server for a particular GroupVersion. // Name must be "version.group". diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..14d3e1f48 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIService) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIServiceList) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index dca9b1ed6..938039f4d 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -34,7 +34,7 @@ message APIService { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec contains information for locating and communicating with a server optional APIServiceSpec spec = 2; @@ -54,7 +54,7 @@ message APIServiceCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // Unique, one-word, CamelCase reason for the condition's last transition. 
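The new prerelease-lifecycle tags plus zz_generated.prerelease-lifecycle.go give the v1 apiregistration types generated APILifecycleIntroduced() accessors, both reporting 1.10. A trivial sketch of calling the generated method:

package main

import (
	"fmt"

	apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
)

func main() {
	var svc apiregv1.APIService
	major, minor := svc.APILifecycleIntroduced()
	fmt.Printf("apiregistration.k8s.io/v1 APIService introduced in %d.%d\n", major, minor) // 1.10
}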
// +optional @@ -70,7 +70,7 @@ message APIServiceList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of APIService repeated APIService items = 2; diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go index 25bf6ea44..b248437dc 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go @@ -20,12 +20,11 @@ package v1 import ( "context" - "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" ) @@ -40,6 +39,7 @@ type APIServicesGetter interface { type APIServiceInterface interface { Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (*v1.APIService, error) Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -52,133 +52,18 @@ type APIServiceInterface interface { // aPIServices implements APIServiceInterface type aPIServices struct { - client rest.Interface + *gentype.ClientWithList[*v1.APIService, *v1.APIServiceList] } // newAPIServices returns a APIServices func newAPIServices(c *ApiregistrationV1Client) *aPIServices { return &aPIServices{ - client: c.RESTClient(), + gentype.NewClientWithList[*v1.APIService, *v1.APIServiceList]( + "apiservices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.APIService { return &v1.APIService{} }, + func() *v1.APIServiceList { return &v1.APIServiceList{} }), } } - -// Get takes name of the aPIService, and returns the corresponding aPIService object, and an error if there is any. -func (c *aPIServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Get(). - Resource("apiservices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of APIServices that match those selectors. -func (c *aPIServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.APIServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.APIServiceList{} - err = c.client.Get(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested aPIServices. -func (c *aPIServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Post(). - Resource("apiservices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. -func (c *aPIServices) Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Put(). - Resource("apiservices"). - Name(aPIService.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Put(). - Resource("apiservices"). - Name(aPIService.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(aPIService). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the aPIService and deletes it. Returns an error if one occurs. -func (c *aPIServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("apiservices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *aPIServices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("apiservices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched aPIService. -func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) { - result = &v1.APIService{} - err = c.client.Patch(pt). - Resource("apiservices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 336333b26..6eca25de9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -500,7 +500,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/openshift/api v0.0.0-20240830142653-85dc560939ef => github.com/openshift/api v0.0.0-20240830142653-85dc560939ef +# github.com/openshift/api v0.0.0-20241203091751-58d4ac495429 => github.com/openshift/api v0.0.0-20240830142653-85dc560939ef ## explicit; go 1.22.0 github.com/openshift/api github.com/openshift/api/annotations @@ -571,7 +571,7 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/client-go v0.0.0-20240528061634-b054aa794d87 => github.com/openshift/client-go v0.0.0-20240528061634-b054aa794d87 +# github.com/openshift/client-go v0.0.0-20241203091221-452dfb8fa071 => github.com/openshift/client-go v0.0.0-20240528061634-b054aa794d87 ## explicit; go 1.22.0 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal @@ -611,8 +611,9 @@ github.com/openshift/client-go/operator/informers/externalversions/operator/v1 github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1 github.com/openshift/client-go/operator/listers/operator/v1 github.com/openshift/client-go/operator/listers/operator/v1alpha1 -# github.com/openshift/library-go v0.0.0-20240821135116-ade3966091b1 +# github.com/openshift/library-go v0.0.0-20241213140326-389618b826f5 ## explicit; go 1.22.0 +github.com/openshift/library-go/pkg/apiserver/jsonpatch github.com/openshift/library-go/pkg/certs github.com/openshift/library-go/pkg/controller/factory github.com/openshift/library-go/pkg/crypto @@ -661,8 +662,8 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1 -## explicit; go 1.17 +# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 +## explicit; go 1.21 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1 @@ -893,7 +894,7 @@ go.uber.org/zap/zapcore # go4.org v0.0.0-20200104003542-c7e774b10ea0 ## explicit go4.org/errorutil -# golang.org/x/crypto v0.26.0 +# golang.org/x/crypto v0.27.0 ## explicit; go 1.20 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 @@ -908,7 +909,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.28.0 +# golang.org/x/net v0.29.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -930,15 +931,15 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.23.0 +# golang.org/x/sys v0.25.0 ## explicit; go 1.18 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.23.0 +# golang.org/x/term v0.24.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.17.0 +# golang.org/x/text v0.18.0 ## explicit; go 1.18 golang.org/x/text/encoding 
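The APIService client rewrite above swaps the hand-written REST plumbing for the generic gentype.ClientWithList introduced in client-go 0.31, which is why the Get/List/Watch/Create/Update/Delete/Patch bodies disappear from the diff. Callers are unaffected; a sketch of the unchanged usage through the aggregator clientset (in-cluster credentials assumed here):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
)

func main() {
	cfg, err := rest.InClusterConfig() // out of cluster, build this from a kubeconfig instead
	if err != nil {
		panic(err)
	}
	client, err := aggregator.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same APIServiceInterface as before the gentype refactor.
	list, err := client.ApiregistrationV1().APIServices().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range list.Items {
		fmt.Println(s.Name)
	}
}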
golang.org/x/text/encoding/charmap @@ -1100,7 +1101,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.31.0 +# k8s.io/api v0.31.1 ## explicit; go 1.22.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1160,7 +1161,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.31.0 +# k8s.io/apiextensions-apiserver v0.31.1 ## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -1178,7 +1179,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensio k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1 -# k8s.io/apimachinery v0.31.0 +# k8s.io/apimachinery v0.31.1 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1235,11 +1236,11 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.31.0 +# k8s.io/apiserver v0.31.1 ## explicit; go 1.22.0 k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/storage/names -# k8s.io/client-go v0.31.0 +# k8s.io/client-go v0.31.1 ## explicit; go 1.22.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1514,7 +1515,7 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.31.0 +# k8s.io/component-base v0.31.1 ## explicit; go 1.22.0 k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry @@ -1529,7 +1530,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-aggregator v0.30.1 +# k8s.io/kube-aggregator v0.31.1 ## explicit; go 1.22.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -1550,7 +1551,7 @@ k8s.io/kube-openapi/pkg/validation/spec # k8s.io/pod-security-admission v0.31.0 ## explicit; go 1.22.0 k8s.io/pod-security-admission/api -# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 +# k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock