diff --git a/apis/ecs/v1alpha1/task_definition_family.go b/apis/ecs/v1alpha1/task_definition_family.go new file mode 100644 index 0000000000..873aa72a09 --- /dev/null +++ b/apis/ecs/v1alpha1/task_definition_family.go @@ -0,0 +1,319 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// TaskDefinitionFamilyParameters defines the desired state of TaskDefinitionFamily +type TaskDefinitionFamilyParameters struct { + // Region is which region the TaskDefinitionFamily will be created. + // +kubebuilder:validation:Required + Region string `json:"region"` + // A list of container definitions in JSON format that describe the different + // containers that make up your task. + // +kubebuilder:validation:Required + ContainerDefinitions []*ContainerDefinition `json:"containerDefinitions"` + // The number of CPU units used by the task. It can be expressed as an integer + // using CPU units (for example, 1024) or as a string using vCPUs (for example, + // 1 vCPU or 1 vcpu) in a task definition. String values are converted to an + // integer indicating the CPU units when the task definition is registered. + // + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If you're using the EC2 launch type, this field is optional. Supported values + // are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). If + // you do not specify a value, the parameter is ignored. + // + // If you're using the Fargate launch type, this field is required and you must + // use one of the following values, which determines your range of supported + // values for the memory parameter: + // + // The CPU units cannot be less than 1 vCPU when you use Windows containers + // on Fargate. + // + // * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), + // 2048 (2 GB) + // + // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 + // (3 GB), 4096 (4 GB) + // + // * 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 + // (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // + // * 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) + // in increments of 1024 (1 GB) + // + // * 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) + // in increments of 1024 (1 GB) + // + // * 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments + // This option requires Linux platform 1.4.0 or later. + // + // * 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments + // This option requires Linux platform 1.4.0 or later. + CPU *string `json:"cpu,omitempty"` + // The amount of ephemeral storage to allocate for the task. 
This parameter + // is used to expand the total amount of ephemeral storage available, beyond + // the default amount, for tasks hosted on Fargate. For more information, see + // Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) + // in the Amazon ECS User Guide for Fargate. + // + // For tasks using the Fargate launch type, the task requires the following + // platforms: + // + // * Linux platform version 1.4.0 or later. + // + // * Windows platform version 1.0.0 or later. + EphemeralStorage *EphemeralStorage `json:"ephemeralStorage,omitempty"` + // You must specify a family for a task definition. You can use it to track multiple + // versions of the same task definition. The family is used as a name for your + // task definition. Up to 255 letters (uppercase and lowercase), numbers, underscores, + // and hyphens are allowed. + // +kubebuilder:validation:Required + Family *string `json:"family"` + // The Elastic Inference accelerators to use for the containers in the task. + InferenceAccelerators []*InferenceAccelerator `json:"inferenceAccelerators,omitempty"` + // The IPC resource namespace to use for the containers in the task. The valid + // values are host, task, or none. If host is specified, then all containers + // within the tasks that specified the host IPC mode on the same container instance + // share the same IPC resources with the host Amazon EC2 instance. If task is + // specified, all containers within the specified task share the same IPC resources. + // If none is specified, then IPC resources within the containers of a task + // are private and not shared with other containers in a task or on the container + // instance. If no value is specified, then the IPC resource namespace sharing + // depends on the Docker daemon setting on the container instance. For more + // information, see IPC settings (https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) + // in the Docker run reference. + // + // If the host IPC mode is used, be aware that there is a heightened risk of + // undesired IPC namespace exposure. For more information, see Docker security + // (https://docs.docker.com/engine/security/security/). + // + // If you are setting namespaced kernel parameters using systemControls for + // the containers in the task, the following will apply to your IPC resource + // namespace. For more information, see System Controls (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // * For tasks that use the host IPC mode, IPC namespace related systemControls + // are not supported. + // + // * For tasks that use the task IPC mode, IPC namespace related systemControls + // will apply to all containers within a task. + // + // This parameter is not supported for Windows containers or tasks run on Fargate. + IPCMode *string `json:"ipcMode,omitempty"` + // The amount of memory (in MiB) used by the task. It can be expressed as an + // integer using MiB (for example, 1024) or as a string using GB (for example, + // 1GB or 1 GB) in a task definition. String values are converted to an integer + // indicating the MiB when the task definition is registered. + // + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If using the EC2 launch type, this field is optional.
+ // + // If using the Fargate launch type, this field is required and you must use + // one of the following values. This determines your range of supported values + // for the cpu parameter. + // + // The CPU units cannot be less than 1 vCPU when you use Windows containers + // on Fargate. + // + // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 + // vCPU) + // + // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + // 512 (.5 vCPU) + // + // * 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 + // (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + // + // * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) + // + // * Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + // Available cpu values: 4096 (4 vCPU) + // + // * Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 + // (8 vCPU) This option requires Linux platform 1.4.0 or later. + // + // * Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 + // (16 vCPU) This option requires Linux platform 1.4.0 or later. + Memory *string `json:"memory,omitempty"` + // The Docker networking mode to use for the containers in the task. The valid + // values are none, bridge, awsvpc, and host. If no network mode is specified, + // the default is bridge. + // + // For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For + // Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. + // For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc + // can be used. If the network mode is set to none, you cannot specify port + // mappings in your container definitions, and the tasks containers do not have + // external connectivity. The host and awsvpc network modes offer the highest + // networking performance for containers because they use the EC2 network stack + // instead of the virtualized network stack provided by the bridge mode. + // + // With the host and awsvpc network modes, exposed container ports are mapped + // directly to the corresponding host port (for the host network mode) or the + // attached elastic network interface port (for the awsvpc network mode), so + // you cannot take advantage of dynamic host port mappings. + // + // When using the host network mode, you should not run containers using the + // root user (UID 0). It is considered best practice to use a non-root user. + // + // If the network mode is awsvpc, the task is allocated an elastic network interface, + // and you must specify a NetworkConfiguration value when you create a service + // or run a task with the task definition. For more information, see Task Networking + // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // If the network mode is host, you cannot run multiple instantiations of the + // same task on a single container instance when port mappings are used. + // + // For more information, see Network settings (https://docs.docker.com/engine/reference/run/#network-settings) + // in the Docker run reference. + NetworkMode *string `json:"networkMode,omitempty"` + // The process namespace to use for the containers in the task. The valid values + // are host or task. 
If host is specified, then all containers within the tasks + // that specified the host PID mode on the same container instance share the + // same process namespace with the host Amazon EC2 instance. If task is specified, + // all containers within the specified task share the same process namespace. + // If no value is specified, the default is a private namespace. For more information, + // see PID settings (https://docs.docker.com/engine/reference/run/#pid-settings---pid) + // in the Docker run reference. + // + // If the host PID mode is used, be aware that there is a heightened risk of + // undesired process namespace expose. For more information, see Docker security + // (https://docs.docker.com/engine/security/security/). + // + // This parameter is not supported for Windows containers or tasks run on Fargate. + PIDMode *string `json:"pidMode,omitempty"` + // An array of placement constraint objects to use for the task. You can specify + // a maximum of 10 constraints for each task. This limit includes constraints + // in the task definition and those specified at runtime. + PlacementConstraints []*TaskDefinitionPlacementConstraint `json:"placementConstraints,omitempty"` + // The configuration details for the App Mesh proxy. + // + // For tasks hosted on Amazon EC2 instances, the container instances require + // at least version 1.26.0 of the container agent and at least version 1.26.0-1 + // of the ecs-init package to use a proxy configuration. If your container instances + // are launched from the Amazon ECS-optimized AMI version 20190301 or later, + // then they contain the required versions of the container agent and ecs-init. + // For more information, see Amazon ECS-optimized AMI versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-ami-versions.html) + // in the Amazon Elastic Container Service Developer Guide. + ProxyConfiguration *ProxyConfiguration `json:"proxyConfiguration,omitempty"` + // The task launch type that Amazon ECS validates the task definition against. + // A client exception is returned if the task definition doesn't validate against + // the compatibilities specified. If no value is specified, the parameter is + // omitted from the response. + RequiresCompatibilities []*string `json:"requiresCompatibilities,omitempty"` + // The operating system that your tasks definitions run on. A platform family + // is specified only for tasks using the Fargate launch type. + // + // When you specify a task definition in a service, this value must match the + // runtimePlatform value of the service. + RuntimePlatform *RuntimePlatform `json:"runtimePlatform,omitempty"` + // The metadata that you apply to the task definition to help you categorize + // and organize them. Each tag consists of a key and an optional value. You + // define both of them. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. 
+ // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for Amazon Web + // Services use. You cannot edit or delete tag keys or values with this prefix. + // Tags with this prefix do not count against your tags per resource limit. + Tags []*Tag `json:"tags,omitempty"` + CustomTaskDefinitionParameters `json:",inline"` +} + +// TaskDefinitionFamilySpec defines the desired state of TaskDefinitionFamily +type TaskDefinitionFamilySpec struct { + xpv1.ResourceSpec `json:",inline"` + ForProvider TaskDefinitionFamilyParameters `json:"forProvider"` +} + +// TaskDefinitionFamilyObservation defines the observed state of TaskDefinitionFamily +type TaskDefinitionFamilyObservation struct { + // The full description of the registered task definition. + TaskDefinition *TaskDefinition_SDK `json:"taskDefinition,omitempty"` +} + +// TaskDefinitionFamilyApiStatus defines the observed state of TaskDefinitionFamily. +type TaskDefinitionFamilyApiStatus struct { + xpv1.ResourceStatus `json:",inline"` + AtProvider TaskDefinitionFamilyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// TaskDefinitionFamily is the Schema for the TaskDefinitionFamilies API +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type TaskDefinitionFamily struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec TaskDefinitionFamilySpec `json:"spec"` + Status TaskDefinitionFamilyApiStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TaskDefinitionFamilyList contains a list of TaskDefinitionFamilies +type TaskDefinitionFamilyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TaskDefinitionFamily `json:"items"` +} + +// Repository type metadata. +var ( + TaskDefinitionFamilyKind = "TaskDefinitionFamily" + TaskDefinitionFamilyGroupKind = schema.GroupKind{Group: CRDGroup, Kind: TaskDefinitionFamilyKind}.String() + TaskDefinitionFamilyKindAPIVersion = TaskDefinitionFamilyKind + "." + GroupVersion.String() + TaskDefinitionFamilyGroupVersionKind = GroupVersion.WithKind(TaskDefinitionFamilyKind) +) + +func init() { + SchemeBuilder.Register(&TaskDefinitionFamily{}, &TaskDefinitionFamilyList{}) +} diff --git a/apis/ecs/v1alpha1/zz_generated.deepcopy.go b/apis/ecs/v1alpha1/zz_generated.deepcopy.go index d8363d9149..fdf843cf4d 100644 --- a/apis/ecs/v1alpha1/zz_generated.deepcopy.go +++ b/apis/ecs/v1alpha1/zz_generated.deepcopy.go @@ -4340,6 +4340,235 @@ func (in *TaskDefinition) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TaskDefinitionFamily) DeepCopyInto(out *TaskDefinitionFamily) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionFamily. +func (in *TaskDefinitionFamily) DeepCopy() *TaskDefinitionFamily { + if in == nil { + return nil + } + out := new(TaskDefinitionFamily) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskDefinitionFamily) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionFamilyApiStatus) DeepCopyInto(out *TaskDefinitionFamilyApiStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionFamilyApiStatus. +func (in *TaskDefinitionFamilyApiStatus) DeepCopy() *TaskDefinitionFamilyApiStatus { + if in == nil { + return nil + } + out := new(TaskDefinitionFamilyApiStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionFamilyList) DeepCopyInto(out *TaskDefinitionFamilyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TaskDefinitionFamily, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionFamilyList. +func (in *TaskDefinitionFamilyList) DeepCopy() *TaskDefinitionFamilyList { + if in == nil { + return nil + } + out := new(TaskDefinitionFamilyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskDefinitionFamilyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionFamilyObservation) DeepCopyInto(out *TaskDefinitionFamilyObservation) { + *out = *in + if in.TaskDefinition != nil { + in, out := &in.TaskDefinition, &out.TaskDefinition + *out = new(TaskDefinition_SDK) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionFamilyObservation. +func (in *TaskDefinitionFamilyObservation) DeepCopy() *TaskDefinitionFamilyObservation { + if in == nil { + return nil + } + out := new(TaskDefinitionFamilyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskDefinitionFamilyParameters) DeepCopyInto(out *TaskDefinitionFamilyParameters) { + *out = *in + if in.ContainerDefinitions != nil { + in, out := &in.ContainerDefinitions, &out.ContainerDefinitions + *out = make([]*ContainerDefinition, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ContainerDefinition) + (*in).DeepCopyInto(*out) + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(EphemeralStorage) + (*in).DeepCopyInto(*out) + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.InferenceAccelerators != nil { + in, out := &in.InferenceAccelerators, &out.InferenceAccelerators + *out = make([]*InferenceAccelerator, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(InferenceAccelerator) + (*in).DeepCopyInto(*out) + } + } + } + if in.IPCMode != nil { + in, out := &in.IPCMode, &out.IPCMode + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.NetworkMode != nil { + in, out := &in.NetworkMode, &out.NetworkMode + *out = new(string) + **out = **in + } + if in.PIDMode != nil { + in, out := &in.PIDMode, &out.PIDMode + *out = new(string) + **out = **in + } + if in.PlacementConstraints != nil { + in, out := &in.PlacementConstraints, &out.PlacementConstraints + *out = make([]*TaskDefinitionPlacementConstraint, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(TaskDefinitionPlacementConstraint) + (*in).DeepCopyInto(*out) + } + } + } + if in.ProxyConfiguration != nil { + in, out := &in.ProxyConfiguration, &out.ProxyConfiguration + *out = new(ProxyConfiguration) + (*in).DeepCopyInto(*out) + } + if in.RequiresCompatibilities != nil { + in, out := &in.RequiresCompatibilities, &out.RequiresCompatibilities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RuntimePlatform != nil { + in, out := &in.RuntimePlatform, &out.RuntimePlatform + *out = new(RuntimePlatform) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } + in.CustomTaskDefinitionParameters.DeepCopyInto(&out.CustomTaskDefinitionParameters) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionFamilyParameters. +func (in *TaskDefinitionFamilyParameters) DeepCopy() *TaskDefinitionFamilyParameters { + if in == nil { + return nil + } + out := new(TaskDefinitionFamilyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskDefinitionFamilySpec) DeepCopyInto(out *TaskDefinitionFamilySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskDefinitionFamilySpec. 
+func (in *TaskDefinitionFamilySpec) DeepCopy() *TaskDefinitionFamilySpec { + if in == nil { + return nil + } + out := new(TaskDefinitionFamilySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TaskDefinitionList) DeepCopyInto(out *TaskDefinitionList) { *out = *in diff --git a/apis/ecs/v1alpha1/zz_generated.managed.go b/apis/ecs/v1alpha1/zz_generated.managed.go index d7af3181ad..7033110bb6 100644 --- a/apis/ecs/v1alpha1/zz_generated.managed.go +++ b/apis/ecs/v1alpha1/zz_generated.managed.go @@ -199,3 +199,63 @@ func (mg *TaskDefinition) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectio func (mg *TaskDefinition) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/ecs/v1alpha1/zz_generated.managedlist.go b/apis/ecs/v1alpha1/zz_generated.managedlist.go index 5968882441..ff40f24923 100644 --- a/apis/ecs/v1alpha1/zz_generated.managedlist.go +++ b/apis/ecs/v1alpha1/zz_generated.managedlist.go @@ -38,6 +38,15 @@ func (l *ServiceList) GetItems() []resource.Managed { return items } +// GetItems of this TaskDefinitionFamilyList. 
+func (l *TaskDefinitionFamilyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this TaskDefinitionList. func (l *TaskDefinitionList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/ecs/v1alpha1/zz_generated.resolvers.go b/apis/ecs/v1alpha1/zz_generated.resolvers.go index f4dfc62ffc..49822ce5d6 100644 --- a/apis/ecs/v1alpha1/zz_generated.resolvers.go +++ b/apis/ecs/v1alpha1/zz_generated.resolvers.go @@ -232,3 +232,88 @@ func (mg *TaskDefinition) ResolveReferences(ctx context.Context, c client.Reader return nil } + +// ResolveReferences of this TaskDefinitionFamily. +func (mg *TaskDefinitionFamily) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomTaskDefinitionParameters.ExecutionRoleARN), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CustomTaskDefinitionParameters.ExecutionRoleARNRef, + Selector: mg.Spec.ForProvider.CustomTaskDefinitionParameters.ExecutionRoleARNSelector, + To: reference.To{ + List: &v1beta11.RoleList{}, + Managed: &v1beta11.Role{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomTaskDefinitionParameters.ExecutionRoleARN") + } + mg.Spec.ForProvider.CustomTaskDefinitionParameters.ExecutionRoleARN = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomTaskDefinitionParameters.ExecutionRoleARNRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomTaskDefinitionParameters.TaskRoleARN), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CustomTaskDefinitionParameters.TaskRoleARNRef, + Selector: mg.Spec.ForProvider.CustomTaskDefinitionParameters.TaskRoleARNSelector, + To: reference.To{ + List: &v1beta11.RoleList{}, + Managed: &v1beta11.Role{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomTaskDefinitionParameters.TaskRoleARN") + } + mg.Spec.ForProvider.CustomTaskDefinitionParameters.TaskRoleARN = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomTaskDefinitionParameters.TaskRoleARNRef = rsp.ResolvedReference + + for i4 := 0; i4 < len(mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes); i4++ { + if mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration != nil { + if mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.AuthorizationConfig != nil { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.AuthorizationConfig.AccessPointID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.AuthorizationConfig.AccessPointIDRef, + Selector: mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.AuthorizationConfig.AccessPointIDSelector, + To: reference.To{ + List: &v1alpha11.AccessPointList{}, + Managed: &v1alpha11.AccessPoint{}, + }, + }) + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.AuthorizationConfig.AccessPointID") + } + mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.AuthorizationConfig.AccessPointID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.AuthorizationConfig.AccessPointIDRef = rsp.ResolvedReference + + } + } + } + for i4 := 0; i4 < len(mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes); i4++ { + if mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration != nil { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.FileSystemID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.FileSystemIDRef, + Selector: mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.FileSystemIDSelector, + To: reference.To{ + List: &v1alpha11.FileSystemList{}, + Managed: &v1alpha11.FileSystem{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.FileSystemID") + } + mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomTaskDefinitionParameters.Volumes[i4].EFSVolumeConfiguration.FileSystemIDRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/package/crds/ecs.aws.crossplane.io_taskdefinitionfamilies.yaml b/package/crds/ecs.aws.crossplane.io_taskdefinitionfamilies.yaml new file mode 100644 index 0000000000..09287254dd --- /dev/null +++ b/package/crds/ecs.aws.crossplane.io_taskdefinitionfamilies.yaml @@ -0,0 +1,2204 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: taskdefinitionfamilies.ecs.aws.crossplane.io +spec: + group: ecs.aws.crossplane.io + names: + categories: + - crossplane + - managed + - aws + kind: TaskDefinitionFamily + listKind: TaskDefinitionFamilyList + plural: taskdefinitionfamilies + singular: taskdefinitionfamily + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TaskDefinitionFamily is the Schema for the TaskDefinitionFamilies + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TaskDefinitionFamilySpec defines the desired state of TaskDefinitionFamily + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + description: TaskDefinitionFamilyParameters defines the desired state + of TaskDefinitionFamily + properties: + containerDefinitions: + description: |- + A list of container definitions in JSON format that describe the different + containers that make up your task. + items: + properties: + command: + items: + type: string + type: array + cpu: + format: int64 + type: integer + credentialSpecs: + items: + type: string + type: array + dependsOn: + items: + properties: + condition: + type: string + containerName: + type: string + type: object + type: array + disableNetworking: + type: boolean + dnsSearchDomains: + items: + type: string + type: array + dnsServers: + items: + type: string + type: array + dockerLabels: + additionalProperties: + type: string + type: object + dockerSecurityOptions: + items: + type: string + type: array + entryPoint: + items: + type: string + type: array + environment: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + environmentFiles: + items: + properties: + type_: + type: string + value: + type: string + type: object + type: array + essential: + type: boolean + extraHosts: + items: + properties: + hostname: + type: string + ipAddress: + type: string + type: object + type: array + firelensConfiguration: + description: |- + The FireLens configuration for the container. This is used to specify and + configure a log router for container logs. For more information, see Custom + log routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + options: + additionalProperties: + type: string + type: object + type_: + type: string + type: object + healthCheck: + description: |- + An object representing a container health check. Health check parameters + that are specified in a container definition override any Docker health checks + that exist in the container image (such as those specified in a parent image + or from the image's Dockerfile). This configuration maps to the HEALTHCHECK + parameter of docker run (https://docs.docker.com/engine/reference/run/). + + + The Amazon ECS container agent only monitors and reports on the health checks + specified in the task definition. Amazon ECS does not monitor Docker health + checks that are embedded in a container image and not specified in the container + definition. Health check parameters that are specified in a container definition + override any Docker health checks that exist in the container image. 
+ + + You can view the health status of both individual containers and a task with + the DescribeTasks API operation or when viewing the task details in the console. + + + The health check is designed to make sure that your containers survive agent + restarts, upgrades, or temporary unavailability. + + + The following describes the possible healthStatus values for a container: + + + * HEALTHY-The container health check has passed successfully. + + + * UNHEALTHY-The container health check has failed. + + + * UNKNOWN-The container health check is being evaluated or there's no + container health check defined. + + + The following describes the possible healthStatus values for a task. The + container health check status of non-essential containers don't have an effect + on the health status of a task. + + + * HEALTHY-All essential containers within the task have passed their health + checks. + + + * UNHEALTHY-One or more essential containers have failed their health + check. + + + * UNKNOWN-The essential containers within the task are still having their + health checks evaluated, there are only nonessential containers with health + checks defined, or there are no container health checks defined. + + + If a task is run manually, and not as part of a service, the task will continue + its lifecycle regardless of its health status. For tasks that are part of + a service, if the task reports as unhealthy then the task will be stopped + and the service scheduler will replace it. + + + The following are notes about container health check support: + + + * When the Amazon ECS agent cannot connect to the Amazon ECS service, + the service reports the container as UNHEALTHY. + + + * The health check statuses are the "last heard from" response from the + Amazon ECS agent. There are no assumptions made about the status of the + container health checks. + + + * Container health checks require version 1.17.0 or greater of the Amazon + ECS container agent. For more information, see Updating the Amazon ECS + container agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). + + + * Container health checks are supported for Fargate tasks if you're using + platform version 1.1.0 or greater. For more information, see Fargate platform + versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). + + + * Container health checks aren't supported for tasks that are part of + a service that's configured to use a Classic Load Balancer. + properties: + command: + items: + type: string + type: array + interval: + format: int64 + type: integer + retries: + format: int64 + type: integer + startPeriod: + format: int64 + type: integer + timeout: + format: int64 + type: integer + type: object + hostname: + type: string + image: + type: string + interactive: + type: boolean + links: + items: + type: string + type: array + linuxParameters: + description: |- + The Linux-specific options that are applied to the container, such as Linux + KernelCapabilities (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html). + properties: + capabilities: + description: |- + The Linux capabilities for the container that are added to or dropped from + the default configuration provided by Docker. 
For more information about + the default capabilities and the non-default available capabilities, see + Runtime privilege and Linux capabilities (https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities) + in the Docker run reference. For more detailed information about these Linux + capabilities, see the capabilities(7) (http://man7.org/linux/man-pages/man7/capabilities.7.html) + Linux manual page. + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + devices: + items: + properties: + containerPath: + type: string + hostPath: + type: string + permissions: + items: + type: string + type: array + type: object + type: array + initProcessEnabled: + type: boolean + maxSwap: + format: int64 + type: integer + sharedMemorySize: + format: int64 + type: integer + swappiness: + format: int64 + type: integer + tmpfs: + items: + properties: + containerPath: + type: string + mountOptions: + items: + type: string + type: array + size: + format: int64 + type: integer + type: object + type: array + type: object + logConfiguration: + description: |- + The log configuration for the container. This parameter maps to LogConfig + in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) + section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) + and the --log-driver option to docker run (https://docs.docker.com/engine/reference/commandline/run/). + + + By default, containers use the same logging driver that the Docker daemon + uses. However, the container might use a different logging driver than the + Docker daemon by specifying a log driver configuration in the container definition. + For more information about the options for different supported log drivers, + see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) + in the Docker documentation. + + + Understand the following when specifying a log configuration for your containers. + + + * Amazon ECS currently supports a subset of the logging drivers available + to the Docker daemon. Additional log drivers may be available in future + releases of the Amazon ECS container agent. For tasks on Fargate, the + supported log drivers are awslogs, splunk, and awsfirelens. For tasks + hosted on Amazon EC2 instances, the supported log drivers are awslogs, + fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens. + + + * This parameter requires version 1.18 of the Docker Remote API or greater + on your container instance. + + + * For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container + agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS + environment variable before containers placed on that instance can use + these log configuration options. For more information, see Amazon ECS + container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + in the Amazon Elastic Container Service Developer Guide. + + + * For tasks that are on Fargate, because you don't have access to the + underlying infrastructure your tasks are hosted on, any additional software + needed must be installed outside of the task. For example, the Fluentd + output aggregators or a remote host running Logstash to send Gelf logs + to. 
+ properties: + logDriver: + type: string + options: + additionalProperties: + type: string + type: object + secretOptions: + items: + properties: + name: + type: string + valueFrom: + type: string + type: object + type: array + type: object + memory: + format: int64 + type: integer + memoryReservation: + format: int64 + type: integer + mountPoints: + items: + properties: + containerPath: + type: string + readOnly: + type: boolean + sourceVolume: + type: string + type: object + type: array + name: + type: string + portMappings: + items: + properties: + appProtocol: + type: string + containerPort: + format: int64 + type: integer + containerPortRange: + type: string + hostPort: + format: int64 + type: integer + name: + type: string + protocol: + type: string + type: object + type: array + privileged: + type: boolean + pseudoTerminal: + type: boolean + readonlyRootFilesystem: + type: boolean + repositoryCredentials: + description: The repository credentials for private registry + authentication. + properties: + credentialsParameter: + type: string + type: object + resourceRequirements: + items: + properties: + type_: + type: string + value: + type: string + type: object + type: array + secrets: + items: + properties: + name: + type: string + valueFrom: + type: string + type: object + type: array + startTimeout: + format: int64 + type: integer + stopTimeout: + format: int64 + type: integer + systemControls: + items: + properties: + namespace: + type: string + value: + type: string + type: object + type: array + ulimits: + items: + properties: + hardLimit: + format: int64 + type: integer + name: + type: string + softLimit: + format: int64 + type: integer + type: object + type: array + user: + type: string + volumesFrom: + items: + properties: + readOnly: + type: boolean + sourceContainer: + type: string + type: object + type: array + workingDirectory: + type: string + type: object + type: array + cpu: + description: |- + The number of CPU units used by the task. It can be expressed as an integer + using CPU units (for example, 1024) or as a string using vCPUs (for example, + 1 vCPU or 1 vcpu) in a task definition. String values are converted to an + integer indicating the CPU units when the task definition is registered. + + + Task-level CPU and memory parameters are ignored for Windows containers. + We recommend specifying container-level resources for Windows containers. + + + If you're using the EC2 launch type, this field is optional. Supported values + are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). If + you do not specify a value, the parameter is ignored. + + + If you're using the Fargate launch type, this field is required and you must + use one of the following values, which determines your range of supported + values for the memory parameter: + + + The CPU units cannot be less than 1 vCPU when you use Windows containers + on Fargate. 
+ + + * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), + 2048 (2 GB) + + + * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 + (3 GB), 4096 (4 GB) + + + * 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 + (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + + + * 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) + in increments of 1024 (1 GB) + + + * 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) + in increments of 1024 (1 GB) + + + * 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments + This option requires Linux platform 1.4.0 or later. + + + * 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments + This option requires Linux platform 1.4.0 or later. + type: string + ephemeralStorage: + description: |- + The amount of ephemeral storage to allocate for the task. This parameter + is used to expand the total amount of ephemeral storage available, beyond + the default amount, for tasks hosted on Fargate. For more information, see + Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) + in the Amazon ECS User Guide for Fargate. + + + For tasks using the Fargate launch type, the task requires the following + platforms: + + + * Linux platform version 1.4.0 or later. + + + * Windows platform version 1.0.0 or later. + properties: + sizeInGiB: + format: int64 + type: integer + type: object + executionRoleARN: + description: |- + The Amazon Resource Name (ARN) of the task execution role that grants the + Amazon ECS container agent permission to make Amazon Web Services API calls + on your behalf. The task execution IAM role is required depending on the + requirements of your task. For more information, see Amazon ECS task execution + IAM role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html) + in the Amazon Elastic Container Service Developer Guide. + type: string + executionRoleARNRef: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleARNSelector: + description: A Selector selects an object. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + family: + description: |- + You must specify a family for a task definition. You can use it track multiple + versions of the same task definition. The family is used as a name for your + task definition. Up to 255 letters (uppercase and lowercase), numbers, underscores, + and hyphens are allowed. + type: string + inferenceAccelerators: + description: The Elastic Inference accelerators to use for the + containers in the task. + items: + properties: + deviceName: + type: string + deviceType: + type: string + type: object + type: array + ipcMode: + description: |- + The IPC resource namespace to use for the containers in the task. The valid + values are host, task, or none. If host is specified, then all containers + within the tasks that specified the host IPC mode on the same container instance + share the same IPC resources with the host Amazon EC2 instance. If task is + specified, all containers within the specified task share the same IPC resources. + If none is specified, then IPC resources within the containers of a task + are private and not shared with other containers in a task or on the container + instance. If no value is specified, then the IPC resource namespace sharing + depends on the Docker daemon setting on the container instance. For more + information, see IPC settings (https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) + in the Docker run reference. + + + If the host IPC mode is used, be aware that there is a heightened risk of + undesired IPC namespace expose. For more information, see Docker security + (https://docs.docker.com/engine/security/security/). + + + If you are setting namespaced kernel parameters using systemControls for + the containers in the task, the following will apply to your IPC resource + namespace. For more information, see System Controls (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) + in the Amazon Elastic Container Service Developer Guide. + + + * For tasks that use the host IPC mode, IPC namespace related systemControls + are not supported. + + + * For tasks that use the task IPC mode, IPC namespace related systemControls + will apply to all containers within a task. + + + This parameter is not supported for Windows containers or tasks run on Fargate. + type: string + memory: + description: |- + The amount of memory (in MiB) used by the task. It can be expressed as an + integer using MiB (for example ,1024) or as a string using GB (for example, + 1GB or 1 GB) in a task definition. String values are converted to an integer + indicating the MiB when the task definition is registered. + + + Task-level CPU and memory parameters are ignored for Windows containers. + We recommend specifying container-level resources for Windows containers. 
+ + + If using the EC2 launch type, this field is optional. + + + If using the Fargate launch type, this field is required and you must use + one of the following values. This determines your range of supported values + for the cpu parameter. + + + The CPU units cannot be less than 1 vCPU when you use Windows containers + on Fargate. + + + * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 + vCPU) + + + * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + 512 (.5 vCPU) + + + * 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 + (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + + + * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + Available cpu values: 2048 (2 vCPU) + + + * Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + Available cpu values: 4096 (4 vCPU) + + + * Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 + (8 vCPU) This option requires Linux platform 1.4.0 or later. + + + * Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 + (16 vCPU) This option requires Linux platform 1.4.0 or later. + type: string + networkMode: + description: |- + The Docker networking mode to use for the containers in the task. The valid + values are none, bridge, awsvpc, and host. If no network mode is specified, + the default is bridge. + + + For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For + Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. + For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc + can be used. If the network mode is set to none, you cannot specify port + mappings in your container definitions, and the tasks containers do not have + external connectivity. The host and awsvpc network modes offer the highest + networking performance for containers because they use the EC2 network stack + instead of the virtualized network stack provided by the bridge mode. + + + With the host and awsvpc network modes, exposed container ports are mapped + directly to the corresponding host port (for the host network mode) or the + attached elastic network interface port (for the awsvpc network mode), so + you cannot take advantage of dynamic host port mappings. + + + When using the host network mode, you should not run containers using the + root user (UID 0). It is considered best practice to use a non-root user. + + + If the network mode is awsvpc, the task is allocated an elastic network interface, + and you must specify a NetworkConfiguration value when you create a service + or run a task with the task definition. For more information, see Task Networking + (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + in the Amazon Elastic Container Service Developer Guide. + + + If the network mode is host, you cannot run multiple instantiations of the + same task on a single container instance when port mappings are used. + + + For more information, see Network settings (https://docs.docker.com/engine/reference/run/#network-settings) + in the Docker run reference. + type: string + pidMode: + description: |- + The process namespace to use for the containers in the task. The valid values + are host or task. If host is specified, then all containers within the tasks + that specified the host PID mode on the same container instance share the + same process namespace with the host Amazon EC2 instance. 
If task is specified, + all containers within the specified task share the same process namespace. + If no value is specified, the default is a private namespace. For more information, + see PID settings (https://docs.docker.com/engine/reference/run/#pid-settings---pid) + in the Docker run reference. + + + If the host PID mode is used, be aware that there is a heightened risk of + undesired process namespace expose. For more information, see Docker security + (https://docs.docker.com/engine/security/security/). + + + This parameter is not supported for Windows containers or tasks run on Fargate. + type: string + placementConstraints: + description: |- + An array of placement constraint objects to use for the task. You can specify + a maximum of 10 constraints for each task. This limit includes constraints + in the task definition and those specified at runtime. + items: + properties: + expression: + type: string + type_: + type: string + type: object + type: array + proxyConfiguration: + description: |- + The configuration details for the App Mesh proxy. + + + For tasks hosted on Amazon EC2 instances, the container instances require + at least version 1.26.0 of the container agent and at least version 1.26.0-1 + of the ecs-init package to use a proxy configuration. If your container instances + are launched from the Amazon ECS-optimized AMI version 20190301 or later, + then they contain the required versions of the container agent and ecs-init. + For more information, see Amazon ECS-optimized AMI versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-ami-versions.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + containerName: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type_: + type: string + type: object + region: + description: Region is which region the TaskDefinitionFamily will + be created. + type: string + requiresCompatibilities: + description: |- + The task launch type that Amazon ECS validates the task definition against. + A client exception is returned if the task definition doesn't validate against + the compatibilities specified. If no value is specified, the parameter is + omitted from the response. + items: + type: string + type: array + runtimePlatform: + description: |- + The operating system that your tasks definitions run on. A platform family + is specified only for tasks using the Fargate launch type. + + + When you specify a task definition in a service, this value must match the + runtimePlatform value of the service. + properties: + cpuArchitecture: + type: string + operatingSystemFamily: + type: string + type: object + tags: + description: |- + The metadata that you apply to the task definition to help you categorize + and organize them. Each tag consists of a key and an optional value. You + define both of them. + + + The following basic restrictions apply to tags: + + + * Maximum number of tags per resource - 50 + + + * For each resource, each tag key must be unique, and each tag key can + have only one value. + + + * Maximum key length - 128 Unicode characters in UTF-8 + + + * Maximum value length - 256 Unicode characters in UTF-8 + + + * If your tagging schema is used across multiple services and resources, + remember that other services may have restrictions on allowed characters. + Generally allowed characters are: letters, numbers, and spaces representable + in UTF-8, and the following characters: + - = . _ : / @. 
+ + + * Tag keys and values are case-sensitive. + + + * Do not use aws:, AWS:, or any upper or lowercase combination of such + as a prefix for either keys or values as it is reserved for Amazon Web + Services use. You cannot edit or delete tag keys or values with this prefix. + Tags with this prefix do not count against your tags per resource limit. + items: + properties: + key: + type: string + value: + type: string + type: object + type: array + taskRoleARN: + description: |- + The short name or full Amazon Resource Name (ARN) of the IAM role that containers + in this task can assume. All containers in this task are granted the permissions + that are specified in this role. For more information, see IAM Roles for + Tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) + in the Amazon Elastic Container Service Developer Guide. + type: string + taskRoleARNRef: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + taskRoleARNSelector: + description: A Selector selects an object. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + volumes: + description: |- + A list of volume definitions in JSON format that containers in your task + may use. + items: + description: CustomVolume provides custom parameters for the + Volume type + properties: + dockerVolumeConfiguration: + description: |- + This parameter is specified when you are using Docker volumes. Docker volumes + are only supported when you are using the EC2 launch type. Windows containers + only support the use of the local driver. To use bind mounts, specify a host + instead.
+ properties: + autoprovision: + type: boolean + driver: + type: string + driverOpts: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + scope: + type: string + type: object + efsVolumeConfiguration: + description: |- + This parameter is specified when you are using an Amazon Elastic File System + file system for task storage. For more information, see Amazon EFS Volumes + (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + authorizationConfig: + description: The authorization configuration details + for the Amazon EFS file system. + properties: + accessPointID: + description: |- + The Amazon EFS access point ID to use. If an access point is specified, the + root directory value specified in the EFSVolumeConfiguration must either + be omitted or set to / which will enforce the path set on the EFS access + point. If an access point is used, transit encryption must be enabled in + the EFSVolumeConfiguration. For more information, see Working with Amazon + EFS Access Points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) + in the Amazon Elastic File System User Guide. + type: string + accessPointIDRef: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accessPointIDSelector: + description: A Selector selects an object. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iam: + description: |- + Determines whether to use the Amazon ECS task IAM role defined in a task + definition when mounting the Amazon EFS file system. If enabled, transit + encryption must be enabled in the EFSVolumeConfiguration. If this parameter + is omitted, the default value of DISABLED is used. For more information, + see Using Amazon EFS Access Points (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html#efs-volume-accesspoints) + in the Amazon Elastic Container Service Developer Guide. + type: string + type: object + fileSystemID: + type: string + fileSystemIDRef: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + fileSystemIDSelector: + description: A Selector selects an object. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rootDirectory: + type: string + transitEncryption: + type: string + transitEncryptionPort: + format: int64 + type: integer + type: object + fsxWindowsFileServerVolumeConfiguration: + description: |- + This parameter is specified when you are using Amazon FSx for Windows File + Server (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/what-is.html) + file system for task storage. + + + For more information and the input format, see Amazon FSx for Windows File + Server Volumes (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html) + in the Amazon Elastic Container Service Developer Guide. 
+ properties: + authorizationConfig: + description: |- + The authorization configuration details for Amazon FSx for Windows File Server + file system. See FSxWindowsFileServerVolumeConfiguration (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_FSxWindowsFileServerVolumeConfiguration.html) + in the Amazon ECS API Reference. + + + For more information and the input format, see Amazon FSx for Windows File + Server Volumes (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + credentialsParameter: + type: string + domain: + type: string + type: object + fileSystemID: + type: string + rootDirectory: + type: string + type: object + host: + description: Details on a container instance bind mount + host volume. + properties: + sourcePath: + type: string + type: object + name: + type: string + required: + - name + type: object + type: array + required: + - containerDefinitions + - family + - region + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TaskDefinitionFamilyStatus defines the observed state of + TaskDefinitionFamily. + properties: + atProvider: + description: TaskDefinitionFamilyObservation defines the observed + state of TaskDefinitionFamily + properties: + taskDefinition: + description: The full description of the registered task definition. 
+ properties: + compatibilities: + items: + type: string + type: array + containerDefinitions: + items: + properties: + command: + items: + type: string + type: array + cpu: + format: int64 + type: integer + credentialSpecs: + items: + type: string + type: array + dependsOn: + items: + properties: + condition: + type: string + containerName: + type: string + type: object + type: array + disableNetworking: + type: boolean + dnsSearchDomains: + items: + type: string + type: array + dnsServers: + items: + type: string + type: array + dockerLabels: + additionalProperties: + type: string + type: object + dockerSecurityOptions: + items: + type: string + type: array + entryPoint: + items: + type: string + type: array + environment: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + environmentFiles: + items: + properties: + type_: + type: string + value: + type: string + type: object + type: array + essential: + type: boolean + extraHosts: + items: + properties: + hostname: + type: string + ipAddress: + type: string + type: object + type: array + firelensConfiguration: + description: |- + The FireLens configuration for the container. This is used to specify and + configure a log router for container logs. For more information, see Custom + log routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + options: + additionalProperties: + type: string + type: object + type_: + type: string + type: object + healthCheck: + description: |- + An object representing a container health check. Health check parameters + that are specified in a container definition override any Docker health checks + that exist in the container image (such as those specified in a parent image + or from the image's Dockerfile). This configuration maps to the HEALTHCHECK + parameter of docker run (https://docs.docker.com/engine/reference/run/). + + + The Amazon ECS container agent only monitors and reports on the health checks + specified in the task definition. Amazon ECS does not monitor Docker health + checks that are embedded in a container image and not specified in the container + definition. Health check parameters that are specified in a container definition + override any Docker health checks that exist in the container image. + + + You can view the health status of both individual containers and a task with + the DescribeTasks API operation or when viewing the task details in the console. + + + The health check is designed to make sure that your containers survive agent + restarts, upgrades, or temporary unavailability. + + + The following describes the possible healthStatus values for a container: + + + * HEALTHY-The container health check has passed successfully. + + + * UNHEALTHY-The container health check has failed. + + + * UNKNOWN-The container health check is being evaluated or there's no + container health check defined. + + + The following describes the possible healthStatus values for a task. The + container health check status of non-essential containers don't have an effect + on the health status of a task. + + + * HEALTHY-All essential containers within the task have passed their health + checks. + + + * UNHEALTHY-One or more essential containers have failed their health + check. 
+ + + * UNKNOWN-The essential containers within the task are still having their + health checks evaluated, there are only nonessential containers with health + checks defined, or there are no container health checks defined. + + + If a task is run manually, and not as part of a service, the task will continue + its lifecycle regardless of its health status. For tasks that are part of + a service, if the task reports as unhealthy then the task will be stopped + and the service scheduler will replace it. + + + The following are notes about container health check support: + + + * When the Amazon ECS agent cannot connect to the Amazon ECS service, + the service reports the container as UNHEALTHY. + + + * The health check statuses are the "last heard from" response from the + Amazon ECS agent. There are no assumptions made about the status of the + container health checks. + + + * Container health checks require version 1.17.0 or greater of the Amazon + ECS container agent. For more information, see Updating the Amazon ECS + container agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). + + + * Container health checks are supported for Fargate tasks if you're using + platform version 1.1.0 or greater. For more information, see Fargate platform + versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). + + + * Container health checks aren't supported for tasks that are part of + a service that's configured to use a Classic Load Balancer. + properties: + command: + items: + type: string + type: array + interval: + format: int64 + type: integer + retries: + format: int64 + type: integer + startPeriod: + format: int64 + type: integer + timeout: + format: int64 + type: integer + type: object + hostname: + type: string + image: + type: string + interactive: + type: boolean + links: + items: + type: string + type: array + linuxParameters: + description: |- + The Linux-specific options that are applied to the container, such as Linux + KernelCapabilities (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html). + properties: + capabilities: + description: |- + The Linux capabilities for the container that are added to or dropped from + the default configuration provided by Docker. For more information about + the default capabilities and the non-default available capabilities, see + Runtime privilege and Linux capabilities (https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities) + in the Docker run reference. For more detailed information about these Linux + capabilities, see the capabilities(7) (http://man7.org/linux/man-pages/man7/capabilities.7.html) + Linux manual page. + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + devices: + items: + properties: + containerPath: + type: string + hostPath: + type: string + permissions: + items: + type: string + type: array + type: object + type: array + initProcessEnabled: + type: boolean + maxSwap: + format: int64 + type: integer + sharedMemorySize: + format: int64 + type: integer + swappiness: + format: int64 + type: integer + tmpfs: + items: + properties: + containerPath: + type: string + mountOptions: + items: + type: string + type: array + size: + format: int64 + type: integer + type: object + type: array + type: object + logConfiguration: + description: |- + The log configuration for the container. 
This parameter maps to LogConfig + in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) + section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) + and the --log-driver option to docker run (https://docs.docker.com/engine/reference/commandline/run/). + + + By default, containers use the same logging driver that the Docker daemon + uses. However, the container might use a different logging driver than the + Docker daemon by specifying a log driver configuration in the container definition. + For more information about the options for different supported log drivers, + see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) + in the Docker documentation. + + + Understand the following when specifying a log configuration for your containers. + + + * Amazon ECS currently supports a subset of the logging drivers available + to the Docker daemon. Additional log drivers may be available in future + releases of the Amazon ECS container agent. For tasks on Fargate, the + supported log drivers are awslogs, splunk, and awsfirelens. For tasks + hosted on Amazon EC2 instances, the supported log drivers are awslogs, + fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens. + + + * This parameter requires version 1.18 of the Docker Remote API or greater + on your container instance. + + + * For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container + agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS + environment variable before containers placed on that instance can use + these log configuration options. For more information, see Amazon ECS + container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + in the Amazon Elastic Container Service Developer Guide. + + + * For tasks that are on Fargate, because you don't have access to the + underlying infrastructure your tasks are hosted on, any additional software + needed must be installed outside of the task. For example, the Fluentd + output aggregators or a remote host running Logstash to send Gelf logs + to. + properties: + logDriver: + type: string + options: + additionalProperties: + type: string + type: object + secretOptions: + items: + properties: + name: + type: string + valueFrom: + type: string + type: object + type: array + type: object + memory: + format: int64 + type: integer + memoryReservation: + format: int64 + type: integer + mountPoints: + items: + properties: + containerPath: + type: string + readOnly: + type: boolean + sourceVolume: + type: string + type: object + type: array + name: + type: string + portMappings: + items: + properties: + appProtocol: + type: string + containerPort: + format: int64 + type: integer + containerPortRange: + type: string + hostPort: + format: int64 + type: integer + name: + type: string + protocol: + type: string + type: object + type: array + privileged: + type: boolean + pseudoTerminal: + type: boolean + readonlyRootFilesystem: + type: boolean + repositoryCredentials: + description: The repository credentials for private + registry authentication. 
+ properties: + credentialsParameter: + type: string + type: object + resourceRequirements: + items: + properties: + type_: + type: string + value: + type: string + type: object + type: array + secrets: + items: + properties: + name: + type: string + valueFrom: + type: string + type: object + type: array + startTimeout: + format: int64 + type: integer + stopTimeout: + format: int64 + type: integer + systemControls: + items: + properties: + namespace: + type: string + value: + type: string + type: object + type: array + ulimits: + items: + properties: + hardLimit: + format: int64 + type: integer + name: + type: string + softLimit: + format: int64 + type: integer + type: object + type: array + user: + type: string + volumesFrom: + items: + properties: + readOnly: + type: boolean + sourceContainer: + type: string + type: object + type: array + workingDirectory: + type: string + type: object + type: array + cpu: + type: string + deregisteredAt: + format: date-time + type: string + ephemeralStorage: + description: |- + The amount of ephemeral storage to allocate for the task. This parameter + is used to expand the total amount of ephemeral storage available, beyond + the default amount, for tasks hosted on Fargate. For more information, see + Fargate task storage (https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) + in the Amazon ECS User Guide for Fargate. + + + For tasks using the Fargate launch type, the task requires the following + platforms: + + + * Linux platform version 1.4.0 or later. + + + * Windows platform version 1.0.0 or later. + properties: + sizeInGiB: + format: int64 + type: integer + type: object + executionRoleARN: + type: string + family: + type: string + inferenceAccelerators: + items: + properties: + deviceName: + type: string + deviceType: + type: string + type: object + type: array + ipcMode: + type: string + memory: + type: string + networkMode: + type: string + pidMode: + type: string + placementConstraints: + items: + properties: + expression: + type: string + type_: + type: string + type: object + type: array + proxyConfiguration: + description: |- + The configuration details for the App Mesh proxy. + + + For tasks that use the EC2 launch type, the container instances require at + least version 1.26.0 of the container agent and at least version 1.26.0-1 + of the ecs-init package to use a proxy configuration. If your container instances + are launched from the Amazon ECS optimized AMI version 20190301 or later, + then they contain the required versions of the container agent and ecs-init. + For more information, see Amazon ECS-optimized Linux AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + properties: + containerName: + type: string + properties: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + type_: + type: string + type: object + registeredAt: + format: date-time + type: string + registeredBy: + type: string + requiresAttributes: + items: + properties: + name: + type: string + targetID: + type: string + targetType: + type: string + value: + type: string + type: object + type: array + requiresCompatibilities: + items: + type: string + type: array + revision: + format: int64 + type: integer + runtimePlatform: + description: |- + Information about the platform for the Amazon ECS service or task. 
+ + + For more information about RuntimePlatform, see RuntimePlatform (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform) + in the Amazon Elastic Container Service Developer Guide. + properties: + cpuArchitecture: + type: string + operatingSystemFamily: + type: string + type: object + status: + type: string + taskDefinitionARN: + type: string + taskRoleARN: + type: string + volumes: + items: + properties: + dockerVolumeConfiguration: + description: |- + This parameter is specified when you're using Docker volumes. Docker volumes + are only supported when you're using the EC2 launch type. Windows containers + only support the use of the local driver. To use bind mounts, specify a host + instead. + properties: + autoprovision: + type: boolean + driver: + type: string + driverOpts: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + scope: + type: string + type: object + efsVolumeConfiguration: + description: |- + This parameter is specified when you're using an Amazon Elastic File System + file system for task storage. For more information, see Amazon EFS volumes + (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + authorizationConfig: + description: The authorization configuration details + for the Amazon EFS file system. + properties: + accessPointID: + type: string + iam: + type: string + type: object + fileSystemID: + type: string + rootDirectory: + type: string + transitEncryption: + type: string + transitEncryptionPort: + format: int64 + type: integer + type: object + fsxWindowsFileServerVolumeConfiguration: + description: |- + This parameter is specified when you're using Amazon FSx for Windows File + Server (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/what-is.html) + file system for task storage. + + + For more information and the input format, see Amazon FSx for Windows File + Server volumes (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + authorizationConfig: + description: |- + The authorization configuration details for Amazon FSx for Windows File Server + file system. See FSxWindowsFileServerVolumeConfiguration (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_FSxWindowsFileServerVolumeConfiguration.html) + in the Amazon ECS API Reference. + + + For more information and the input format, see Amazon FSx for Windows File + Server Volumes (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html) + in the Amazon Elastic Container Service Developer Guide. + properties: + credentialsParameter: + type: string + domain: + type: string + type: object + fileSystemID: + type: string + rootDirectory: + type: string + type: object + host: + description: Details on a container instance bind mount + host volume. + properties: + sourcePath: + type: string + type: object + name: + type: string + type: object + type: array + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/pkg/clients/ecs/taskdefinitionfamily/task_definition_family.go b/pkg/clients/ecs/taskdefinitionfamily/task_definition_family.go new file mode 100644 index 0000000000..f34ac6b9b6 --- /dev/null +++ b/pkg/clients/ecs/taskdefinitionfamily/task_definition_family.go @@ -0,0 +1,764 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package taskdefinitionfamily + +import ( + "github.com/aws/aws-sdk-go/aws" + awsecs "github.com/aws/aws-sdk-go/service/ecs" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + ecs "github.com/crossplane-contrib/provider-aws/apis/ecs/v1alpha1" + "github.com/crossplane-contrib/provider-aws/pkg/utils/pointer" +) + +func LateInitialize(in *ecs.TaskDefinitionFamilyParameters, resp *awsecs.DescribeTaskDefinitionOutput) { //nolint:gocyclo + if in != nil && resp != nil && resp.TaskDefinition != nil { + if len(in.ContainerDefinitions) == len(resp.TaskDefinition.ContainerDefinitions) { + for cdi, cd := range in.ContainerDefinitions { + awscd := resp.TaskDefinition.ContainerDefinitions[cdi] + + cd.CPU = pointer.LateInitialize(cd.CPU, awscd.Cpu) + + if len(cd.PortMappings) == len(awscd.PortMappings) { + for pmi, pm := range cd.PortMappings { + pmcd := awscd.PortMappings[pmi] + + pm.HostPort = pointer.LateInitialize(pm.HostPort, pmcd.HostPort) + pm.Protocol = pointer.LateInitialize(pm.Protocol, pmcd.Protocol) + } + } + } + } + if in.Volumes != nil { + for voli, vol := range in.Volumes { + awsvol := resp.TaskDefinition.Volumes[voli] + if vol.Host == nil && awsvol.Host != nil { + vol.Host = &ecs.HostVolumeProperties{ + SourcePath: awsvol.Host.SourcePath, + } + } + } + } + } +} + +// Modified version of ACK generated conversion function +// +//nolint:gocyclo,gosimple +func GenerateTaskDefinitionFamilyFromDescribe(resp *awsecs.DescribeTaskDefinitionOutput) *ecs.TaskDefinitionFamily { + cr := &ecs.TaskDefinitionFamily{} + parameters := &cr.Spec.ForProvider + + if resp.Tags != nil { + f0 := []*ecs.Tag{} + for _, f0iter := range resp.Tags { + f0elem := &ecs.Tag{} + if f0iter.Key != nil { + f0elem.Key = f0iter.Key + } + if f0iter.Value != nil { + f0elem.Value = f0iter.Value + } + f0 = append(f0, f0elem) + } + cr.Spec.ForProvider.Tags = f0 + } else { + cr.Spec.ForProvider.Tags = nil + } + if resp.TaskDefinition != nil { + if resp.TaskDefinition.ContainerDefinitions != nil { + f1f1 := []*ecs.ContainerDefinition{} + for _, f1f1iter := range resp.TaskDefinition.ContainerDefinitions { + f1f1elem := &ecs.ContainerDefinition{} + if f1f1iter.Command != nil { + f1f1elemf0 := []*string{} + for _, f1f1elemf0iter := range f1f1iter.Command { + var f1f1elemf0elem string + f1f1elemf0elem = *f1f1elemf0iter + f1f1elemf0 = append(f1f1elemf0, &f1f1elemf0elem) + } + f1f1elem.Command = f1f1elemf0 + } + if f1f1iter.Cpu != nil { + f1f1elem.CPU = f1f1iter.Cpu + } + if f1f1iter.CredentialSpecs != nil { + f1f1elemf2 := []*string{} + for _, f1f1elemf2iter := range f1f1iter.CredentialSpecs { + var f1f1elemf2elem string + f1f1elemf2elem = *f1f1elemf2iter + f1f1elemf2 = append(f1f1elemf2, &f1f1elemf2elem) + } + f1f1elem.CredentialSpecs = f1f1elemf2 + } + if f1f1iter.DependsOn != nil { + f1f1elemf3 := []*ecs.ContainerDependency{} + for _, f1f1elemf3iter := range f1f1iter.DependsOn { + f1f1elemf3elem := &ecs.ContainerDependency{} + if f1f1elemf3iter.Condition != nil { + f1f1elemf3elem.Condition = f1f1elemf3iter.Condition + } + if f1f1elemf3iter.ContainerName != nil { + f1f1elemf3elem.ContainerName = f1f1elemf3iter.ContainerName + } + f1f1elemf3 = append(f1f1elemf3, f1f1elemf3elem) + } + f1f1elem.DependsOn = f1f1elemf3 + } + if f1f1iter.DisableNetworking != nil { + f1f1elem.DisableNetworking = f1f1iter.DisableNetworking + } + if f1f1iter.DnsSearchDomains != nil { + f1f1elemf5 := []*string{} + for _, f1f1elemf5iter := range 
f1f1iter.DnsSearchDomains { + var f1f1elemf5elem string + f1f1elemf5elem = *f1f1elemf5iter + f1f1elemf5 = append(f1f1elemf5, &f1f1elemf5elem) + } + f1f1elem.DNSSearchDomains = f1f1elemf5 + } + if f1f1iter.DnsServers != nil { + f1f1elemf6 := []*string{} + for _, f1f1elemf6iter := range f1f1iter.DnsServers { + var f1f1elemf6elem string + f1f1elemf6elem = *f1f1elemf6iter + f1f1elemf6 = append(f1f1elemf6, &f1f1elemf6elem) + } + f1f1elem.DNSServers = f1f1elemf6 + } + if f1f1iter.DockerLabels != nil { + f1f1elemf7 := map[string]*string{} + for f1f1elemf7key, f1f1elemf7valiter := range f1f1iter.DockerLabels { + var f1f1elemf7val string + f1f1elemf7val = *f1f1elemf7valiter + f1f1elemf7[f1f1elemf7key] = &f1f1elemf7val + } + f1f1elem.DockerLabels = f1f1elemf7 + } + if f1f1iter.DockerSecurityOptions != nil { + f1f1elemf8 := []*string{} + for _, f1f1elemf8iter := range f1f1iter.DockerSecurityOptions { + var f1f1elemf8elem string + f1f1elemf8elem = *f1f1elemf8iter + f1f1elemf8 = append(f1f1elemf8, &f1f1elemf8elem) + } + f1f1elem.DockerSecurityOptions = f1f1elemf8 + } + if f1f1iter.EntryPoint != nil { + f1f1elemf9 := []*string{} + for _, f1f1elemf9iter := range f1f1iter.EntryPoint { + var f1f1elemf9elem string + f1f1elemf9elem = *f1f1elemf9iter + f1f1elemf9 = append(f1f1elemf9, &f1f1elemf9elem) + } + f1f1elem.EntryPoint = f1f1elemf9 + } + if f1f1iter.Environment != nil { + f1f1elemf10 := []*ecs.KeyValuePair{} + for _, f1f1elemf10iter := range f1f1iter.Environment { + f1f1elemf10elem := &ecs.KeyValuePair{} + if f1f1elemf10iter.Name != nil { + f1f1elemf10elem.Name = f1f1elemf10iter.Name + } + if f1f1elemf10iter.Value != nil { + f1f1elemf10elem.Value = f1f1elemf10iter.Value + } + f1f1elemf10 = append(f1f1elemf10, f1f1elemf10elem) + } + f1f1elem.Environment = f1f1elemf10 + } + if f1f1iter.EnvironmentFiles != nil { + f1f1elemf11 := []*ecs.EnvironmentFile{} + for _, f1f1elemf11iter := range f1f1iter.EnvironmentFiles { + f1f1elemf11elem := &ecs.EnvironmentFile{} + if f1f1elemf11iter.Type != nil { + f1f1elemf11elem.Type = f1f1elemf11iter.Type + } + if f1f1elemf11iter.Value != nil { + f1f1elemf11elem.Value = f1f1elemf11iter.Value + } + f1f1elemf11 = append(f1f1elemf11, f1f1elemf11elem) + } + f1f1elem.EnvironmentFiles = f1f1elemf11 + } + if f1f1iter.Essential != nil { + f1f1elem.Essential = f1f1iter.Essential + } + if f1f1iter.ExtraHosts != nil { + f1f1elemf13 := []*ecs.HostEntry{} + for _, f1f1elemf13iter := range f1f1iter.ExtraHosts { + f1f1elemf13elem := &ecs.HostEntry{} + if f1f1elemf13iter.Hostname != nil { + f1f1elemf13elem.Hostname = f1f1elemf13iter.Hostname + } + if f1f1elemf13iter.IpAddress != nil { + f1f1elemf13elem.IPAddress = f1f1elemf13iter.IpAddress + } + f1f1elemf13 = append(f1f1elemf13, f1f1elemf13elem) + } + f1f1elem.ExtraHosts = f1f1elemf13 + } + if f1f1iter.FirelensConfiguration != nil { + f1f1elemf14 := &ecs.FirelensConfiguration{} + if f1f1iter.FirelensConfiguration.Options != nil { + f1f1elemf14f0 := map[string]*string{} + for f1f1elemf14f0key, f1f1elemf14f0valiter := range f1f1iter.FirelensConfiguration.Options { + var f1f1elemf14f0val string + f1f1elemf14f0val = *f1f1elemf14f0valiter + f1f1elemf14f0[f1f1elemf14f0key] = &f1f1elemf14f0val + } + f1f1elemf14.Options = f1f1elemf14f0 + } + if f1f1iter.FirelensConfiguration.Type != nil { + f1f1elemf14.Type = f1f1iter.FirelensConfiguration.Type + } + f1f1elem.FirelensConfiguration = f1f1elemf14 + } + if f1f1iter.HealthCheck != nil { + f1f1elemf15 := &ecs.HealthCheck{} + if f1f1iter.HealthCheck.Command != nil { + f1f1elemf15f0 := []*string{} + for 
_, f1f1elemf15f0iter := range f1f1iter.HealthCheck.Command { + var f1f1elemf15f0elem string + f1f1elemf15f0elem = *f1f1elemf15f0iter + f1f1elemf15f0 = append(f1f1elemf15f0, &f1f1elemf15f0elem) + } + f1f1elemf15.Command = f1f1elemf15f0 + } + if f1f1iter.HealthCheck.Interval != nil { + f1f1elemf15.Interval = f1f1iter.HealthCheck.Interval + } + if f1f1iter.HealthCheck.Retries != nil { + f1f1elemf15.Retries = f1f1iter.HealthCheck.Retries + } + if f1f1iter.HealthCheck.StartPeriod != nil { + f1f1elemf15.StartPeriod = f1f1iter.HealthCheck.StartPeriod + } + if f1f1iter.HealthCheck.Timeout != nil { + f1f1elemf15.Timeout = f1f1iter.HealthCheck.Timeout + } + f1f1elem.HealthCheck = f1f1elemf15 + } + if f1f1iter.Hostname != nil { + f1f1elem.Hostname = f1f1iter.Hostname + } + if f1f1iter.Image != nil { + f1f1elem.Image = f1f1iter.Image + } + if f1f1iter.Interactive != nil { + f1f1elem.Interactive = f1f1iter.Interactive + } + if f1f1iter.Links != nil { + f1f1elemf19 := []*string{} + for _, f1f1elemf19iter := range f1f1iter.Links { + var f1f1elemf19elem string + f1f1elemf19elem = *f1f1elemf19iter + f1f1elemf19 = append(f1f1elemf19, &f1f1elemf19elem) + } + f1f1elem.Links = f1f1elemf19 + } + if f1f1iter.LinuxParameters != nil { + f1f1elemf20 := &ecs.LinuxParameters{} + if f1f1iter.LinuxParameters.Capabilities != nil { + f1f1elemf20f0 := &ecs.KernelCapabilities{} + if f1f1iter.LinuxParameters.Capabilities.Add != nil { + f1f1elemf20f0f0 := []*string{} + for _, f1f1elemf20f0f0iter := range f1f1iter.LinuxParameters.Capabilities.Add { + var f1f1elemf20f0f0elem string + f1f1elemf20f0f0elem = *f1f1elemf20f0f0iter + f1f1elemf20f0f0 = append(f1f1elemf20f0f0, &f1f1elemf20f0f0elem) + } + f1f1elemf20f0.Add = f1f1elemf20f0f0 + } + if f1f1iter.LinuxParameters.Capabilities.Drop != nil { + f1f1elemf20f0f1 := []*string{} + for _, f1f1elemf20f0f1iter := range f1f1iter.LinuxParameters.Capabilities.Drop { + var f1f1elemf20f0f1elem string + f1f1elemf20f0f1elem = *f1f1elemf20f0f1iter + f1f1elemf20f0f1 = append(f1f1elemf20f0f1, &f1f1elemf20f0f1elem) + } + f1f1elemf20f0.Drop = f1f1elemf20f0f1 + } + f1f1elemf20.Capabilities = f1f1elemf20f0 + } + if f1f1iter.LinuxParameters.Devices != nil { + f1f1elemf20f1 := []*ecs.Device{} + for _, f1f1elemf20f1iter := range f1f1iter.LinuxParameters.Devices { + f1f1elemf20f1elem := &ecs.Device{} + if f1f1elemf20f1iter.ContainerPath != nil { + f1f1elemf20f1elem.ContainerPath = f1f1elemf20f1iter.ContainerPath + } + if f1f1elemf20f1iter.HostPath != nil { + f1f1elemf20f1elem.HostPath = f1f1elemf20f1iter.HostPath + } + if f1f1elemf20f1iter.Permissions != nil { + f1f1elemf20f1elemf2 := []*string{} + for _, f1f1elemf20f1elemf2iter := range f1f1elemf20f1iter.Permissions { + var f1f1elemf20f1elemf2elem string + f1f1elemf20f1elemf2elem = *f1f1elemf20f1elemf2iter + f1f1elemf20f1elemf2 = append(f1f1elemf20f1elemf2, &f1f1elemf20f1elemf2elem) + } + f1f1elemf20f1elem.Permissions = f1f1elemf20f1elemf2 + } + f1f1elemf20f1 = append(f1f1elemf20f1, f1f1elemf20f1elem) + } + f1f1elemf20.Devices = f1f1elemf20f1 + } + if f1f1iter.LinuxParameters.InitProcessEnabled != nil { + f1f1elemf20.InitProcessEnabled = f1f1iter.LinuxParameters.InitProcessEnabled + } + if f1f1iter.LinuxParameters.MaxSwap != nil { + f1f1elemf20.MaxSwap = f1f1iter.LinuxParameters.MaxSwap + } + if f1f1iter.LinuxParameters.SharedMemorySize != nil { + f1f1elemf20.SharedMemorySize = f1f1iter.LinuxParameters.SharedMemorySize + } + if f1f1iter.LinuxParameters.Swappiness != nil { + f1f1elemf20.Swappiness = f1f1iter.LinuxParameters.Swappiness + } + if 
f1f1iter.LinuxParameters.Tmpfs != nil { + f1f1elemf20f6 := []*ecs.Tmpfs{} + for _, f1f1elemf20f6iter := range f1f1iter.LinuxParameters.Tmpfs { + f1f1elemf20f6elem := &ecs.Tmpfs{} + if f1f1elemf20f6iter.ContainerPath != nil { + f1f1elemf20f6elem.ContainerPath = f1f1elemf20f6iter.ContainerPath + } + if f1f1elemf20f6iter.MountOptions != nil { + f1f1elemf20f6elemf1 := []*string{} + for _, f1f1elemf20f6elemf1iter := range f1f1elemf20f6iter.MountOptions { + var f1f1elemf20f6elemf1elem string + f1f1elemf20f6elemf1elem = *f1f1elemf20f6elemf1iter + f1f1elemf20f6elemf1 = append(f1f1elemf20f6elemf1, &f1f1elemf20f6elemf1elem) + } + f1f1elemf20f6elem.MountOptions = f1f1elemf20f6elemf1 + } + if f1f1elemf20f6iter.Size != nil { + f1f1elemf20f6elem.Size = f1f1elemf20f6iter.Size + } + f1f1elemf20f6 = append(f1f1elemf20f6, f1f1elemf20f6elem) + } + f1f1elemf20.Tmpfs = f1f1elemf20f6 + } + f1f1elem.LinuxParameters = f1f1elemf20 + } + if f1f1iter.LogConfiguration != nil { + f1f1elemf21 := &ecs.LogConfiguration{} + if f1f1iter.LogConfiguration.LogDriver != nil { + f1f1elemf21.LogDriver = f1f1iter.LogConfiguration.LogDriver + } + if f1f1iter.LogConfiguration.Options != nil { + f1f1elemf21f1 := map[string]*string{} + for f1f1elemf21f1key, f1f1elemf21f1valiter := range f1f1iter.LogConfiguration.Options { + var f1f1elemf21f1val string + f1f1elemf21f1val = *f1f1elemf21f1valiter + f1f1elemf21f1[f1f1elemf21f1key] = &f1f1elemf21f1val + } + f1f1elemf21.Options = f1f1elemf21f1 + } + if f1f1iter.LogConfiguration.SecretOptions != nil { + f1f1elemf21f2 := []*ecs.Secret{} + for _, f1f1elemf21f2iter := range f1f1iter.LogConfiguration.SecretOptions { + f1f1elemf21f2elem := &ecs.Secret{} + if f1f1elemf21f2iter.Name != nil { + f1f1elemf21f2elem.Name = f1f1elemf21f2iter.Name + } + if f1f1elemf21f2iter.ValueFrom != nil { + f1f1elemf21f2elem.ValueFrom = f1f1elemf21f2iter.ValueFrom + } + f1f1elemf21f2 = append(f1f1elemf21f2, f1f1elemf21f2elem) + } + f1f1elemf21.SecretOptions = f1f1elemf21f2 + } + f1f1elem.LogConfiguration = f1f1elemf21 + } + if f1f1iter.Memory != nil { + f1f1elem.Memory = f1f1iter.Memory + } + if f1f1iter.MemoryReservation != nil { + f1f1elem.MemoryReservation = f1f1iter.MemoryReservation + } + if f1f1iter.MountPoints != nil { + f1f1elemf24 := []*ecs.MountPoint{} + for _, f1f1elemf24iter := range f1f1iter.MountPoints { + f1f1elemf24elem := &ecs.MountPoint{} + if f1f1elemf24iter.ContainerPath != nil { + f1f1elemf24elem.ContainerPath = f1f1elemf24iter.ContainerPath + } + if f1f1elemf24iter.ReadOnly != nil { + f1f1elemf24elem.ReadOnly = f1f1elemf24iter.ReadOnly + } + if f1f1elemf24iter.SourceVolume != nil { + f1f1elemf24elem.SourceVolume = f1f1elemf24iter.SourceVolume + } + f1f1elemf24 = append(f1f1elemf24, f1f1elemf24elem) + } + f1f1elem.MountPoints = f1f1elemf24 + } + if f1f1iter.Name != nil { + f1f1elem.Name = f1f1iter.Name + } + if f1f1iter.PortMappings != nil { + f1f1elemf26 := []*ecs.PortMapping{} + for _, f1f1elemf26iter := range f1f1iter.PortMappings { + f1f1elemf26elem := &ecs.PortMapping{} + if f1f1elemf26iter.AppProtocol != nil { + f1f1elemf26elem.AppProtocol = f1f1elemf26iter.AppProtocol + } + if f1f1elemf26iter.ContainerPort != nil { + f1f1elemf26elem.ContainerPort = f1f1elemf26iter.ContainerPort + } + if f1f1elemf26iter.ContainerPortRange != nil { + f1f1elemf26elem.ContainerPortRange = f1f1elemf26iter.ContainerPortRange + } + if f1f1elemf26iter.HostPort != nil { + f1f1elemf26elem.HostPort = f1f1elemf26iter.HostPort + } + if f1f1elemf26iter.Name != nil { + f1f1elemf26elem.Name = f1f1elemf26iter.Name + } + if 
f1f1elemf26iter.Protocol != nil { + f1f1elemf26elem.Protocol = f1f1elemf26iter.Protocol + } + f1f1elemf26 = append(f1f1elemf26, f1f1elemf26elem) + } + f1f1elem.PortMappings = f1f1elemf26 + } + if f1f1iter.Privileged != nil { + f1f1elem.Privileged = f1f1iter.Privileged + } + if f1f1iter.PseudoTerminal != nil { + f1f1elem.PseudoTerminal = f1f1iter.PseudoTerminal + } + if f1f1iter.ReadonlyRootFilesystem != nil { + f1f1elem.ReadonlyRootFilesystem = f1f1iter.ReadonlyRootFilesystem + } + if f1f1iter.RepositoryCredentials != nil { + f1f1elemf30 := &ecs.RepositoryCredentials{} + if f1f1iter.RepositoryCredentials.CredentialsParameter != nil { + f1f1elemf30.CredentialsParameter = f1f1iter.RepositoryCredentials.CredentialsParameter + } + f1f1elem.RepositoryCredentials = f1f1elemf30 + } + if f1f1iter.ResourceRequirements != nil { + f1f1elemf31 := []*ecs.ResourceRequirement{} + for _, f1f1elemf31iter := range f1f1iter.ResourceRequirements { + f1f1elemf31elem := &ecs.ResourceRequirement{} + if f1f1elemf31iter.Type != nil { + f1f1elemf31elem.Type = f1f1elemf31iter.Type + } + if f1f1elemf31iter.Value != nil { + f1f1elemf31elem.Value = f1f1elemf31iter.Value + } + f1f1elemf31 = append(f1f1elemf31, f1f1elemf31elem) + } + f1f1elem.ResourceRequirements = f1f1elemf31 + } + if f1f1iter.Secrets != nil { + f1f1elemf32 := []*ecs.Secret{} + for _, f1f1elemf32iter := range f1f1iter.Secrets { + f1f1elemf32elem := &ecs.Secret{} + if f1f1elemf32iter.Name != nil { + f1f1elemf32elem.Name = f1f1elemf32iter.Name + } + if f1f1elemf32iter.ValueFrom != nil { + f1f1elemf32elem.ValueFrom = f1f1elemf32iter.ValueFrom + } + f1f1elemf32 = append(f1f1elemf32, f1f1elemf32elem) + } + f1f1elem.Secrets = f1f1elemf32 + } + if f1f1iter.StartTimeout != nil { + f1f1elem.StartTimeout = f1f1iter.StartTimeout + } + if f1f1iter.StopTimeout != nil { + f1f1elem.StopTimeout = f1f1iter.StopTimeout + } + if f1f1iter.SystemControls != nil { + f1f1elemf35 := []*ecs.SystemControl{} + for _, f1f1elemf35iter := range f1f1iter.SystemControls { + f1f1elemf35elem := &ecs.SystemControl{} + if f1f1elemf35iter.Namespace != nil { + f1f1elemf35elem.Namespace = f1f1elemf35iter.Namespace + } + if f1f1elemf35iter.Value != nil { + f1f1elemf35elem.Value = f1f1elemf35iter.Value + } + f1f1elemf35 = append(f1f1elemf35, f1f1elemf35elem) + } + f1f1elem.SystemControls = f1f1elemf35 + } + if f1f1iter.Ulimits != nil { + f1f1elemf36 := []*ecs.Ulimit{} + for _, f1f1elemf36iter := range f1f1iter.Ulimits { + f1f1elemf36elem := &ecs.Ulimit{} + if f1f1elemf36iter.HardLimit != nil { + f1f1elemf36elem.HardLimit = f1f1elemf36iter.HardLimit + } + if f1f1elemf36iter.Name != nil { + f1f1elemf36elem.Name = f1f1elemf36iter.Name + } + if f1f1elemf36iter.SoftLimit != nil { + f1f1elemf36elem.SoftLimit = f1f1elemf36iter.SoftLimit + } + f1f1elemf36 = append(f1f1elemf36, f1f1elemf36elem) + } + f1f1elem.Ulimits = f1f1elemf36 + } + if f1f1iter.User != nil { + f1f1elem.User = f1f1iter.User + } + if f1f1iter.VolumesFrom != nil { + f1f1elemf38 := []*ecs.VolumeFrom{} + for _, f1f1elemf38iter := range f1f1iter.VolumesFrom { + f1f1elemf38elem := &ecs.VolumeFrom{} + if f1f1elemf38iter.ReadOnly != nil { + f1f1elemf38elem.ReadOnly = f1f1elemf38iter.ReadOnly + } + if f1f1elemf38iter.SourceContainer != nil { + f1f1elemf38elem.SourceContainer = f1f1elemf38iter.SourceContainer + } + f1f1elemf38 = append(f1f1elemf38, f1f1elemf38elem) + } + f1f1elem.VolumesFrom = f1f1elemf38 + } + if f1f1iter.WorkingDirectory != nil { + f1f1elem.WorkingDirectory = f1f1iter.WorkingDirectory + } + f1f1 = append(f1f1, f1f1elem) + } 
+			parameters.ContainerDefinitions = f1f1
+		}
+		if resp.TaskDefinition.Cpu != nil {
+			parameters.CPU = resp.TaskDefinition.Cpu
+		}
+		if resp.TaskDefinition.EphemeralStorage != nil {
+			f1f4 := &ecs.EphemeralStorage{}
+			if resp.TaskDefinition.EphemeralStorage.SizeInGiB != nil {
+				f1f4.SizeInGiB = resp.TaskDefinition.EphemeralStorage.SizeInGiB
+			}
+			parameters.EphemeralStorage = f1f4
+		}
+		if resp.TaskDefinition.ExecutionRoleArn != nil {
+			parameters.ExecutionRoleARN = resp.TaskDefinition.ExecutionRoleArn
+		}
+		if resp.TaskDefinition.Family != nil {
+			parameters.Family = resp.TaskDefinition.Family
+		}
+		if resp.TaskDefinition.InferenceAccelerators != nil {
+			f1f7 := []*ecs.InferenceAccelerator{}
+			for _, f1f7iter := range resp.TaskDefinition.InferenceAccelerators {
+				f1f7elem := &ecs.InferenceAccelerator{}
+				if f1f7iter.DeviceName != nil {
+					f1f7elem.DeviceName = f1f7iter.DeviceName
+				}
+				if f1f7iter.DeviceType != nil {
+					f1f7elem.DeviceType = f1f7iter.DeviceType
+				}
+				f1f7 = append(f1f7, f1f7elem)
+			}
+			parameters.InferenceAccelerators = f1f7
+		}
+		if resp.TaskDefinition.IpcMode != nil {
+			parameters.IPCMode = resp.TaskDefinition.IpcMode
+		}
+		if resp.TaskDefinition.Memory != nil {
+			parameters.Memory = resp.TaskDefinition.Memory
+		}
+		if resp.TaskDefinition.NetworkMode != nil {
+			parameters.NetworkMode = resp.TaskDefinition.NetworkMode
+		}
+		if resp.TaskDefinition.PidMode != nil {
+			parameters.PIDMode = resp.TaskDefinition.PidMode
+		}
+		if resp.TaskDefinition.PlacementConstraints != nil {
+			f1f12 := []*ecs.TaskDefinitionPlacementConstraint{}
+			for _, f1f12iter := range resp.TaskDefinition.PlacementConstraints {
+				f1f12elem := &ecs.TaskDefinitionPlacementConstraint{}
+				if f1f12iter.Expression != nil {
+					f1f12elem.Expression = f1f12iter.Expression
+				}
+				if f1f12iter.Type != nil {
+					f1f12elem.Type = f1f12iter.Type
+				}
+				f1f12 = append(f1f12, f1f12elem)
+			}
+			parameters.PlacementConstraints = f1f12
+		}
+		if resp.TaskDefinition.ProxyConfiguration != nil {
+			f1f13 := &ecs.ProxyConfiguration{}
+			if resp.TaskDefinition.ProxyConfiguration.ContainerName != nil {
+				f1f13.ContainerName = resp.TaskDefinition.ProxyConfiguration.ContainerName
+			}
+			if resp.TaskDefinition.ProxyConfiguration.Properties != nil {
+				f1f13f1 := []*ecs.KeyValuePair{}
+				for _, f1f13f1iter := range resp.TaskDefinition.ProxyConfiguration.Properties {
+					f1f13f1elem := &ecs.KeyValuePair{}
+					if f1f13f1iter.Name != nil {
+						f1f13f1elem.Name = f1f13f1iter.Name
+					}
+					if f1f13f1iter.Value != nil {
+						f1f13f1elem.Value = f1f13f1iter.Value
+					}
+					f1f13f1 = append(f1f13f1, f1f13f1elem)
+				}
+				f1f13.Properties = f1f13f1
+			}
+			if resp.TaskDefinition.ProxyConfiguration.Type != nil {
+				f1f13.Type = resp.TaskDefinition.ProxyConfiguration.Type
+			}
+			parameters.ProxyConfiguration = f1f13
+		}
+		if resp.TaskDefinition.RequiresCompatibilities != nil {
+			f1f17 := []*string{}
+			for _, f1f17iter := range resp.TaskDefinition.RequiresCompatibilities {
+				var f1f17elem string
+				f1f17elem = *f1f17iter
+				f1f17 = append(f1f17, &f1f17elem)
+			}
+			parameters.RequiresCompatibilities = f1f17
+		}
+		if resp.TaskDefinition.RuntimePlatform != nil {
+			f1f19 := &ecs.RuntimePlatform{}
+			if resp.TaskDefinition.RuntimePlatform.CpuArchitecture != nil {
+				f1f19.CPUArchitecture = resp.TaskDefinition.RuntimePlatform.CpuArchitecture
+			}
+			if resp.TaskDefinition.RuntimePlatform.OperatingSystemFamily != nil {
+				f1f19.OperatingSystemFamily = resp.TaskDefinition.RuntimePlatform.OperatingSystemFamily
+			}
+			parameters.RuntimePlatform = f1f19
+		}
+		if resp.TaskDefinition.TaskRoleArn != nil {
+			parameters.TaskRoleARN = resp.TaskDefinition.TaskRoleArn
+		}
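+		// Volumes are copied onto the provider's CustomVolume type; each supported
+		// configuration (Docker, EFS, FSx for Windows File Server, host) is mapped field by field.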
+		if resp.TaskDefinition.Volumes != nil {
+			f1f23 := []*ecs.CustomVolume{}
+			for _, f1f23iter := range resp.TaskDefinition.Volumes {
+				f1f23elem := &ecs.CustomVolume{}
+				if f1f23iter.DockerVolumeConfiguration != nil {
+					f1f23elemf0 := &ecs.DockerVolumeConfiguration{}
+					if f1f23iter.DockerVolumeConfiguration.Autoprovision != nil {
+						f1f23elemf0.Autoprovision = f1f23iter.DockerVolumeConfiguration.Autoprovision
+					}
+					if f1f23iter.DockerVolumeConfiguration.Driver != nil {
+						f1f23elemf0.Driver = f1f23iter.DockerVolumeConfiguration.Driver
+					}
+					if f1f23iter.DockerVolumeConfiguration.DriverOpts != nil {
+						f1f23elemf0f2 := map[string]*string{}
+						for f1f23elemf0f2key, f1f23elemf0f2valiter := range f1f23iter.DockerVolumeConfiguration.DriverOpts {
+							var f1f23elemf0f2val string
+							f1f23elemf0f2val = *f1f23elemf0f2valiter
+							f1f23elemf0f2[f1f23elemf0f2key] = &f1f23elemf0f2val
+						}
+						f1f23elemf0.DriverOpts = f1f23elemf0f2
+					}
+					if f1f23iter.DockerVolumeConfiguration.Labels != nil {
+						f1f23elemf0f3 := map[string]*string{}
+						for f1f23elemf0f3key, f1f23elemf0f3valiter := range f1f23iter.DockerVolumeConfiguration.Labels {
+							var f1f23elemf0f3val string
+							f1f23elemf0f3val = *f1f23elemf0f3valiter
+							f1f23elemf0f3[f1f23elemf0f3key] = &f1f23elemf0f3val
+						}
+						f1f23elemf0.Labels = f1f23elemf0f3
+					}
+					if f1f23iter.DockerVolumeConfiguration.Scope != nil {
+						f1f23elemf0.Scope = f1f23iter.DockerVolumeConfiguration.Scope
+					}
+					f1f23elem.DockerVolumeConfiguration = f1f23elemf0
+				}
+				if f1f23iter.EfsVolumeConfiguration != nil {
+					f1f23elemf1 := &ecs.CustomEFSVolumeConfiguration{}
+					if f1f23iter.EfsVolumeConfiguration.AuthorizationConfig != nil {
+						f1f23elemf1f0 := &ecs.CustomEFSAuthorizationConfig{}
+						if f1f23iter.EfsVolumeConfiguration.AuthorizationConfig.AccessPointId != nil {
+							f1f23elemf1f0.AccessPointID = f1f23iter.EfsVolumeConfiguration.AuthorizationConfig.AccessPointId
+						}
+						if f1f23iter.EfsVolumeConfiguration.AuthorizationConfig.Iam != nil {
+							f1f23elemf1f0.IAM = f1f23iter.EfsVolumeConfiguration.AuthorizationConfig.Iam
+						}
+						f1f23elemf1.AuthorizationConfig = f1f23elemf1f0
+					}
+					if f1f23iter.EfsVolumeConfiguration.FileSystemId != nil {
+						f1f23elemf1.FileSystemID = f1f23iter.EfsVolumeConfiguration.FileSystemId
+					}
+					if f1f23iter.EfsVolumeConfiguration.RootDirectory != nil {
+						f1f23elemf1.RootDirectory = f1f23iter.EfsVolumeConfiguration.RootDirectory
+					}
+					if f1f23iter.EfsVolumeConfiguration.TransitEncryption != nil {
+						f1f23elemf1.TransitEncryption = f1f23iter.EfsVolumeConfiguration.TransitEncryption
+					}
+					if f1f23iter.EfsVolumeConfiguration.TransitEncryptionPort != nil {
+						f1f23elemf1.TransitEncryptionPort = f1f23iter.EfsVolumeConfiguration.TransitEncryptionPort
+					}
+					f1f23elem.EFSVolumeConfiguration = f1f23elemf1
+				}
+				if f1f23iter.FsxWindowsFileServerVolumeConfiguration != nil {
+					f1f23elemf2 := &ecs.FSxWindowsFileServerVolumeConfiguration{}
+					if f1f23iter.FsxWindowsFileServerVolumeConfiguration.AuthorizationConfig != nil {
+						f1f23elemf2f0 := &ecs.FSxWindowsFileServerAuthorizationConfig{}
+						if f1f23iter.FsxWindowsFileServerVolumeConfiguration.AuthorizationConfig.CredentialsParameter != nil {
+							f1f23elemf2f0.CredentialsParameter = f1f23iter.FsxWindowsFileServerVolumeConfiguration.AuthorizationConfig.CredentialsParameter
+						}
+						if f1f23iter.FsxWindowsFileServerVolumeConfiguration.AuthorizationConfig.Domain != nil {
+							f1f23elemf2f0.Domain = f1f23iter.FsxWindowsFileServerVolumeConfiguration.AuthorizationConfig.Domain
+						}
+						f1f23elemf2.AuthorizationConfig = f1f23elemf2f0
+					}
+					if f1f23iter.FsxWindowsFileServerVolumeConfiguration.FileSystemId != nil {
+						f1f23elemf2.FileSystemID = f1f23iter.FsxWindowsFileServerVolumeConfiguration.FileSystemId
+					}
+					if f1f23iter.FsxWindowsFileServerVolumeConfiguration.RootDirectory != nil {
+						f1f23elemf2.RootDirectory = f1f23iter.FsxWindowsFileServerVolumeConfiguration.RootDirectory
+					}
+					f1f23elem.FsxWindowsFileServerVolumeConfiguration = f1f23elemf2
+				}
+				if f1f23iter.Host != nil {
+					f1f23elemf3 := &ecs.HostVolumeProperties{}
+					if f1f23iter.Host.SourcePath != nil {
+						f1f23elemf3.SourcePath = f1f23iter.Host.SourcePath
+					}
+					f1f23elem.Host = f1f23elemf3
+				}
+				if f1f23iter.Name != nil {
+					f1f23elem.Name = f1f23iter.Name
+				}
+				f1f23 = append(f1f23, f1f23elem)
+			}
+
+			parameters.Volumes = f1f23
+		}
+	}
+
+	return cr
+}
+
+func IsUpToDate(target *ecs.TaskDefinitionFamily, out *awsecs.DescribeTaskDefinitionOutput) (bool, string) {
+	t := target.Spec.ForProvider.DeepCopy()
+	c := GenerateTaskDefinitionFamilyFromDescribe(out).Spec.ForProvider.DeepCopy()
+
+	tags := func(a, b *ecs.Tag) bool { return aws.StringValue(a.Key) < aws.StringValue(b.Key) }
+
+	diff := cmp.Diff(c, t,
+		cmpopts.EquateEmpty(),
+		cmpopts.SortSlices(tags),
+		// Not present in DescribeTaskDefinitionOutput
+		cmpopts.IgnoreFields(ecs.TaskDefinitionFamilyParameters{}, "Region"),
+		cmpopts.IgnoreTypes(&xpv1.Reference{}, &xpv1.Selector{}, []xpv1.Reference{}))
+
+	return diff == "", diff
+}
diff --git a/pkg/controller/ecs/setup.go b/pkg/controller/ecs/setup.go
index 7b53469ecb..a4a37c7b27 100644
--- a/pkg/controller/ecs/setup.go
+++ b/pkg/controller/ecs/setup.go
@@ -23,6 +23,7 @@ import (
 	"github.com/crossplane-contrib/provider-aws/pkg/controller/ecs/cluster"
 	"github.com/crossplane-contrib/provider-aws/pkg/controller/ecs/service"
 	"github.com/crossplane-contrib/provider-aws/pkg/controller/ecs/taskdefinition"
+	taskdefinitionfamily "github.com/crossplane-contrib/provider-aws/pkg/controller/ecs/taskdefinitionfamily"
 	"github.com/crossplane-contrib/provider-aws/pkg/utils/setup"
 )
@@ -33,5 +34,6 @@ func Setup(mgr ctrl.Manager, o controller.Options) error {
 		cluster.SetupCluster,
 		service.SetupService,
 		taskdefinition.SetupTaskDefinition,
+		taskdefinitionfamily.SetupTaskDefinitionFamily,
 	)
 }
diff --git a/pkg/controller/ecs/taskdefinition/setup.go b/pkg/controller/ecs/taskdefinition/setup.go
index 53ce2a4b2c..69091f18c3 100644
--- a/pkg/controller/ecs/taskdefinition/setup.go
+++ b/pkg/controller/ecs/taskdefinition/setup.go
@@ -93,7 +93,7 @@ func postObserve(_ context.Context, cr *svcapitypes.TaskDefinition, resp *svcsdk
 func preCreate(_ context.Context, cr *svcapitypes.TaskDefinition, obj *svcsdk.RegisterTaskDefinitionInput) error {
 	obj.ExecutionRoleArn = cr.Spec.ForProvider.ExecutionRoleARN
 	obj.TaskRoleArn = cr.Spec.ForProvider.TaskRoleARN
-	obj.Volumes = generateVolumes(cr)
+	obj.Volumes = GenerateVolumes(cr)

 	if err := obj.Validate(); err != nil {
 		return err
@@ -121,7 +121,7 @@ func preDelete(_ context.Context, cr *svcapitypes.TaskDefinition, obj *svcsdk.De
 // Helper func to copy CustomVolume types into AWS SDK types
 // Mostly copied from the autogenerated conversion code before
 // ignoring the API in generator-config.yaml
-func generateVolumes(cr *svcapitypes.TaskDefinition) []*svcsdk.Volume { //nolint:gocyclo
+func GenerateVolumes(cr *svcapitypes.TaskDefinition) []*svcsdk.Volume { //nolint:gocyclo
 	volumes := []*svcsdk.Volume{}

 	if cr.Spec.ForProvider.Volumes == nil {
diff --git a/pkg/controller/ecs/taskdefinition/setup_test.go b/pkg/controller/ecs/taskdefinition/setup_test.go
index 3cdc8c7ff6..a6893d6194 100644
--- a/pkg/controller/ecs/taskdefinition/setup_test.go
+++ b/pkg/controller/ecs/taskdefinition/setup_test.go
@@ -209,7 +209,7 @@ func TestConvertVolumes(t *testing.T) {
 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
-			got := generateVolumes(tc.cr)
+			got := GenerateVolumes(tc.cr)

 			if diff := cmp.Diff(tc.want, got); diff != "" {
 				t.Errorf("%s\nExample(...): -want, +got:\n%s", tc.reason, diff)
 			}
diff --git a/pkg/controller/ecs/taskdefinitionfamily/controller.go b/pkg/controller/ecs/taskdefinitionfamily/controller.go
new file mode 100644
index 0000000000..432bcd3ca7
--- /dev/null
+++ b/pkg/controller/ecs/taskdefinitionfamily/controller.go
@@ -0,0 +1,237 @@
+package taskdefinitionfamily
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	awsecs "github.com/aws/aws-sdk-go/service/ecs"
+	awsecsiface "github.com/aws/aws-sdk-go/service/ecs/ecsiface"
+	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+	"github.com/crossplane/crossplane-runtime/pkg/connection"
+	"github.com/crossplane/crossplane-runtime/pkg/controller"
+	"github.com/crossplane/crossplane-runtime/pkg/event"
+	"github.com/crossplane/crossplane-runtime/pkg/meta"
+	"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
+	"github.com/crossplane/crossplane-runtime/pkg/resource"
+	"github.com/google/go-cmp/cmp"
+	"github.com/pkg/errors"
+	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	ecs "github.com/crossplane-contrib/provider-aws/apis/ecs/v1alpha1"
+	"github.com/crossplane-contrib/provider-aws/apis/v1alpha1"
+	tdfclient "github.com/crossplane-contrib/provider-aws/pkg/clients/ecs/taskdefinitionfamily"
+	"github.com/crossplane-contrib/provider-aws/pkg/controller/ecs/taskdefinition"
+	"github.com/crossplane-contrib/provider-aws/pkg/features"
+	connectaws "github.com/crossplane-contrib/provider-aws/pkg/utils/connect/aws"
+	errorutils "github.com/crossplane-contrib/provider-aws/pkg/utils/errors"
+	custommanaged "github.com/crossplane-contrib/provider-aws/pkg/utils/reconciler/managed"
+)
+
+const (
+	errUnexpectedObject = "managed resource is not a TaskDefinitionFamily resource"
+	errCreateSession    = "cannot create a new session"
+	errCreate           = "cannot create TaskDefinition in AWS"
+	errUpdate           = "cannot update TaskDefinition in AWS"
+	errDescribe         = "failed to describe TaskDefinition"
+	errDelete           = "failed to delete TaskDefinition"
+)
+
+// SetupTaskDefinitionFamily adds a controller that reconciles a TaskDefinitionFamily.
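+// Updates are rolled out by registering a new task definition revision and then
+// deregistering the previously observed revision, so the managed resource tracks
+// the family rather than a single immutable revision.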
+func SetupTaskDefinitionFamily(mgr ctrl.Manager, o controller.Options) error {
+	name := managed.ControllerName(ecs.TaskDefinitionFamilyGroupKind)
+
+	cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())}
+	if o.Features.Enabled(features.EnableAlphaExternalSecretStores) {
+		cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), v1alpha1.StoreConfigGroupVersionKind))
+	}
+
+	reconcilerOpts := []managed.ReconcilerOption{
+		managed.WithCriticalAnnotationUpdater(custommanaged.NewRetryingCriticalAnnotationUpdater(mgr.GetClient())),
+		managed.WithExternalConnecter(&connector{kube: mgr.GetClient()}),
+		managed.WithInitializers(),
+		managed.WithPollInterval(o.PollInterval),
+		managed.WithLogger(o.Logger.WithValues("controller", name)),
+		managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
+		managed.WithConnectionPublishers(cps...),
+	}
+
+	if o.Features.Enabled(features.EnableAlphaManagementPolicies) {
+		reconcilerOpts = append(reconcilerOpts, managed.WithManagementPolicies())
+	}
+
+	r := managed.NewReconciler(mgr,
+		resource.ManagedKind(ecs.TaskDefinitionFamilyGroupVersionKind),
+		reconcilerOpts...)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(name).
+		WithOptions(o.ForControllerRuntime()).
+		WithEventFilter(resource.DesiredStateChanged()).
+		For(&ecs.TaskDefinitionFamily{}).
+		Complete(r)
+}
+
+type connector struct {
+	kube client.Client
+}
+
+type external struct {
+	client awsecsiface.ECSAPI
+	kube   client.Client
+}
+
+func (c *connector) Connect(ctx context.Context, mg resource.Managed) (managed.ExternalClient, error) {
+	cr, ok := mg.(*ecs.TaskDefinitionFamily)
+	if !ok {
+		return nil, errors.New(errUnexpectedObject)
+	}
+	sess, err := connectaws.GetConfigV1(ctx, c.kube, mg, cr.Spec.ForProvider.Region)
+	if err != nil {
+		return nil, errors.Wrap(err, errCreateSession)
+	}
+	return &external{
+		kube:   c.kube,
+		client: awsecs.New(sess),
+	}, nil
+}
+
+func (e *external) Observe(ctx context.Context, mg resource.Managed) (managed.ExternalObservation, error) {
+	cr, ok := mg.(*ecs.TaskDefinitionFamily)
+	if !ok {
+		return managed.ExternalObservation{}, errors.New(errUnexpectedObject)
+	}
+
+	if meta.GetExternalName(cr) == "" {
+		return managed.ExternalObservation{
+			ResourceExists: false,
+		}, nil
+	}
+
+	input := taskdefinition.GenerateDescribeTaskDefinitionInput(GenerateTaskDefinition(cr))
+	input.SetTaskDefinition(meta.GetExternalName(cr))
+	if err := input.Validate(); err != nil {
+		return managed.ExternalObservation{}, err
+	}
+	input.Include = []*string{ptr.To("TAGS")}
+
+	resp, err := e.client.DescribeTaskDefinitionWithContext(ctx, input)
+	if err != nil {
+		return managed.ExternalObservation{ResourceExists: false}, errorutils.Wrap(resource.Ignore(isNotFound, err), errDescribe)
+	}
+	currentSpec := cr.Spec.ForProvider.DeepCopy()
+	tdfclient.LateInitialize(&cr.Spec.ForProvider, resp)
+
+	GenerateTaskDefinitionFamily(taskdefinition.GenerateTaskDefinition(resp)).
+		Status.AtProvider.DeepCopyInto(&cr.Status.AtProvider)
+
+	resourceExists := true
+	if aws.StringValue(cr.Status.AtProvider.TaskDefinition.Status) == awsecs.TaskDefinitionStatusActive {
+		cr.SetConditions(xpv1.Available())
+	}
+	if aws.StringValue(cr.Status.AtProvider.TaskDefinition.Status) == awsecs.TaskDefinitionStatusInactive {
+		// Deleted task definitions can still be described in the API and show up with an INACTIVE status.
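+		// Report it as missing so the reconciler registers a fresh revision for the family.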
+		resourceExists = false
+		cr.SetConditions(xpv1.Unavailable())
+	}
+
+	isUpToDate, diff := tdfclient.IsUpToDate(cr, resp)
+
+	return managed.ExternalObservation{
+		ResourceExists: resourceExists,
+		ResourceUpToDate: isUpToDate,
+		Diff: diff,
+		ResourceLateInitialized: !cmp.Equal(cr.Spec.ForProvider, *currentSpec),
+	}, nil
+}
+
+func (e *external) Create(ctx context.Context, mg resource.Managed) (managed.ExternalCreation, error) {
+	cr, ok := mg.(*ecs.TaskDefinitionFamily)
+	if !ok {
+		return managed.ExternalCreation{}, errors.New(errUnexpectedObject)
+	}
+
+	cr.Status.SetConditions(xpv1.Creating())
+
+	input := taskdefinition.GenerateRegisterTaskDefinitionInput(GenerateTaskDefinition(cr))
+	input.ExecutionRoleArn = cr.Spec.ForProvider.ExecutionRoleARN
+	input.TaskRoleArn = cr.Spec.ForProvider.TaskRoleARN
+	input.Volumes = taskdefinition.GenerateVolumes(GenerateTaskDefinition(cr))
+
+	resp, err := e.client.RegisterTaskDefinitionWithContext(ctx, input)
+	if err != nil {
+		return managed.ExternalCreation{}, errorutils.Wrap(err, errCreate)
+	}
+
+	meta.SetExternalName(cr, aws.StringValue(stripRevision(resp.TaskDefinition.TaskDefinitionArn)))
+
+	return managed.ExternalCreation{}, nil
+}
+
+func (e *external) Update(ctx context.Context, mg resource.Managed) (managed.ExternalUpdate, error) {
+	cr, ok := mg.(*ecs.TaskDefinitionFamily)
+	if !ok {
+		return managed.ExternalUpdate{}, errors.New(errUnexpectedObject)
+	}
+
+	createInput := taskdefinition.GenerateRegisterTaskDefinitionInput(GenerateTaskDefinition(cr))
+	createInput.ExecutionRoleArn = cr.Spec.ForProvider.ExecutionRoleARN
+	createInput.TaskRoleArn = cr.Spec.ForProvider.TaskRoleARN
+	createInput.Volumes = taskdefinition.GenerateVolumes(GenerateTaskDefinition(cr))
+
+	_, err := e.client.RegisterTaskDefinitionWithContext(ctx, createInput)
+	if err != nil {
+		return managed.ExternalUpdate{}, errorutils.Wrap(err, errCreate)
+	}
+
+	if cr.Status.AtProvider.TaskDefinition != nil && cr.Status.AtProvider.TaskDefinition.TaskDefinitionARN != nil {
+		deleteInput := &awsecs.DeregisterTaskDefinitionInput{
+			TaskDefinition: cr.Status.AtProvider.TaskDefinition.TaskDefinitionARN,
+		}
+		_, err = e.client.DeregisterTaskDefinitionWithContext(ctx, deleteInput)
+		if err != nil {
+			return managed.ExternalUpdate{}, errorutils.Wrap(err, errDelete)
+		}
+	}
+
+	return managed.ExternalUpdate{}, nil
+}
+
+func (e *external) Delete(ctx context.Context, mg resource.Managed) error {
+	cr, ok := mg.(*ecs.TaskDefinitionFamily)
+	if !ok {
+		return errors.New(errUnexpectedObject)
+	}
+
+	cr.Status.SetConditions(xpv1.Deleting())
+
+	input := taskdefinition.GenerateDeregisterTaskDefinitionInput(GenerateTaskDefinition(cr))
+	input.SetTaskDefinition(*cr.Status.AtProvider.TaskDefinition.TaskDefinitionARN)
+	_, err := e.client.DeregisterTaskDefinitionWithContext(ctx, input)
+
+	return errorutils.Wrap(resource.Ignore(taskdefinition.IsNotFound, err), errDelete)
+}
+
+// stripRevision strips the revision suffix from a TaskDefinition ARN.
+func stripRevision(arn *string) *string {
+	if arn != nil {
+		if idx := strings.LastIndex(*arn, ":"); idx != -1 {
+			if _, err := fmt.Sscanf((*arn)[idx:], ":%d", new(int)); err == nil {
+				return ptr.To((*arn)[:idx])
+			}
+		}
+		return arn
+	}
+	return nil
+}
+
+// isNotFound returns whether the given error is of type NotFound.
+func isNotFound(err error) bool {
+	awsErr, ok := err.(awserr.Error) //nolint:errorlint
+	// There is no specific error for a 404 Not Found; the API returns a 400 ClientException instead.
+	return ok && awsErr.Code() == "ClientException"
+}
diff --git a/pkg/controller/ecs/taskdefinitionfamily/conversions.go b/pkg/controller/ecs/taskdefinitionfamily/conversions.go
new file mode 100644
index 0000000000..e78aac4998
--- /dev/null
+++ b/pkg/controller/ecs/taskdefinitionfamily/conversions.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2021 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package taskdefinitionfamily
+
+import (
+	awsecs "github.com/aws/aws-sdk-go/service/ecs"
+
+	ecs "github.com/crossplane-contrib/provider-aws/apis/ecs/v1alpha1"
+)
+
+// GenerateTaskDefinition converts a TaskDefinitionFamily into a TaskDefinition so the auto-generated TaskDefinition functions can be reused.
+func GenerateTaskDefinition(f *ecs.TaskDefinitionFamily) *ecs.TaskDefinition {
+	return &ecs.TaskDefinition{
+		Spec: ecs.TaskDefinitionSpec{
+			ForProvider: ecs.TaskDefinitionParameters{
+				Region: f.Spec.ForProvider.Region,
+				ContainerDefinitions: f.Spec.ForProvider.ContainerDefinitions,
+				CPU: f.Spec.ForProvider.CPU,
+				EphemeralStorage: f.Spec.ForProvider.EphemeralStorage,
+				Family: f.Spec.ForProvider.Family,
+				InferenceAccelerators: f.Spec.ForProvider.InferenceAccelerators,
+				IPCMode: f.Spec.ForProvider.IPCMode,
+				Memory: f.Spec.ForProvider.Memory,
+				NetworkMode: f.Spec.ForProvider.NetworkMode,
+				PIDMode: f.Spec.ForProvider.PIDMode,
+				PlacementConstraints: f.Spec.ForProvider.PlacementConstraints,
+				ProxyConfiguration: f.Spec.ForProvider.ProxyConfiguration,
+				RequiresCompatibilities: f.Spec.ForProvider.RequiresCompatibilities,
+				RuntimePlatform: f.Spec.ForProvider.RuntimePlatform,
+				Tags: f.Spec.ForProvider.Tags,
+				CustomTaskDefinitionParameters: f.Spec.ForProvider.CustomTaskDefinitionParameters,
+			},
+		},
+		Status: ecs.TaskDefinitionStatus{
+			AtProvider: ecs.TaskDefinitionObservation{
+				TaskDefinition: f.Status.AtProvider.TaskDefinition,
+			},
+		},
+	}
+}
+
+// GenerateTaskDefinitionFamily converts a TaskDefinition into a TaskDefinitionFamily so the auto-generated TaskDefinition functions can be reused.
+func GenerateTaskDefinitionFamily(f *ecs.TaskDefinition) *ecs.TaskDefinitionFamily {
+	return &ecs.TaskDefinitionFamily{
+		Spec: ecs.TaskDefinitionFamilySpec{
+			ForProvider: ecs.TaskDefinitionFamilyParameters{
+				Region: f.Spec.ForProvider.Region,
+				ContainerDefinitions: f.Spec.ForProvider.ContainerDefinitions,
+				CPU: f.Spec.ForProvider.CPU,
+				EphemeralStorage: f.Spec.ForProvider.EphemeralStorage,
+				Family: f.Spec.ForProvider.Family,
+				InferenceAccelerators: f.Spec.ForProvider.InferenceAccelerators,
+				IPCMode: f.Spec.ForProvider.IPCMode,
+				Memory: f.Spec.ForProvider.Memory,
+				NetworkMode: f.Spec.ForProvider.NetworkMode,
+				PIDMode: f.Spec.ForProvider.PIDMode,
+				PlacementConstraints: f.Spec.ForProvider.PlacementConstraints,
+				ProxyConfiguration: f.Spec.ForProvider.ProxyConfiguration,
+				RequiresCompatibilities: f.Spec.ForProvider.RequiresCompatibilities,
+				RuntimePlatform: f.Spec.ForProvider.RuntimePlatform,
+				Tags: f.Spec.ForProvider.Tags,
+				CustomTaskDefinitionParameters: f.Spec.ForProvider.CustomTaskDefinitionParameters,
+			},
+		},
+		Status: ecs.TaskDefinitionFamilyApiStatus{
+			AtProvider: ecs.TaskDefinitionFamilyObservation{
+				TaskDefinition: f.Status.AtProvider.TaskDefinition,
+			},
+		},
+	}
+}
+
+// GenerateDescribeTaskDefinitionOutput converts a RegisterTaskDefinitionOutput into a DescribeTaskDefinitionOutput to reuse the auto-generated convert functions.
+func GenerateDescribeTaskDefinitionOutput(registerOutput *awsecs.RegisterTaskDefinitionOutput) *awsecs.DescribeTaskDefinitionOutput {
+	return &awsecs.DescribeTaskDefinitionOutput{
+		Tags: registerOutput.Tags,
+		TaskDefinition: registerOutput.TaskDefinition,
+	}
+}