Skip to content

Commit

Permalink
Merge pull request #2450 from openshift-cherrypick-robot/cherry-pick-2448-to-release_1.2.45
Browse files Browse the repository at this point in the history

[release_1.2.45] OCM-11054 | fix: add unit to disk size on describe nodepool
  • Loading branch information
davidleerh authored Sep 11, 2024
2 parents cc664c3 + 4763985 commit 7f79b00
Show file tree
Hide file tree
Showing 3 changed files with 17 additions and 13 deletions.
10 changes: 8 additions & 2 deletions cmd/describe/machinepool/cmd_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ Tags:
Taints:
Availability zone: us-east-1a
Subnet:
Disk Size: 300 GiB
Version: 4.12.24
EC2 Metadata Http Tokens: optional
Autorepair: No
Expand All @@ -57,6 +58,7 @@ Tags:
Taints:
Availability zone: us-east-1a
Subnet:
Disk Size: 300 GiB
Version: 4.12.24
EC2 Metadata Http Tokens: optional
Autorepair: No
Expand All @@ -83,6 +85,7 @@ Tags: foo=bar
Taints:
Availability zone: us-east-1a
Subnet:
Disk Size: 300 GiB
Version: 4.12.24
EC2 Metadata Http Tokens: optional
Autorepair: No
Expand All @@ -102,6 +105,8 @@ Scheduled upgrade: scheduled 4.12.25 on 2023-08-07 15:22 UTC
aws_node_pool:
instance_type: m5.xlarge
kind: AWSNodePool
root_volume:
size: 300
id: nodepool85
kind: NodePool
management_upgrade:
Expand Down Expand Up @@ -426,7 +431,7 @@ var _ = Describe("Upgrade machine pool", func() {
// formatNodePool simulates the output of APIs for a fake node pool
func formatNodePool() string {
version := cmv1.NewVersion().ID("4.12.24").RawID("openshift-4.12.24")
awsNodePool := cmv1.NewAWSNodePool().InstanceType("m5.xlarge")
awsNodePool := cmv1.NewAWSNodePool().InstanceType("m5.xlarge").RootVolume(cmv1.NewAWSVolume().Size(300))
nodeDrain := cmv1.NewValue().Value(1).Unit("minute")
mgmtUpgrade := cmv1.NewNodePoolManagementUpgrade().Type("Replace").MaxSurge("1").MaxUnavailable("0")
np, err := cmv1.NewNodePool().ID(nodePoolName).Version(version).
Expand All @@ -439,7 +444,8 @@ func formatNodePool() string {
// formatNodePool simulates the output of APIs for a fake node pool with AWS tags
func formatNodePoolWithTags() string {
version := cmv1.NewVersion().ID("4.12.24").RawID("openshift-4.12.24")
awsNodePool := cmv1.NewAWSNodePool().InstanceType("m5.xlarge").Tags(map[string]string{"foo": "bar"})
awsNodePool := cmv1.NewAWSNodePool().InstanceType("m5.xlarge").Tags(map[string]string{"foo": "bar"}).
RootVolume(cmv1.NewAWSVolume().Size(300))
nodeDrain := cmv1.NewValue().Value(1).Unit("minute")
mgmtUpgrade := cmv1.NewNodePoolManagementUpgrade().Type("Replace").MaxSurge("1").MaxUnavailable("0")
np, err := cmv1.NewNodePool().ID(nodePoolName).Version(version).
Expand Down
9 changes: 2 additions & 7 deletions pkg/machinepool/output.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ var nodePoolOutputString string = "\n" +
"Taints: %s\n" +
"Availability zone: %s\n" +
"Subnet: %s\n" +
"Disk Size: %s\n" +
"Version: %s\n" +
"EC2 Metadata Http Tokens: %s\n" +
"Autorepair: %s\n" +
Expand Down Expand Up @@ -77,6 +78,7 @@ func nodePoolOutput(clusterId string, nodePool *cmv1.NodePool) string {
ocmOutput.PrintTaints(nodePool.Taints()),
nodePool.AvailabilityZone(),
nodePool.Subnet(),
ocmOutput.PrintNodePoolDiskSize(nodePool.AWSNodePool()),
ocmOutput.PrintNodePoolVersion(nodePool.Version()),
ocmOutput.PrintEC2MetadataHttpTokens(nodePool.AWSNodePool()),
ocmOutput.PrintNodePoolAutorepair(nodePool.AutoRepair()),
Expand All @@ -88,12 +90,5 @@ func nodePoolOutput(clusterId string, nodePool *cmv1.NodePool) string {
ocmOutput.PrintNodePoolMessage(nodePool.Status()),
)

if nodePool.AWSNodePool() != nil && nodePool.AWSNodePool().RootVolume() != nil {
diskSize, ok := nodePool.AWSNodePool().RootVolume().GetSize()
if ok {
output += fmt.Sprintf("Disk size: %d\n", diskSize)
}
}

return output
}
11 changes: 7 additions & 4 deletions pkg/machinepool/output_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,11 @@ var _ = Describe("Output", Ordered, func() {
Expect(out).To(Equal(result))
})
It("nodepool output with autoscaling", func() {
awsNodePoolBuilder := cmv1.NewAWSNodePool().RootVolume(cmv1.NewAWSVolume().Size(300))
npAutoscaling := cmv1.NewNodePoolAutoscaling().ID("test-as").MinReplica(2).MaxReplica(8)
mgmtUpgradeBuilder := cmv1.NewNodePoolManagementUpgrade().MaxSurge("1").MaxUnavailable("0")
nodePoolBuilder := *cmv1.NewNodePool().ID("test-mp").Autoscaling(npAutoscaling).Replicas(4).
AWSNodePool(awsNodePoolBuilder).
AvailabilityZone("test-az").Subnet("test-subnets").Version(cmv1.NewVersion().
ID("1")).AutoRepair(false).TuningConfigs("test-tc").
KubeletConfigs("test-kc").Labels(labels).Taints(taintsBuilder).
Expand All @@ -119,13 +121,15 @@ var _ = Describe("Output", Ordered, func() {

out := fmt.Sprintf(nodePoolOutputString,
"test-mp", "test-cluster", "Yes", replicasOutput, "", "", labelsOutput, "", taintsOutput, "test-az",
"test-subnets", "1", "optional", "No", "test-tc", "test-kc", "", "", managementUpgradeOutput, "")
"test-subnets", "300 GiB", "1", "optional", "No", "test-tc", "test-kc", "", "", managementUpgradeOutput, "")

result := nodePoolOutput("test-cluster", nodePool)
Expect(out).To(Equal(result))
})
It("nodepool output without autoscaling", func() {
awsNodePoolBuilder := cmv1.NewAWSNodePool().RootVolume(cmv1.NewAWSVolume().Size(300))
nodePoolBuilder := *cmv1.NewNodePool().ID("test-mp").Replicas(4).
AWSNodePool(awsNodePoolBuilder).
AvailabilityZone("test-az").Subnet("test-subnets").Version(cmv1.NewVersion().
ID("1")).AutoRepair(false).TuningConfigs("test-tc").
KubeletConfigs("test-kc").Labels(labels).Taints(taintsBuilder)
Expand All @@ -136,7 +140,7 @@ var _ = Describe("Output", Ordered, func() {

out := fmt.Sprintf(nodePoolOutputString,
"test-mp", "test-cluster", "No", "4", "", "", labelsOutput, "", taintsOutput, "test-az",
"test-subnets", "1", "optional", "No", "test-tc", "test-kc", "", "", "", "")
"test-subnets", "300 GiB", "1", "optional", "No", "test-tc", "test-kc", "", "", "", "")

result := nodePoolOutput("test-cluster", nodePool)
Expect(out).To(Equal(result))
Expand All @@ -154,8 +158,7 @@ var _ = Describe("Output", Ordered, func() {

out := fmt.Sprintf(nodePoolOutputString,
"test-mp", "test-cluster", "No", "4", "", "", labelsOutput, "", taintsOutput, "test-az",
"test-subnets", "1", "optional", "No", "test-tc", "test-kc", "", "", "", "")
out += "Disk size: 256\n"
"test-subnets", "256 GiB", "1", "optional", "No", "test-tc", "test-kc", "", "", "", "")

result := nodePoolOutput("test-cluster", nodePool)
Expect(out).To(Equal(result))
Expand Down

0 comments on commit 7f79b00

Please sign in to comment.