diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2c3e124c770..08a8f6aeb2e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,16 @@
+Release v2.0.0-preview.2 (2018-01-15)
+===
+
+### Services
+* Synced the V2 SDK with latest AWS service API definitions.
+
+### SDK Bugs
+* `service/s3/s3manager`: Fix Upload Manager's UploadInput fields ([#89](https://github.com/aws/aws-sdk-go-v2/pull/89))
+  * Fixes [#88](https://github.com/aws/aws-sdk-go-v2/issues/88)
+* `aws`: Fix Pagination handling of empty string NextToken ([#94](https://github.com/aws/aws-sdk-go-v2/pull/94))
+  * Fixes [#84](https://github.com/aws/aws-sdk-go-v2/issues/84)
+
+
 Release v2.0.0-preview.1 (2017-12-21)
 ===
diff --git a/aws/version.go b/aws/version.go
index 83ee8badba2..e3414201e66 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "2.0.0-preview.1"
+const SDKVersion = "2.0.0-preview.2"
diff --git a/models/apis/AWSMigrationHub/2017-05-31/docs-2.json b/models/apis/AWSMigrationHub/2017-05-31/docs-2.json
index 674c7721cb1..c593e773987 100644
--- a/models/apis/AWSMigrationHub/2017-05-31/docs-2.json
+++ b/models/apis/AWSMigrationHub/2017-05-31/docs-2.json
@@ -63,7 +63,7 @@
       "base": null,
       "refs": {
         "DisassociateDiscoveredResourceRequest$ConfigurationId": "

ConfigurationId of the ADS resource to be disassociated.

", - "DiscoveredResource$ConfigurationId": "

The configurationId in ADS that uniquely identifies the on-premise resource.

" + "DiscoveredResource$ConfigurationId": "

The configurationId in ADS that uniquely identifies the on-premises resource.

" } }, "CreateProgressUpdateStreamRequest": { diff --git a/models/apis/codebuild/2016-10-06/api-2.json b/models/apis/codebuild/2016-10-06/api-2.json index c13cb71d3ca..7d474566112 100644 --- a/models/apis/codebuild/2016-10-06/api-2.json +++ b/models/apis/codebuild/2016-10-06/api-2.json @@ -442,7 +442,8 @@ "type":"structure", "members":{ "name":{"shape":"String"}, - "description":{"shape":"String"} + "description":{"shape":"String"}, + "versions":{"shape":"ImageVersions"} } }, "EnvironmentImages":{ @@ -498,6 +499,10 @@ "type":"list", "member":{"shape":"EnvironmentVariable"} }, + "ImageVersions":{ + "type":"list", + "member":{"shape":"String"} + }, "InvalidInputException":{ "type":"structure", "members":{ diff --git a/models/apis/codebuild/2016-10-06/docs-2.json b/models/apis/codebuild/2016-10-06/docs-2.json index 5b9890105f8..7ee0804b85a 100644 --- a/models/apis/codebuild/2016-10-06/docs-2.json +++ b/models/apis/codebuild/2016-10-06/docs-2.json @@ -253,6 +253,12 @@ "StartBuildInput$environmentVariablesOverride": "

A set of environment variables that overrides, for this build only, the latest ones already defined in the build project.
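As a minimal Go sketch of a per-build override (assuming the v2 preview's external.LoadDefaultAWSConfig and Request/Send calling pattern; the project and variable names are placeholders, not from this diff):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codebuild"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := codebuild.New(cfg)

	// Override STAGE for this build only; the project-level value is untouched.
	req := svc.StartBuildRequest(&codebuild.StartBuildInput{
		ProjectName: aws.String("my-project"), // placeholder project name
		EnvironmentVariablesOverride: []codebuild.EnvironmentVariable{
			{Name: aws.String("STAGE"), Value: aws.String("beta")}, // placeholder variable
		},
	})
	resp, err := req.Send()
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Build)
}
```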

" } }, + "ImageVersions": { + "base": null, + "refs": { + "EnvironmentImage$versions": "

A list of environment image versions.

" + } + }, "InvalidInputException": { "base": "

The input value that was provided is not valid.

", "refs": { @@ -556,6 +562,7 @@ "EnvironmentImage$name": "

The name of the Docker image.

", "EnvironmentImage$description": "

The description of the Docker image.

", "EnvironmentVariable$value": "

The value of the environment variable.

We strongly discourage using environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. Environment variables can be displayed in plain text using tools such as the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI).

", + "ImageVersions$member": null, "ListBuildsForProjectInput$nextToken": "

During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a next token. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
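A minimal Go sketch of that token loop (assuming the v2 preview's Request/Send calling pattern; the project name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codebuild"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := codebuild.New(cfg)

	var token *string
	for {
		// Re-send the request with the token from the previous page,
		// until no next token is returned.
		req := svc.ListBuildsForProjectRequest(&codebuild.ListBuildsForProjectInput{
			ProjectName: aws.String("my-project"), // placeholder project name
			NextToken:   token,
		})
		resp, err := req.Send()
		if err != nil {
			panic(err)
		}
		for _, id := range resp.Ids {
			fmt.Println(id)
		}
		if resp.NextToken == nil {
			break
		}
		token = resp.NextToken
	}
}
```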

", "ListBuildsForProjectOutput$nextToken": "

If there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a next token. To get the next batch of items in the list, call this operation again, adding the next token to the call.

", "ListBuildsInput$nextToken": "

During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a next token. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

", @@ -636,11 +643,11 @@ } }, "VpcConfig": { - "base": "

If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.

", + "base": "

Information about the VPC configuration that AWS CodeBuild will access.

", "refs": { "Build$vpcConfig": "

If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.

", "CreateProjectInput$vpcConfig": "

VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.
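A minimal Go sketch of attaching such a configuration via UpdateProject (assuming the v2 preview's Request/Send pattern; all IDs are placeholders and the Go field names are inferred from the model shapes above):

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codebuild"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := codebuild.New(cfg)

	// Per the constraint above: one VPC ID, plus at least one subnet
	// and one security group belonging to that VPC.
	req := svc.UpdateProjectRequest(&codebuild.UpdateProjectInput{
		Name: aws.String("my-project"), // placeholder project name
		VpcConfig: &codebuild.VpcConfig{
			VpcId:            aws.String("vpc-00000000"), // placeholder IDs
			Subnets:          []string{"subnet-00000000"},
			SecurityGroupIds: []string{"sg-00000000"},
		},
	})
	if _, err := req.Send(); err != nil {
		panic(err)
	}
}
```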

", - "Project$vpcConfig": "

If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.

", + "Project$vpcConfig": "

Information about the VPC configuration that AWS CodeBuild will access.

", "UpdateProjectInput$vpcConfig": "

VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.

" } }, diff --git a/models/apis/codedeploy/2014-10-06/api-2.json b/models/apis/codedeploy/2014-10-06/api-2.json index 35b14d6b959..39c08bf0797 100644 --- a/models/apis/codedeploy/2014-10-06/api-2.json +++ b/models/apis/codedeploy/2014-10-06/api-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"CodeDeploy", "serviceFullName":"AWS CodeDeploy", + "serviceId":"CodeDeploy", "signatureVersion":"v4", "targetPrefix":"CodeDeploy_20141006", "timestampFormat":"unixTimestamp", @@ -22,6 +23,7 @@ "input":{"shape":"AddTagsToOnPremisesInstancesInput"}, "errors":[ {"shape":"InstanceNameRequiredException"}, + {"shape":"InvalidInstanceNameException"}, {"shape":"TagRequiredException"}, {"shape":"InvalidTagException"}, {"shape":"TagLimitExceededException"}, @@ -151,7 +153,8 @@ {"shape":"ApplicationNameRequiredException"}, {"shape":"InvalidApplicationNameException"}, {"shape":"ApplicationAlreadyExistsException"}, - {"shape":"ApplicationLimitExceededException"} + {"shape":"ApplicationLimitExceededException"}, + {"shape":"InvalidComputePlatformException"} ] }, "CreateDeployment":{ @@ -179,7 +182,12 @@ {"shape":"InvalidTargetInstancesException"}, {"shape":"InvalidAutoRollbackConfigException"}, {"shape":"InvalidLoadBalancerInfoException"}, - {"shape":"InvalidFileExistsBehaviorException"} + {"shape":"InvalidFileExistsBehaviorException"}, + {"shape":"InvalidRoleException"}, + {"shape":"InvalidAutoScalingGroupException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidUpdateOutdatedInstancesOnlyValueException"}, + {"shape":"InvalidIgnoreApplicationStopFailuresValueException"} ] }, "CreateDeploymentConfig":{ @@ -195,7 +203,9 @@ {"shape":"DeploymentConfigNameRequiredException"}, {"shape":"DeploymentConfigAlreadyExistsException"}, {"shape":"InvalidMinimumHealthyHostValueException"}, - {"shape":"DeploymentConfigLimitExceededException"} + {"shape":"DeploymentConfigLimitExceededException"}, + {"shape":"InvalidComputePlatformException"}, + {"shape":"InvalidTrafficRoutingConfigurationException"} ] }, "CreateDeploymentGroup":{ @@ -232,7 +242,8 @@ {"shape":"InvalidBlueGreenDeploymentConfigurationException"}, {"shape":"InvalidEC2TagCombinationException"}, {"shape":"InvalidOnPremisesTagCombinationException"}, - {"shape":"TagSetListLimitExceededException"} + {"shape":"TagSetListLimitExceededException"}, + {"shape":"InvalidInputException"} ] }, "DeleteApplication":{ @@ -277,6 +288,22 @@ {"shape":"InvalidRoleException"} ] }, + "DeleteGitHubAccountToken":{ + "name":"DeleteGitHubAccountToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGitHubAccountTokenInput"}, + "output":{"shape":"DeleteGitHubAccountTokenOutput"}, + "errors":[ + {"shape":"GitHubAccountTokenNameRequiredException"}, + {"shape":"GitHubAccountTokenDoesNotExistException"}, + {"shape":"InvalidGitHubAccountTokenNameException"}, + {"shape":"ResourceValidationException"}, + {"shape":"OperationNotSupportedException"} + ] + }, "DeregisterOnPremisesInstance":{ "name":"DeregisterOnPremisesInstance", "http":{ @@ -505,7 +532,8 @@ "output":{"shape":"ListGitHubAccountTokenNamesOutput"}, "errors":[ {"shape":"InvalidNextTokenException"}, - {"shape":"ResourceValidationException"} + {"shape":"ResourceValidationException"}, + {"shape":"OperationNotSupportedException"} ] }, "ListOnPremisesInstances":{ @@ -522,6 +550,24 @@ {"shape":"InvalidNextTokenException"} ] }, + "PutLifecycleEventHookExecutionStatus":{ + "name":"PutLifecycleEventHookExecutionStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"PutLifecycleEventHookExecutionStatusInput"}, + "output":{"shape":"PutLifecycleEventHookExecutionStatusOutput"}, + "errors":[ + {"shape":"InvalidLifecycleEventHookExecutionStatusException"}, + {"shape":"InvalidLifecycleEventHookExecutionIdException"}, + {"shape":"LifecycleEventAlreadyCompletedException"}, + {"shape":"DeploymentIdRequiredException"}, + {"shape":"DeploymentDoesNotExistException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"UnsupportedActionForDeploymentTypeException"} + ] + }, "RegisterApplicationRevision":{ "name":"RegisterApplicationRevision", "http":{ @@ -567,6 +613,7 @@ "input":{"shape":"RemoveTagsFromOnPremisesInstancesInput"}, "errors":[ {"shape":"InstanceNameRequiredException"}, + {"shape":"InvalidInstanceNameException"}, {"shape":"TagRequiredException"}, {"shape":"InvalidTagException"}, {"shape":"TagLimitExceededException"}, @@ -652,7 +699,8 @@ {"shape":"InvalidBlueGreenDeploymentConfigurationException"}, {"shape":"InvalidEC2TagCombinationException"}, {"shape":"InvalidOnPremisesTagCombinationException"}, - {"shape":"TagSetListLimitExceededException"} + {"shape":"TagSetListLimitExceededException"}, + {"shape":"InvalidInputException"} ] } }, @@ -668,7 +716,10 @@ "instanceNames":{"shape":"InstanceNameList"} } }, - "AdditionalDeploymentStatusInfo":{"type":"string"}, + "AdditionalDeploymentStatusInfo":{ + "type":"string", + "deprecated":true + }, "Alarm":{ "type":"structure", "members":{ @@ -714,7 +765,8 @@ "applicationName":{"shape":"ApplicationName"}, "createTime":{"shape":"Timestamp"}, "linkedToGitHub":{"shape":"Boolean"}, - "gitHubAccountName":{"shape":"GitHubAccountTokenName"} + "gitHubAccountName":{"shape":"GitHubAccountTokenName"}, + "computePlatform":{"shape":"ComputePlatform"} } }, "ApplicationLimitExceededException":{ @@ -807,6 +859,7 @@ }, "BatchGetApplicationsInput":{ "type":"structure", + "required":["applicationNames"], "members":{ "applicationNames":{"shape":"ApplicationsList"} } @@ -855,6 +908,7 @@ }, "BatchGetDeploymentsInput":{ "type":"structure", + "required":["deploymentIds"], "members":{ "deploymentIds":{"shape":"DeploymentsList"} } @@ -867,6 +921,7 @@ }, "BatchGetOnPremisesInstancesInput":{ "type":"structure", + "required":["instanceNames"], "members":{ "instanceNames":{"shape":"InstanceNameList"} } @@ -910,10 +965,19 @@ "enum":[ "tar", "tgz", - "zip" + "zip", + "YAML", + "JSON" ] }, "CommitId":{"type":"string"}, + "ComputePlatform":{ + "type":"string", + "enum":[ + "Server", + "Lambda" + ] + }, "ContinueDeploymentInput":{ "type":"structure", "members":{ @@ -924,7 +988,8 @@ "type":"structure", "required":["applicationName"], "members":{ - "applicationName":{"shape":"ApplicationName"} + "applicationName":{"shape":"ApplicationName"}, + "computePlatform":{"shape":"ComputePlatform"} } }, "CreateApplicationOutput":{ @@ -935,13 +1000,12 @@ }, "CreateDeploymentConfigInput":{ "type":"structure", - "required":[ - "deploymentConfigName", - "minimumHealthyHosts" - ], + "required":["deploymentConfigName"], "members":{ "deploymentConfigName":{"shape":"DeploymentConfigName"}, - "minimumHealthyHosts":{"shape":"MinimumHealthyHosts"} + "minimumHealthyHosts":{"shape":"MinimumHealthyHosts"}, + "trafficRoutingConfig":{"shape":"TrafficRoutingConfig"}, + "computePlatform":{"shape":"ComputePlatform"} } }, "CreateDeploymentConfigOutput":{ @@ -1034,6 +1098,18 @@ "hooksNotCleanedUp":{"shape":"AutoScalingGroupList"} } }, + "DeleteGitHubAccountTokenInput":{ + "type":"structure", + "members":{ + "tokenName":{"shape":"GitHubAccountTokenName"} + } + 
}, + "DeleteGitHubAccountTokenOutput":{ + "type":"structure", + "members":{ + "tokenName":{"shape":"GitHubAccountTokenName"} + } + }, "DeploymentAlreadyCompletedException":{ "type":"structure", "members":{ @@ -1065,7 +1141,9 @@ "deploymentConfigId":{"shape":"DeploymentConfigId"}, "deploymentConfigName":{"shape":"DeploymentConfigName"}, "minimumHealthyHosts":{"shape":"MinimumHealthyHosts"}, - "createTime":{"shape":"Timestamp"} + "createTime":{"shape":"Timestamp"}, + "computePlatform":{"shape":"ComputePlatform"}, + "trafficRoutingConfig":{"shape":"TrafficRoutingConfig"} } }, "DeploymentConfigLimitExceededException":{ @@ -1137,7 +1215,8 @@ "lastSuccessfulDeployment":{"shape":"LastDeploymentInfo"}, "lastAttemptedDeployment":{"shape":"LastDeploymentInfo"}, "ec2TagSet":{"shape":"EC2TagSet"}, - "onPremisesTagSet":{"shape":"OnPremisesTagSet"} + "onPremisesTagSet":{"shape":"OnPremisesTagSet"}, + "computePlatform":{"shape":"ComputePlatform"} } }, "DeploymentGroupInfoList":{ @@ -1199,7 +1278,9 @@ "blueGreenDeploymentConfiguration":{"shape":"BlueGreenDeploymentConfiguration"}, "loadBalancerInfo":{"shape":"LoadBalancerInfo"}, "additionalDeploymentStatusInfo":{"shape":"AdditionalDeploymentStatusInfo"}, - "fileExistsBehavior":{"shape":"FileExistsBehavior"} + "fileExistsBehavior":{"shape":"FileExistsBehavior"}, + "deploymentStatusMessages":{"shape":"DeploymentStatusMessageList"}, + "computePlatform":{"shape":"ComputePlatform"} } }, "DeploymentIsNotInReadyStateException":{ @@ -1268,6 +1349,10 @@ "type":"list", "member":{"shape":"DeploymentStatus"} }, + "DeploymentStatusMessageList":{ + "type":"list", + "member":{"shape":"ErrorMessage"} + }, "DeploymentStyle":{ "type":"structure", "members":{ @@ -1376,7 +1461,15 @@ "AGENT_ISSUE", "AUTO_SCALING_IAM_ROLE_PERMISSIONS", "AUTO_SCALING_CONFIGURATION", - "MANUAL_STOP" + "MANUAL_STOP", + "MISSING_BLUE_GREEN_DEPLOYMENT_CONFIGURATION", + "MISSING_ELB_INFORMATION", + "MISSING_GITHUB_TOKEN", + "ELASTIC_LOAD_BALANCING_INVALID", + "ELB_INVALID_INSTANCE", + "INVALID_LAMBDA_CONFIGURATION", + "INVALID_LAMBDA_FUNCTION", + "HOOK_EXECUTION_FAILURE" ] }, "ErrorInformation":{ @@ -1521,6 +1614,12 @@ "type":"list", "member":{"shape":"GitHubAccountTokenName"} }, + "GitHubAccountTokenNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "GitHubLocation":{ "type":"structure", "members":{ @@ -1716,6 +1815,12 @@ }, "exception":true }, + "InvalidComputePlatformException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidDeployedStateFilterException":{ "type":"structure", "members":{ @@ -1776,6 +1881,12 @@ }, "exception":true }, + "InvalidGitHubAccountTokenNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidIamSessionArnException":{ "type":"structure", "members":{ @@ -1788,6 +1899,24 @@ }, "exception":true }, + "InvalidIgnoreApplicationStopFailuresValueException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidInstanceNameException":{ "type":"structure", "members":{ @@ -1812,6 +1941,18 @@ }, "exception":true }, + "InvalidLifecycleEventHookExecutionIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidLifecycleEventHookExecutionStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, 
"InvalidLoadBalancerInfoException":{ "type":"structure", "members":{ @@ -1896,12 +2037,24 @@ }, "exception":true }, + "InvalidTrafficRoutingConfigurationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidTriggerConfigException":{ "type":"structure", "members":{ }, "exception":true }, + "InvalidUpdateOutdatedInstancesOnlyValueException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "Key":{"type":"string"}, "LastDeploymentInfo":{ "type":"structure", @@ -1933,6 +2086,13 @@ "status":{"shape":"LifecycleEventStatus"} } }, + "LifecycleEventAlreadyCompletedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LifecycleEventHookExecutionId":{"type":"string"}, "LifecycleEventList":{ "type":"list", "member":{"shape":"LifecycleEvent"} @@ -2130,6 +2290,36 @@ "type":"list", "member":{"shape":"TagFilterList"} }, + "OperationNotSupportedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Percentage":{"type":"integer"}, + "PutLifecycleEventHookExecutionStatusInput":{ + "type":"structure", + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "lifecycleEventHookExecutionId":{"shape":"LifecycleEventHookExecutionId"}, + "status":{"shape":"LifecycleEventStatus"} + } + }, + "PutLifecycleEventHookExecutionStatusOutput":{ + "type":"structure", + "members":{ + "lifecycleEventHookExecutionId":{"shape":"LifecycleEventHookExecutionId"} + } + }, + "RawString":{ + "type":"structure", + "members":{ + "content":{"shape":"RawStringContent"}, + "sha256":{"shape":"RawStringSha256"} + } + }, + "RawStringContent":{"type":"string"}, + "RawStringSha256":{"type":"string"}, "RegisterApplicationRevisionInput":{ "type":"structure", "required":[ @@ -2198,7 +2388,8 @@ "members":{ "revisionType":{"shape":"RevisionLocationType"}, "s3Location":{"shape":"S3Location"}, - "gitHubLocation":{"shape":"GitHubLocation"} + "gitHubLocation":{"shape":"GitHubLocation"}, + "string":{"shape":"RawString"} } }, "RevisionLocationList":{ @@ -2209,7 +2400,8 @@ "type":"string", "enum":[ "S3", - "GitHub" + "GitHub", + "String" ] }, "RevisionRequiredException":{ @@ -2349,6 +2541,26 @@ "ec2TagSet":{"shape":"EC2TagSet"} } }, + "ThrottlingException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TimeBasedCanary":{ + "type":"structure", + "members":{ + "canaryPercentage":{"shape":"Percentage"}, + "canaryInterval":{"shape":"WaitTimeInMins"} + } + }, + "TimeBasedLinear":{ + "type":"structure", + "members":{ + "linearPercentage":{"shape":"Percentage"}, + "linearInterval":{"shape":"WaitTimeInMins"} + } + }, "TimeRange":{ "type":"structure", "members":{ @@ -2357,6 +2569,22 @@ } }, "Timestamp":{"type":"timestamp"}, + "TrafficRoutingConfig":{ + "type":"structure", + "members":{ + "type":{"shape":"TrafficRoutingType"}, + "timeBasedCanary":{"shape":"TimeBasedCanary"}, + "timeBasedLinear":{"shape":"TimeBasedLinear"} + } + }, + "TrafficRoutingType":{ + "type":"string", + "enum":[ + "TimeBasedCanary", + "TimeBasedLinear", + "AllAtOnce" + ] + }, "TriggerConfig":{ "type":"structure", "members":{ @@ -2441,6 +2669,7 @@ } }, "Value":{"type":"string"}, - "VersionId":{"type":"string"} + "VersionId":{"type":"string"}, + "WaitTimeInMins":{"type":"integer"} } } diff --git a/models/apis/codedeploy/2014-10-06/docs-2.json b/models/apis/codedeploy/2014-10-06/docs-2.json index 21caee74721..f3e6ad6deb9 100644 --- a/models/apis/codedeploy/2014-10-06/docs-2.json +++ b/models/apis/codedeploy/2014-10-06/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - 
"service": "AWS CodeDeploy

AWS CodeDeploy is a deployment service that automates application deployments to Amazon EC2 instances or on-premises instances running in your own facility.

You can deploy a nearly unlimited variety of application content, such as code, web and configuration files, executables, packages, scripts, multimedia files, and so on. AWS CodeDeploy can deploy application content stored in Amazon S3 buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes to your existing code before you can use AWS CodeDeploy.

AWS CodeDeploy makes it easier for you to rapidly release new features, helps you avoid downtime during application deployment, and handles the complexity of updating your applications, without many of the risks associated with error-prone manual deployments.

AWS CodeDeploy Components

Use the information in this guide to help you work with the following AWS CodeDeploy components:

This guide also contains information to help you get details about the instances in your deployments and to make on-premises instances available for AWS CodeDeploy deployments.

AWS CodeDeploy Information Resources

", + "service": "AWS CodeDeploy

AWS CodeDeploy is a deployment service that automates application deployments to Amazon EC2 instances, on-premises instances running in your own facility, or serverless AWS Lambda functions.

You can deploy a nearly unlimited variety of application content, such as an updated Lambda function, code, web and configuration files, executables, packages, scripts, multimedia files, and so on. AWS CodeDeploy can deploy application content stored in Amazon S3 buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes to your existing code before you can use AWS CodeDeploy.

AWS CodeDeploy makes it easier for you to rapidly release new features, helps you avoid downtime during application deployment, and handles the complexity of updating your applications, without many of the risks associated with error-prone manual deployments.

AWS CodeDeploy Components

Use the information in this guide to help you work with the following AWS CodeDeploy components:

This guide also contains information to help you get details about the instances in your deployments, to make on-premises instances available for AWS CodeDeploy deployments, and to get details about a Lambda function deployment.

AWS CodeDeploy Information Resources

", "operations": { "AddTagsToOnPremisesInstances": "

Adds tags to on-premises instances.

", "BatchGetApplicationRevisions": "

Gets information about one or more application revisions.

", @@ -17,6 +17,7 @@ "DeleteApplication": "

Deletes an application.

", "DeleteDeploymentConfig": "

Deletes a deployment configuration.

A deployment configuration cannot be deleted if it is currently in use. Predefined configurations cannot be deleted.

", "DeleteDeploymentGroup": "

Deletes a deployment group.

", + "DeleteGitHubAccountToken": "

Deletes a GitHub account connection.

", "DeregisterOnPremisesInstance": "

Deregisters an on-premises instance.

", "GetApplication": "

Gets information about an application.

", "GetApplicationRevision": "

Gets information about an application revision.

", @@ -33,6 +34,7 @@ "ListDeployments": "

Lists the deployments in a deployment group for an application registered with the applicable IAM user or AWS account.

", "ListGitHubAccountTokenNames": "

Lists the names of stored connections to GitHub accounts.

", "ListOnPremisesInstances": "

Gets a list of names for one or more on-premises instances.

Unless otherwise specified, both registered and deregistered on-premises instance names will be listed. To list only registered or deregistered on-premises instance names, use the registration status parameter.

", + "PutLifecycleEventHookExecutionStatus": "

Sets the result of a Lambda validation function. The function validates one or both lifecycle events (BeforeAllowTraffic and AfterAllowTraffic) and returns Succeeded or Failed.
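A minimal Go sketch of a validation function reporting its result back (assuming the v2 preview's Request/Send pattern; both IDs would normally arrive in the Lambda event payload and are placeholders here, and the enum constant name is an assumption):

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := codedeploy.New(cfg)

	// Report the hook result so the deployment can proceed or roll back.
	req := svc.PutLifecycleEventHookExecutionStatusRequest(&codedeploy.PutLifecycleEventHookExecutionStatusInput{
		DeploymentId:                  aws.String("d-EXAMPLE"),         // placeholder
		LifecycleEventHookExecutionId: aws.String("hook-execution-id"), // placeholder
		Status:                        codedeploy.LifecycleEventStatusSucceeded, // assumed enum constant name
	})
	if _, err := req.Send(); err != nil {
		panic(err)
	}
}
```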

", "RegisterApplicationRevision": "

Registers with AWS CodeDeploy a revision for the specified application.

", "RegisterOnPremisesInstance": "

Registers an on-premises instance.

Only one IAM ARN (an IAM session ARN or IAM user ARN) is supported in the request. You cannot use both.

", "RemoveTagsFromOnPremisesInstances": "

Removes one or more tags from one or more on-premises instances.

", @@ -334,6 +336,17 @@ "GitHubLocation$commitId": "

The SHA1 commit ID of the GitHub commit that represents the bundled artifacts for the application revision.

" } }, + "ComputePlatform": { + "base": null, + "refs": { + "ApplicationInfo$computePlatform": "

The destination platform type for deployment of the application (Lambda or Server).

", + "CreateApplicationInput$computePlatform": "

The destination platform type for the deployment (Lambda or Server).

", + "CreateDeploymentConfigInput$computePlatform": "

The destination platform type for the deployment (Lambda or Server).

", + "DeploymentConfigInfo$computePlatform": "

The destination platform type for the deployment (Lambda or Server).

", + "DeploymentGroupInfo$computePlatform": "

The destination platform type for the deployment group (Lambda or Server).

", + "DeploymentInfo$computePlatform": "

The destination platform type for the deployment (Lambda or Server).

" + } + }, "ContinueDeploymentInput": { "base": null, "refs": { @@ -399,6 +412,16 @@ "refs": { } }, + "DeleteGitHubAccountTokenInput": { + "base": "

Represents the input of a DeleteGitHubAccount operation.

", + "refs": { + } + }, + "DeleteGitHubAccountTokenOutput": { + "base": "

Represents the output of a DeleteGitHubAccountToken operation.

", + "refs": { + } + }, "DeploymentAlreadyCompletedException": { "base": "

The deployment is already complete.

", "refs": { @@ -550,6 +573,7 @@ "InstanceSummary$deploymentId": "

The deployment ID.

", "LastDeploymentInfo$deploymentId": "

The deployment ID.

", "ListDeploymentInstancesInput$deploymentId": "

The unique ID of a deployment.

", + "PutLifecycleEventHookExecutionStatusInput$deploymentId": "

The ID of the deployment. Pass this ID to a Lambda function that validates a deployment lifecycle event.

", "RollbackInfo$rollbackDeploymentId": "

The ID of the deployment rollback.

", "RollbackInfo$rollbackTriggeringDeploymentId": "

The deployment ID of the deployment that was underway and triggered a rollback deployment because it failed or was stopped.

", "SkipWaitTimeForInstanceTerminationInput$deploymentId": "

The ID of the blue/green deployment for which you want to skip the instance termination wait time.

", @@ -621,6 +645,12 @@ "ListDeploymentsInput$includeOnlyStatuses": "

A subset of deployments to list by status:

" } }, + "DeploymentStatusMessageList": { + "base": null, + "refs": { + "DeploymentInfo$deploymentStatusMessages": "

Messages that contain information about the status of a deployment.

" + } + }, "DeploymentStyle": { "base": "

Information about the type of deployment, either in-place or blue/green, you want to run and whether to route deployment traffic behind a load balancer.

", "refs": { @@ -734,7 +764,7 @@ "ELBName": { "base": null, "refs": { - "ELBInfo$name": "

For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.

" + "ELBInfo$name": "

For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.

" } }, "ETag": { @@ -761,6 +791,7 @@ "BatchGetApplicationRevisionsOutput$errorMessage": "

Information about errors that may have occurred during the API call.

", "BatchGetDeploymentGroupsOutput$errorMessage": "

Information about errors that may have occurred during the API call.

", "BatchGetDeploymentInstancesOutput$errorMessage": "

Information about errors that may have occurred during the API call.

", + "DeploymentStatusMessageList$member": null, "ErrorInformation$message": "

An accompanying error message.

" } }, @@ -857,6 +888,8 @@ "base": null, "refs": { "ApplicationInfo$gitHubAccountName": "

The name for a connection to a GitHub account.

", + "DeleteGitHubAccountTokenInput$tokenName": "

The name of the GitHub account connection to delete.

", + "DeleteGitHubAccountTokenOutput$tokenName": "

The name of the GitHub account connection that was deleted.

", "GitHubAccountTokenNameList$member": null } }, @@ -866,6 +899,11 @@ "ListGitHubAccountTokenNamesOutput$tokenNameList": "

A list of names of connections to GitHub accounts.

" } }, + "GitHubAccountTokenNameRequiredException": { + "base": "

The call is missing a required GitHub account connection name.

", + "refs": { + } + }, "GitHubLocation": { "base": "

Information about the location of application artifacts stored in GitHub.

", "refs": { @@ -1087,6 +1125,11 @@ "refs": { } }, + "InvalidComputePlatformException": { + "base": "

The computePlatform is invalid. The computePlatform should be Lambda or Server.

", + "refs": { + } + }, "InvalidDeployedStateFilterException": { "base": "

The deployed state filter was specified in an invalid format.

", "refs": { @@ -1137,6 +1180,11 @@ "refs": { } }, + "InvalidGitHubAccountTokenNameException": { + "base": "

The format of the specified GitHub account connection name is invalid.

", + "refs": { + } + }, "InvalidIamSessionArnException": { "base": "

The IAM session ARN was specified in an invalid format.

", "refs": { @@ -1147,6 +1195,21 @@ "refs": { } }, + "InvalidIgnoreApplicationStopFailuresValueException": { + "base": "

The IgnoreApplicationStopFailures value is invalid. For AWS Lambda deployments, false is expected. For EC2/On-premises deployments, true or false is expected.

", + "refs": { + } + }, + "InvalidInputException": { + "base": "

The input was specified in an invalid format.

", + "refs": { + } + }, + "InvalidInstanceIdException": { + "base": "

", + "refs": { + } + }, "InvalidInstanceNameException": { "base": "

The specified on-premises instance name was specified in an invalid format.

", "refs": { @@ -1167,6 +1230,16 @@ "refs": { } }, + "InvalidLifecycleEventHookExecutionIdException": { + "base": "

A lifecycle event hook is invalid. Review the hooks section in your AppSpec file to ensure the lifecycle events and hook functions are valid.

", + "refs": { + } + }, + "InvalidLifecycleEventHookExecutionStatusException": { + "base": "

The result of a Lambda validation function that verifies a lifecycle event is invalid. The function should return Succeeded or Failed.

", + "refs": { + } + }, "InvalidLoadBalancerInfoException": { "base": "

An invalid load balancer name, or no load balancer name, was specified.

", "refs": { @@ -1237,11 +1310,21 @@ "refs": { } }, + "InvalidTrafficRoutingConfigurationException": { + "base": "

The configuration that specifies how traffic is routed during a deployment is invalid.

", + "refs": { + } + }, "InvalidTriggerConfigException": { "base": "

The trigger was specified in an invalid format.

", "refs": { } }, + "InvalidUpdateOutdatedInstancesOnlyValueException": { + "base": "

The UpdateOutdatedInstancesOnly value is invalid. For AWS Lambda deployments, false is expected. For EC2/On-premises deployments, true or false is expected.

", + "refs": { + } + }, "Key": { "base": null, "refs": { @@ -1269,6 +1352,18 @@ "LifecycleEventList$member": null } }, + "LifecycleEventAlreadyCompletedException": { + "base": "

An attempt was made to set the status of a lifecycle event that has already completed.

", + "refs": { + } + }, + "LifecycleEventHookExecutionId": { + "base": null, + "refs": { + "PutLifecycleEventHookExecutionStatusInput$lifecycleEventHookExecutionId": "

The execution ID of a deployment's lifecycle hook. A deployment lifecycle hook is specified in the hooks section of the AppSpec file.

", + "PutLifecycleEventHookExecutionStatusOutput$lifecycleEventHookExecutionId": "

The execution ID of the lifecycle event hook. A hook is specified in the hooks section of the deployment's AppSpec file.

" + } + }, "LifecycleEventList": { "base": null, "refs": { @@ -1284,7 +1379,8 @@ "LifecycleEventStatus": { "base": null, "refs": { - "LifecycleEvent$status": "

The deployment lifecycle event status:

" + "LifecycleEvent$status": "

The deployment lifecycle event status:

", + "PutLifecycleEventHookExecutionStatusInput$status": "

The result of a Lambda function that validates a deployment lifecycle event (Succeeded or Failed).

" } }, "LifecycleHookLimitExceededException": { @@ -1470,6 +1566,46 @@ "OnPremisesTagSet$onPremisesTagSetList": "

A list containing other lists of on-premises instance tag groups. In order for an instance to be included in the deployment group, it must be identified by all the tag groups in the list.

" } }, + "OperationNotSupportedException": { + "base": "

The API used does not support the deployment.

", + "refs": { + } + }, + "Percentage": { + "base": null, + "refs": { + "TimeBasedCanary$canaryPercentage": "

The percentage of traffic to shift in the first increment of a TimeBasedCanary deployment.

", + "TimeBasedLinear$linearPercentage": "

The percentage of traffic that is shifted at the start of each increment of a TimeBasedLinear deployment.

" + } + }, + "PutLifecycleEventHookExecutionStatusInput": { + "base": null, + "refs": { + } + }, + "PutLifecycleEventHookExecutionStatusOutput": { + "base": null, + "refs": { + } + }, + "RawString": { + "base": "

A revision for an AWS Lambda deployment that is a YAML-formatted or JSON-formatted string. For AWS Lambda deployments, the revision is the same as the AppSpec file.
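A minimal Go sketch of registering such an inline revision (assuming the v2 preview's Request/Send pattern; the application name and AppSpec content are placeholders, and the Go names for the new shapes, in particular String_ for the model's "string" member, are assumptions):

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := codedeploy.New(cfg)

	appSpec := `{"version": 0.0}` // trimmed placeholder AppSpec content

	// The revision is the AppSpec itself, passed inline as a raw string.
	req := svc.RegisterApplicationRevisionRequest(&codedeploy.RegisterApplicationRevisionInput{
		ApplicationName: aws.String("my-lambda-app"), // placeholder application
		Revision: &codedeploy.RevisionLocation{
			RevisionType: codedeploy.RevisionLocationTypeString,                // assumed enum constant name
			String_:      &codedeploy.RawString{Content: aws.String(appSpec)}, // assumed Go field name
		},
	})
	if _, err := req.Send(); err != nil {
		panic(err)
	}
}
```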

", + "refs": { + "RevisionLocation$string": "

Information about the location of an AWS Lambda deployment revision stored as a RawString.

" + } + }, + "RawStringContent": { + "base": null, + "refs": { + "RawString$content": "

The YAML-formatted or JSON-formatted revision string. It includes information about which Lambda function to update and optional Lambda functions that validate deployment lifecycle events.

" + } + }, + "RawStringSha256": { + "base": null, + "refs": { + "RawString$sha256": "

The SHA256 hash value of the revision that is specified as a RawString.

" + } + }, "RegisterApplicationRevisionInput": { "base": "

Represents the input of a RegisterApplicationRevision operation.

", "refs": { @@ -1543,7 +1679,7 @@ "RevisionLocationType": { "base": null, "refs": { - "RevisionLocation$revisionType": "

The type of application revision:

" + "RevisionLocation$revisionType": "

The type of application revision:

" } }, "RevisionRequiredException": { @@ -1587,7 +1723,7 @@ "S3Location": { "base": "

Information about the location of application artifacts stored in Amazon S3.

", "refs": { - "RevisionLocation$s3Location": "

Information about the location of application artifacts stored in Amazon S3.

" + "RevisionLocation$s3Location": "

Information about the location of a revision stored in Amazon S3.

" } }, "ScriptName": { @@ -1699,6 +1835,23 @@ "DeploymentInfo$targetInstances": "

Information about the instances that belong to the replacement environment in a blue/green deployment.

" } }, + "ThrottlingException": { + "base": "

An API function was called too frequently.

", + "refs": { + } + }, + "TimeBasedCanary": { + "base": "

A configuration that shifts traffic from one version of a Lambda function to another in two increments. The original and target Lambda function versions are specified in the deployment's AppSpec file.

", + "refs": { + "TrafficRoutingConfig$timeBasedCanary": "

A configuration that shifts traffic from one version of a Lambda function to another in two increments. The original and target Lambda function versions are specified in the deployment's AppSpec file.

" + } + }, + "TimeBasedLinear": { + "base": "

A configuration that shifts traffic from one version of a Lambda function to another in equal increments, with an equal number of minutes between each increment. The original and target Lambda function versions are specified in the deployment's AppSpec file.

", + "refs": { + "TrafficRoutingConfig$timeBasedLinear": "

A configuration that shifts traffic from one version of a Lambda function to another in equal increments, with an equal number of minutes between each increment. The original and target Lambda function versions are specified in the deployment's AppSpec file.

" + } + }, "TimeRange": { "base": "

Information about a time range.

", "refs": { @@ -1727,6 +1880,19 @@ "TimeRange$end": "

The end time of the time range.

Specify null to leave the end time open-ended.

" } }, + "TrafficRoutingConfig": { + "base": "

The configuration that specifies how traffic is shifted from one version of a Lambda function to another version during an AWS Lambda deployment.
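A minimal Go sketch of a canary configuration built from these shapes (assuming the v2 preview's Request/Send pattern; the configuration name is a placeholder and the enum constant names are assumptions):

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := codedeploy.New(cfg)

	// Shift 10% of traffic first, then the remainder 15 minutes later.
	req := svc.CreateDeploymentConfigRequest(&codedeploy.CreateDeploymentConfigInput{
		DeploymentConfigName: aws.String("Canary10Percent15Minutes"), // placeholder name
		ComputePlatform:      codedeploy.ComputePlatformLambda,       // assumed enum constant name
		TrafficRoutingConfig: &codedeploy.TrafficRoutingConfig{
			Type: codedeploy.TrafficRoutingTypeTimeBasedCanary, // assumed enum constant name
			TimeBasedCanary: &codedeploy.TimeBasedCanary{
				CanaryPercentage: aws.Int64(10),
				CanaryInterval:   aws.Int64(15),
			},
		},
	})
	if _, err := req.Send(); err != nil {
		panic(err)
	}
}
```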

", + "refs": { + "CreateDeploymentConfigInput$trafficRoutingConfig": "

The configuration that specifies how the deployment traffic will be routed.

", + "DeploymentConfigInfo$trafficRoutingConfig": "

The configuration specifying how the deployment traffic will be routed. Only deployments with a Lambda compute platform can specify this.

" + } + }, + "TrafficRoutingType": { + "base": null, + "refs": { + "TrafficRoutingConfig$type": "

The type of traffic shifting (TimeBasedCanary or TimeBasedLinear) used by a deployment configuration.

" + } + }, "TriggerConfig": { "base": "

Information about notification triggers for the deployment group.

", "refs": { @@ -1803,6 +1969,13 @@ "refs": { "S3Location$version": "

A specific version of the Amazon S3 object that represents the bundled artifacts for the application revision.

If the version is not specified, the system will use the most recent version by default.

" } + }, + "WaitTimeInMins": { + "base": null, + "refs": { + "TimeBasedCanary$canaryInterval": "

The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment.

", + "TimeBasedLinear$linearInterval": "

The number of minutes between each incremental traffic shift of a TimeBasedLinear deployment.

" + } } } } diff --git a/models/apis/discovery/2015-11-01/api-2.json b/models/apis/discovery/2015-11-01/api-2.json index a06dfc6893b..4c12e6a8510 100644 --- a/models/apis/discovery/2015-11-01/api-2.json +++ b/models/apis/discovery/2015-11-01/api-2.json @@ -429,10 +429,7 @@ }, "ConfigurationTagSet":{ "type":"list", - "member":{ - "shape":"ConfigurationTag", - "locationName":"item" - } + "member":{"shape":"ConfigurationTag"} }, "Configurations":{ "type":"list", @@ -725,10 +722,7 @@ "FilterValue":{"type":"string"}, "FilterValues":{ "type":"list", - "member":{ - "shape":"FilterValue", - "locationName":"item" - } + "member":{"shape":"FilterValue"} }, "Filters":{ "type":"list", @@ -930,10 +924,7 @@ "TagKey":{"type":"string"}, "TagSet":{ "type":"list", - "member":{ - "shape":"Tag", - "locationName":"item" - } + "member":{"shape":"Tag"} }, "TagValue":{"type":"string"}, "TimeStamp":{"type":"timestamp"}, diff --git a/models/apis/discovery/2015-11-01/docs-2.json b/models/apis/discovery/2015-11-01/docs-2.json index 469abd04d06..0321f6790e0 100644 --- a/models/apis/discovery/2015-11-01/docs-2.json +++ b/models/apis/discovery/2015-11-01/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an Amazon-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see the Application Discovery Service FAQ.

Application Discovery Service offers two modes of operation.

Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.

Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service using offline mode to inspect collected data before it is shared with the service.

Your AWS account must be granted access to Application Discovery Service, a process called whitelisting. This is true for AWS partners and customers alike. To request access, sign up for AWS Application Discovery Service here. We send you information about how to get started.

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

This guide is intended for use with the AWS Application Discovery Service User Guide .

", + "service": "AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an Amazon-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see the Application Discovery Service FAQ.

Application Discovery Service offers two modes of operation.

Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.

Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service using offline mode to inspect collected data before it is shared with the service.

Your AWS account must be granted access to Application Discovery Service, a process called whitelisting. This is true for AWS partners and customers alike. To request access, sign up for AWS Application Discovery Service.

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

This guide is intended for use with the AWS Application Discovery Service User Guide .

", "operations": { "AssociateConfigurationItemsToApplication": "

Associates one or more configuration items with an application.

", "CreateApplication": "

Creates an application with the given name and description.

", @@ -18,7 +18,7 @@ "ListConfigurations": "

Retrieves a list of configuration items according to criteria that you specify in a filter. The filter criteria identifies the relationship requirements.

", "ListServerNeighbors": "

Retrieves a list of servers that are one network hop away from a specified server.

", "StartDataCollectionByAgentIds": "

Instructs the specified agents or connectors to start collecting data.

", - "StartExportTask": "

Begins the export of discovered data to an S3 bucket.

If you specify agentId in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports.

If you do not include an agentId filter, summary data is exported that includes both AWS Agentless Discovery Connector data and summary data from AWS Discovery Agents. Export of summary data is limited to two exports per day.

", + "StartExportTask": "

Begins the export of discovered data to an S3 bucket.

If you specify agentIds in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports.

If you do not include an agentIds filter, summary data is exported that includes both AWS Agentless Discovery Connector data and summary data from AWS Discovery Agents. Export of summary data is limited to two exports per day.

", "StopDataCollectionByAgentIds": "

Instructs the specified agents or connectors to stop collecting data.

", "UpdateApplication": "

Updates metadata about an application.

" }, @@ -337,7 +337,7 @@ "ExportDataFormats": { "base": null, "refs": { - "StartExportTaskRequest$exportDataFormat": "

The file format for the returned export data. Default value is CSV.

" + "StartExportTaskRequest$exportDataFormat": "

The file format for the returned export data. Default value is CSV. Note: The GRAPHML option has been deprecated.

" } }, "ExportFilter": { diff --git a/models/apis/ds/2015-04-16/api-2.json b/models/apis/ds/2015-04-16/api-2.json index 2bde9f4b063..4e655d89df4 100644 --- a/models/apis/ds/2015-04-16/api-2.json +++ b/models/apis/ds/2015-04-16/api-2.json @@ -910,7 +910,8 @@ "ShortName":{"shape":"DirectoryShortName"}, "Password":{"shape":"Password"}, "Description":{"shape":"Description"}, - "VpcSettings":{"shape":"DirectoryVpcSettings"} + "VpcSettings":{"shape":"DirectoryVpcSettings"}, + "Edition":{"shape":"DirectoryEdition"} } }, "CreateMicrosoftADResult":{ @@ -1165,6 +1166,7 @@ "Name":{"shape":"DirectoryName"}, "ShortName":{"shape":"DirectoryShortName"}, "Size":{"shape":"DirectorySize"}, + "Edition":{"shape":"DirectoryEdition"}, "Alias":{"shape":"AliasName"}, "AccessUrl":{"shape":"AccessUrl"}, "Description":{"shape":"Description"}, @@ -1186,6 +1188,13 @@ "type":"list", "member":{"shape":"DirectoryDescription"} }, + "DirectoryEdition":{ + "type":"string", + "enum":[ + "Enterprise", + "Standard" + ] + }, "DirectoryId":{ "type":"string", "pattern":"^d-[0-9a-f]{10}$" diff --git a/models/apis/ds/2015-04-16/docs-2.json b/models/apis/ds/2015-04-16/docs-2.json index 81e33240dcc..0ed07bfd09c 100644 --- a/models/apis/ds/2015-04-16/docs-2.json +++ b/models/apis/ds/2015-04-16/docs-2.json @@ -462,6 +462,13 @@ "DescribeDirectoriesResult$DirectoryDescriptions": "

The list of DirectoryDescription objects that were retrieved.

It is possible that this list contains fewer than the number of items specified in the Limit member of the request. This occurs if there are fewer than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

" } }, + "DirectoryEdition": { + "base": null, + "refs": { + "CreateMicrosoftADRequest$Edition": "

AWS Microsoft AD is available in two editions: Standard and Enterprise. Enterprise is the default.
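A minimal Go sketch of requesting the Standard edition explicitly (assuming the v2 preview's Request/Send pattern; the domain name, password, and VPC identifiers are placeholders, and the enum constant name is an assumption):

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/directoryservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := directoryservice.New(cfg)

	req := svc.CreateMicrosoftADRequest(&directoryservice.CreateMicrosoftADInput{
		Name:     aws.String("corp.example.com"),            // placeholder domain name
		Password: aws.String("REPLACE_ME"),                  // placeholder admin password
		Edition:  directoryservice.DirectoryEditionStandard, // assumed enum constant name
		VpcSettings: &directoryservice.DirectoryVpcSettings{
			VpcId:     aws.String("vpc-00000000"), // placeholder IDs
			SubnetIds: []string{"subnet-00000000", "subnet-11111111"},
		},
	})
	if _, err := req.Send(); err != nil {
		panic(err)
	}
}
```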

", + "DirectoryDescription$Edition": "

The edition associated with this directory.

" + } + }, "DirectoryId": { "base": null, "refs": { @@ -1105,7 +1112,7 @@ "base": null, "refs": { "DirectoryConnectSettingsDescription$SecurityGroupId": "

The security group identifier for the AD Connector directory.

", - "DirectoryVpcSettingsDescription$SecurityGroupId": "

The security group identifier for the directory. If the directory was created before 8/1/2014, this is the identifier of the directory members security group that was created when the directory was created. If the directory was created after this date, this value is null.

" + "DirectoryVpcSettingsDescription$SecurityGroupId": "

The domain controller security group identifier for the directory.

" } }, "Server": { diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index b2970345a60..21aab77ab5e 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -2538,6 +2538,10 @@ "PrivateIpAddress":{ "shape":"String", "locationName":"privateIpAddress" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" } } }, diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 42cb4eea35a..a64ecbf56b3 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -243,7 +243,7 @@ "ReleaseAddress": "

Releases the specified Elastic IP address.

[EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

[Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

[EC2-VPC] After you release an Elastic IP address for use in a VPC, you might be able to recover it. For more information, see AllocateAddress.

", "ReleaseHosts": "

When you no longer want to use an On-Demand Dedicated Host, it can be released. On-Demand billing is stopped and the host goes into released state. The host ID of Dedicated Hosts that have been released can no longer be specified in another request, e.g., ModifyHosts. You must stop or terminate all instances on a host before it can be released.

When Dedicated Hosts are released, it may take some time for them to stop counting toward your limit and you may receive capacity errors when trying to allocate new Dedicated Hosts. Try waiting a few minutes, and then try again.

Released hosts will still appear in a DescribeHosts response.

", "ReplaceIamInstanceProfileAssociation": "

Replaces an IAM instance profile for the specified running instance. You can use this action to change the IAM instance profile that's associated with an instance without having to disassociate the existing IAM instance profile first.

Use DescribeIamInstanceProfileAssociations to get the association ID.

", - "ReplaceNetworkAclAssociation": "

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

", + "ReplaceNetworkAclAssociation": "

Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

This is an idempotent operation.

", "ReplaceNetworkAclEntry": "

Replaces an entry (rule) in a network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

", "ReplaceRoute": "

Replaces an existing route within a route table in a VPC. You must provide only one of the following: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, network interface, or egress-only Internet gateway.

For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

", "ReplaceRouteTableAssociation": "

Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

", @@ -3385,8 +3385,8 @@ "DescribeVolumesRequest$Filters": "

One or more filters.

", "DescribeVpcClassicLinkRequest$Filters": "

One or more filters.

", "DescribeVpcEndpointConnectionNotificationsRequest$Filters": "

One or more filters.

", - "DescribeVpcEndpointConnectionsRequest$Filters": "

One or more filters.

", - "DescribeVpcEndpointServiceConfigurationsRequest$Filters": "

One or more filters.

", + "DescribeVpcEndpointConnectionsRequest$Filters": "

One or more filters.

", + "DescribeVpcEndpointServiceConfigurationsRequest$Filters": "

One or more filters.

", "DescribeVpcEndpointServicePermissionsRequest$Filters": "

One or more filters.

", "DescribeVpcEndpointServicesRequest$Filters": "

One or more filters.

", "DescribeVpcEndpointsRequest$Filters": "

One or more filters.

", @@ -5471,7 +5471,7 @@ } }, "PrefixListId": { - "base": "

The ID of the prefix.

", + "base": "

[EC2-VPC only] The ID of the prefix.

", "refs": { "PrefixListIdList$member": null } @@ -5479,7 +5479,7 @@ "PrefixListIdList": { "base": null, "refs": { - "IpPermission$PrefixListIds": "

(Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

" + "IpPermission$PrefixListIds": "

(EC2-VPC only; valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

" } }, "PrefixListIdSet": { @@ -8071,6 +8071,7 @@ "TagList": { "base": null, "refs": { + "Address$Tags": "

Any tags assigned to the Elastic IP address.
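A minimal Go sketch that reads the tags now returned on each address via the new tagSet member (assuming the v2 preview's Request/Send pattern):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := ec2.New(cfg)

	resp, err := svc.DescribeAddressesRequest(&ec2.DescribeAddressesInput{}).Send()
	if err != nil {
		panic(err)
	}
	// Print each Elastic IP address together with its tags.
	for _, addr := range resp.Addresses {
		for _, tag := range addr.Tags {
			fmt.Printf("%s: %s=%s\n", *addr.PublicIp, *tag.Key, *tag.Value)
		}
	}
}
```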

", "ClassicLinkInstance$Tags": "

Any tags assigned to the instance.

", "ConversionTask$Tags": "

Any tags assigned to the task.

", "CreateTagsRequest$Tags": "

One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.

", diff --git a/models/apis/ecs/2014-11-13/api-2.json b/models/apis/ecs/2014-11-13/api-2.json index 30dad7b936b..5672a374921 100644 --- a/models/apis/ecs/2014-11-13/api-2.json +++ b/models/apis/ecs/2014-11-13/api-2.json @@ -804,7 +804,8 @@ "deploymentConfiguration":{"shape":"DeploymentConfiguration"}, "placementConstraints":{"shape":"PlacementConstraints"}, "placementStrategy":{"shape":"PlacementStrategies"}, - "networkConfiguration":{"shape":"NetworkConfiguration"} + "networkConfiguration":{"shape":"NetworkConfiguration"}, + "healthCheckGracePeriodSeconds":{"shape":"BoxedInteger"} } }, "CreateServiceResponse":{ @@ -1519,7 +1520,8 @@ "createdAt":{"shape":"Timestamp"}, "placementConstraints":{"shape":"PlacementConstraints"}, "placementStrategy":{"shape":"PlacementStrategies"}, - "networkConfiguration":{"shape":"NetworkConfiguration"} + "networkConfiguration":{"shape":"NetworkConfiguration"}, + "healthCheckGracePeriodSeconds":{"shape":"BoxedInteger"} } }, "ServiceEvent":{ @@ -1846,7 +1848,8 @@ "deploymentConfiguration":{"shape":"DeploymentConfiguration"}, "networkConfiguration":{"shape":"NetworkConfiguration"}, "platformVersion":{"shape":"String"}, - "forceNewDeployment":{"shape":"Boolean"} + "forceNewDeployment":{"shape":"Boolean"}, + "healthCheckGracePeriodSeconds":{"shape":"BoxedInteger"} } }, "UpdateServiceResponse":{ diff --git a/models/apis/ecs/2014-11-13/docs-2.json b/models/apis/ecs/2014-11-13/docs-2.json index 4d3349aa9af..6490880c14f 100644 --- a/models/apis/ecs/2014-11-13/docs-2.json +++ b/models/apis/ecs/2014-11-13/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks using the Fargate launch type. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage by using the EC2 launch type. For more information about launch types, see Amazon ECS Launch Types.

Amazon ECS lets you launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features.

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

", + "service": "

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks using the Fargate launch type. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage by using the EC2 launch type. For more information about launch types, see Amazon ECS Launch Types.

Amazon ECS lets you launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features.

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

", "operations": { "CreateCluster": "

Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action.

When you call the CreateCluster API operation, Amazon ECS attempts to create the service-linked role for your account so that required resources in other AWS services can be managed on your behalf. However, if the IAM user that makes the call does not have permissions to create the service-linked role, it is not created. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

", "CreateService": "

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. During a deployment, the service scheduler uses the minimumHealthyPercent and maximumPercent parameters to determine the deployment strategy. The deployment is triggered by changing the task definition or the desired count of a service with an UpdateService operation.

The minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the scheduler can stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer. The default value for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the APIs.

The maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler can start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximumPercent is 200%.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

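To make the deployment math concrete, here is a minimal sketch of setting these parameters through the V2 SDK. It assumes the preview-era request/Send calling pattern; the cluster, service, and task-definition names are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := ecs.New(cfg)

	// With desiredCount=4: minimumHealthyPercent=50 lets the scheduler stop
	// two tasks to free capacity before starting replacements, and
	// maximumPercent=200 lets it start four new tasks before stopping the
	// four old ones.
	req := svc.CreateServiceRequest(&ecs.CreateServiceInput{
		Cluster:        aws.String("my-cluster"),    // placeholder
		ServiceName:    aws.String("my-service"),    // placeholder
		TaskDefinition: aws.String("my-task-def:1"), // placeholder
		DesiredCount:   aws.Int64(4),
		DeploymentConfiguration: &ecs.DeploymentConfiguration{
			MinimumHealthyPercent: aws.Int64(50),
			MaximumPercent:        aws.Int64(200),
		},
	})
	resp, err := req.Send()
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}
```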
", @@ -25,7 +25,7 @@ "PutAttributes": "

Create or update an attribute on an Amazon ECS resource. If the attribute does not exist, it is created. If the attribute exists, its value is replaced with the specified value. To delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

", "RegisterContainerInstance": "

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Registers an EC2 instance into the specified cluster. This instance becomes available to place containers on.

", "RegisterTaskDefinition": "

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the AWS CLI or SDKs to make API requests to the AWS services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an Elastic Network Interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

", - "RunTask": "

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

", + "RunTask": "

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

The Amazon ECS API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. You should keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

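One concrete mitigation is to retry reads until the write becomes visible. A minimal sketch, assuming the preview request/Send calling pattern; the retry count and backoff are arbitrary choices, not API requirements:

```go
package example

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

// waitUntilVisible polls DescribeTasks until a task started by RunTask is
// returned, retrying across the window in which an eventually consistent
// read may not yet include it.
func waitUntilVisible(svc *ecs.ECS, cluster, taskARN string) error {
	for attempt := 0; attempt < 10; attempt++ {
		req := svc.DescribeTasksRequest(&ecs.DescribeTasksInput{
			Cluster: aws.String(cluster),
			Tasks:   []string{taskARN},
		})
		resp, err := req.Send()
		if err != nil {
			return err
		}
		if len(resp.Tasks) > 0 {
			return nil // the RunTask result is now visible
		}
		time.Sleep(2 * time.Second) // back off before the next read
	}
	return fmt.Errorf("task %s not visible after retries", taskARN)
}
```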
", "StartTask": "

Starts a new task from the specified task definition on the specified container instance or instances.

Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

", "StopTask": "

Stops a running task.

When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a default 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

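On the container side, this means handling SIGTERM and exiting within the stop timeout. An illustrative entrypoint (plain Go, no SDK involved):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigs := make(chan os.Signal, 1)
	// docker stop (what StopTask triggers) delivers SIGTERM first.
	signal.Notify(sigs, syscall.SIGTERM)

	// ... normal application work would run here ...

	<-sigs
	// Finish in-flight work now; if the process is still alive when the
	// stop timeout (30 seconds by default) expires, SIGKILL follows.
	fmt.Println("SIGTERM received; shutting down cleanly")
	os.Exit(0)
}
```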
", "SubmitContainerStateChange": "

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Sent to acknowledge that a container changed states.

", @@ -143,12 +143,13 @@ "refs": { "Container$exitCode": "

The exit code returned from the container.

", "ContainerDefinition$memory": "

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers will be part of a task using the Fargate launch type, this field is optional and the only requirement is that the total amount of memory reserved for all containers within a task be lower than the task memory value.

For containers that will be part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", - "ContainerDefinition$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit; however, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

", + "ContainerDefinition$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit; however, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

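The 128/300 MiB example above maps directly onto a container definition. A sketch, assuming the preview request/Send pattern; the family, container name, and image are placeholders:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func registerBurstyContainer(svc *ecs.ECS) error {
	req := svc.RegisterTaskDefinitionRequest(&ecs.RegisterTaskDefinitionInput{
		Family: aws.String("bursty-app"), // placeholder
		ContainerDefinitions: []ecs.ContainerDefinition{{
			Name:      aws.String("web"),          // placeholder
			Image:     aws.String("nginx:latest"), // placeholder
			Essential: aws.Bool(true),
			// Reserve 128 MiB under contention, but allow bursts up to a
			// 300 MiB hard limit, as in the example above.
			MemoryReservation: aws.Int64(128),
			Memory:            aws.Int64(300),
		}},
	})
	_, err := req.Send()
	return err
}
```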
", "ContainerOverride$cpu": "

The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name.

", "ContainerOverride$memory": "

The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.

", "ContainerOverride$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name.

", "ContainerStateChange$exitCode": "

The exit code for the container, if the state change is a result of the container exiting.

", "CreateServiceRequest$desiredCount": "

The number of instantiations of the specified task definition to place and keep running on your cluster.

", + "CreateServiceRequest$healthCheckGracePeriodSeconds": "

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to ELB health checks, you can specify a health check grace period of up to 1,800 seconds during which the ECS service scheduler will ignore ELB health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

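A sketch of wiring this new field up for a load-balanced service (preview request/Send pattern assumed; the names, ports, and the target group ARN supplied by the caller are placeholders):

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func createServiceWithGracePeriod(svc *ecs.ECS, targetGroupARN string) error {
	req := svc.CreateServiceRequest(&ecs.CreateServiceInput{
		Cluster:        aws.String("my-cluster"),   // placeholder
		ServiceName:    aws.String("slow-start"),   // placeholder
		TaskDefinition: aws.String("slow-start:3"), // placeholder
		DesiredCount:   aws.Int64(2),
		LoadBalancers: []ecs.LoadBalancer{{
			TargetGroupArn: aws.String(targetGroupARN),
			ContainerName:  aws.String("web"), // placeholder
			ContainerPort:  aws.Int64(8080),
		}},
		// Ignore ELB health checks for the first 5 minutes after a task
		// starts (the maximum is 1,800 seconds).
		HealthCheckGracePeriodSeconds: aws.Int64(300),
	})
	_, err := req.Send()
	return err
}
```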
", "DeploymentConfiguration$maximumPercent": "

The upper limit (as a percentage of the service's desiredCount) of the number of tasks that are allowed in the RUNNING or PENDING state in a service during a deployment. The maximum number of tasks during a deployment is the desiredCount multiplied by maximumPercent/100, rounded down to the nearest integer value.

", "DeploymentConfiguration$minimumHealthyPercent": "

The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain in the RUNNING state in a service during a deployment. The minimum number of healthy tasks during a deployment is the desiredCount multiplied by minimumHealthyPercent/100, rounded up to the nearest integer value.

", "ListAttributesRequest$maxResults": "

The maximum number of cluster results returned by ListAttributes in paginated output. When this parameter is used, ListAttributes only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListAttributes request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListAttributes returns up to 100 results and a nextToken value if applicable.

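The maxResults/nextToken contract is the usual manual pagination loop. A sketch, assuming the preview request/Send pattern; TargetTypeContainerInstance is an assumption about the generated constant name for the container-instance target type:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func listAllAttributes(svc *ecs.ECS, cluster string) ([]ecs.Attribute, error) {
	var attrs []ecs.Attribute
	var nextToken *string
	for {
		req := svc.ListAttributesRequest(&ecs.ListAttributesInput{
			Cluster:    aws.String(cluster),
			TargetType: ecs.TargetTypeContainerInstance, // assumed enum name
			MaxResults: aws.Int64(100),
			NextToken:  nextToken, // nil on the first request
		})
		resp, err := req.Send()
		if err != nil {
			return nil, err
		}
		attrs = append(attrs, resp.Attributes...)
		if resp.NextToken == nil || *resp.NextToken == "" {
			return attrs, nil // no more pages
		}
		nextToken = resp.NextToken
	}
}
```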
", @@ -161,11 +162,13 @@ "LoadBalancer$containerPort": "

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.

", "NetworkBinding$containerPort": "

The port number on the container that is used with the network binding.

", "NetworkBinding$hostPort": "

The port number on the host that is used with the network binding.

", - "PortMapping$containerPort": "

The port number on the container that is bound to the user-specified or automatically assigned host port.

If using containers in a task with the Fargate launch type, exposed ports should be specified using containerPort.

If using containers in a task with the EC2 launch type and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range (for more information, see hostPort). Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

", - "PortMapping$hostPort": "

The port number on the container instance to reserve for your container.

If using containers in a task with the Fargate launch type, the hostPort can either be left blank or needs to be the same value as the containerPort.

If using containers in a task with the EC2 launch type, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range; if this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. You should not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default ephemeral port range from 49153 through 65535 is always used for Docker versions before 1.6.0.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678 and 51679. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output, and a container instance may have up to 100 reserved ports at a time, including the default reserved ports (automatically assigned ports do not count toward the 100 reserved ports limit).

", + "PortMapping$containerPort": "

The port number on the container that is bound to the user-specified or automatically assigned host port.

If using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort.

If using containers in a task with the bridge network mode and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range (for more information, see hostPort). Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

", + "PortMapping$hostPort": "

The port number on the container instance to reserve for your container.

If using containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or needs to be the same value as the containerPort.

If using containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range; if this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. You should not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default ephemeral port range from 49153 through 65535 is always used for Docker versions before 1.6.0.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678 and 51679. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output, and a container instance may have up to 100 reserved ports at a time, including the default reserved ports (automatically assigned ports do not count toward the 100 reserved ports limit).

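The two cases the text distinguishes look like this as PortMapping values (field names per this model; the port numbers are arbitrary):

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

// bridge network mode: omit hostPort (or set it to 0) and Docker assigns a
// host port from the instance's ephemeral range.
var bridgeMapping = ecs.PortMapping{
	ContainerPort: aws.Int64(8080),
}

// awsvpc or host network mode: hostPort must be left blank or must equal
// containerPort.
var awsvpcMapping = ecs.PortMapping{
	ContainerPort: aws.Int64(8080),
	HostPort:      aws.Int64(8080),
}
```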
", "RunTaskRequest$count": "

The number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks per call.

", + "Service$healthCheckGracePeriodSeconds": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started.

", "SubmitContainerStateChangeRequest$exitCode": "

The exit code returned for the state change request.

", - "UpdateServiceRequest$desiredCount": "

The number of instantiations of the task to place and keep running in your service.

" + "UpdateServiceRequest$desiredCount": "

The number of instantiations of the task to place and keep running in your service.

", + "UpdateServiceRequest$healthCheckGracePeriodSeconds": "

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to ELB health checks, you can specify a health check grace period of up to 1,800 seconds during which the ECS service scheduler will ignore ELB health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

" } }, "ClientException": { @@ -205,7 +208,7 @@ "ClusterFieldList": { "base": null, "refs": { - "DescribeClustersRequest$include": "

Additional information about your clusters to be separated by launch type, including:

" + "DescribeClustersRequest$include": "

Additional information about your clusters, separated by launch type, including:

" } }, "ClusterNotFoundException": { @@ -831,7 +834,7 @@ } }, "PortMapping": { - "base": "

Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.

If using containers in a task with the Fargate launch type, exposed ports should be specified using containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

", + "base": "

Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.

If using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

", "refs": { "PortMappingList$member": null } @@ -1093,8 +1096,8 @@ "RegisterTaskDefinitionRequest$family": "

You must specify a family for a task definition, which allows you to track multiple versions of the same task definition. The family is used as a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "RegisterTaskDefinitionRequest$taskRoleArn": "

The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

", "RegisterTaskDefinitionRequest$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

", - "RegisterTaskDefinitionRequest$cpu": "

The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used. If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", - "RegisterTaskDefinitionRequest$memory": "

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", + "RegisterTaskDefinitionRequest$cpu": "

The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", + "RegisterTaskDefinitionRequest$memory": "

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

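As a concrete pairing, 256 CPU units with 512 MiB is one of the valid Fargate combinations. A sketch under the preview request/Send pattern; NetworkModeAwsvpc and CompatibilityFargate are assumptions about the generated enum names:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func registerFargateTaskDef(svc *ecs.ECS) error {
	req := svc.RegisterTaskDefinitionRequest(&ecs.RegisterTaskDefinitionInput{
		Family: aws.String("fargate-app"), // placeholder
		// Task-level values are strings; 256 CPU units pairs with 512 MiB.
		Cpu:    aws.String("256"),
		Memory: aws.String("512"),
		NetworkMode:             ecs.NetworkModeAwsvpc,                         // assumed enum name
		RequiresCompatibilities: []ecs.Compatibility{ecs.CompatibilityFargate}, // assumed enum name
		ContainerDefinitions: []ecs.ContainerDefinition{{
			Name:      aws.String("app"),           // placeholder
			Image:     aws.String("amazonlinux:2"), // placeholder
			Essential: aws.Bool(true),
		}},
	})
	_, err := req.Send()
	return err
}
```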
", "Resource$name": "

The name of the resource, such as cpu, memory, ports, or a user-defined resource.

", "Resource$type": "

The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET.

", "RunTaskRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. If you do not specify a cluster, the default cluster is assumed.

", @@ -1137,8 +1140,8 @@ "Task$containerInstanceArn": "

The ARN of the container instances that host the task.

", "Task$lastStatus": "

The last known status of the task.

", "Task$desiredStatus": "

The desired status of the task.

", - "Task$cpu": "

The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", - "Task$memory": "

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", + "Task$cpu": "

The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", + "Task$memory": "

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", "Task$startedBy": "

The tag specified when a task is started. If the task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", "Task$stoppedReason": "

The reason the task was stopped.

", "Task$group": "

The name of the task group associated with the task.

", @@ -1147,8 +1150,8 @@ "TaskDefinition$family": "

The family of your task definition, used as the definition name.

", "TaskDefinition$taskRoleArn": "

The ARN of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

", "TaskDefinition$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

", - "TaskDefinition$cpu": "

The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", - "TaskDefinition$memory": "

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", + "TaskDefinition$cpu": "

The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", + "TaskDefinition$memory": "

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", "TaskDefinitionPlacementConstraint$expression": "

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", "TaskOverride$taskRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

", "TaskOverride$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

", diff --git a/models/apis/elasticloadbalancing/2012-06-01/api-2.json b/models/apis/elasticloadbalancing/2012-06-01/api-2.json index 5a4e9ba7b11..621df584915 100644 --- a/models/apis/elasticloadbalancing/2012-06-01/api-2.json +++ b/models/apis/elasticloadbalancing/2012-06-01/api-2.json @@ -135,7 +135,8 @@ {"shape":"InvalidSchemeException"}, {"shape":"TooManyTagsException"}, {"shape":"DuplicateTagKeysException"}, - {"shape":"UnsupportedProtocolException"} + {"shape":"UnsupportedProtocolException"}, + {"shape":"OperationNotPermittedException"} ] }, "CreateLoadBalancerListeners":{ @@ -1280,6 +1281,17 @@ } }, "Name":{"type":"string"}, + "OperationNotPermittedException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OperationNotPermitted", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "PageSize":{ "type":"integer", "max":400, diff --git a/models/apis/elasticloadbalancing/2012-06-01/docs-2.json b/models/apis/elasticloadbalancing/2012-06-01/docs-2.json index 29953b5a8ef..7bb4c54ff05 100644 --- a/models/apis/elasticloadbalancing/2012-06-01/docs-2.json +++ b/models/apis/elasticloadbalancing/2012-06-01/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "Elastic Load Balancing

A load balancer distributes incoming traffic across your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered instances and ensures that it routes traffic only to healthy instances. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer and a protocol and port number for connections from the load balancer to the instances.

Elastic Load Balancing supports two types of load balancers: Classic Load Balancers and Application Load Balancers (new). A Classic Load Balancer makes routing and load balancing decisions either at the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS), and supports either EC2-Classic or a VPC. An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS), supports path-based routing, and can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC). For more information, see the Elastic Load Balancing User Guide.

This reference covers the 2012-06-01 API, which supports Classic Load Balancers. The 2015-12-01 API supports Application Load Balancers.

To get started, create a load balancer with one or more listeners using CreateLoadBalancer. Register your instances with the load balancer using RegisterInstancesWithLoadBalancer.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code.

", + "service": "Elastic Load Balancing

A load balancer can distribute incoming traffic across your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered instances and ensures that it routes traffic only to healthy instances. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer and a protocol and port number for connections from the load balancer to the instances.

Elastic Load Balancing supports three types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. You can select a load balancer based on your application needs. For more information, see the Elastic Load Balancing User Guide.

This reference covers the 2012-06-01 API, which supports Classic Load Balancers. The 2015-12-01 API supports Application Load Balancers and Network Load Balancers.

To get started, create a load balancer with one or more listeners using CreateLoadBalancer. Register your instances with the load balancer using RegisterInstancesWithLoadBalancer.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code.

", "operations": { "AddTags": "

Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags.

Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, AddTags updates its value.

For more information, see Tag Your Classic Load Balancer in the Classic Load Balancer Guide.

", "ApplySecurityGroupsToLoadBalancer": "

Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups.

For more information, see Security Groups for Load Balancers in a VPC in the Classic Load Balancer Guide.

", @@ -769,6 +769,11 @@ "Limit$Name": "

The name of the limit. The possible values are:

" } }, + "OperationNotPermittedException": { + "base": "

This operation is not allowed.

", + "refs": { + } + }, "PageSize": { "base": null, "refs": { diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json index 9825058bc8b..60d9ffc2087 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json @@ -97,7 +97,8 @@ {"shape":"DuplicateTagKeysException"}, {"shape":"ResourceInUseException"}, {"shape":"AllocationIdNotFoundException"}, - {"shape":"AvailabilityZoneNotSupportedException"} + {"shape":"AvailabilityZoneNotSupportedException"}, + {"shape":"OperationNotPermittedException"} ] }, "CreateRule":{ diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json index 3823173168b..7ec79c64f03 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json @@ -4,10 +4,10 @@ "operations": { "AddListenerCertificates": "

Adds the specified certificate to the specified secure listener.

If the certificate was already added, the call is successful but the certificate is not added again.

To list the certificates for your listener, use DescribeListenerCertificates. To remove certificates from your listener, use RemoveListenerCertificates.

", "AddTags": "

Adds the specified tags to the specified Elastic Load Balancing resource. You can tag your Application Load Balancers, Network Load Balancers, and your target groups.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, AddTags updates its value.

To list the current tags for your resources, use DescribeTags. To remove tags from your resources, use RemoveTags.

", - "CreateListener": "

Creates a listener for the specified Application Load Balancer or Network Load Balancer.

To update a listener, use ModifyListener. When you are finished with a listener, you can delete it using DeleteListener. If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide.

", - "CreateLoadBalancer": "

Creates an Application Load Balancer or a Network Load Balancer.

When you create a load balancer, you can specify security groups, subnets, IP address type, and tags. Otherwise, you could do so later using SetSecurityGroups, SetSubnets, SetIpAddressType, and AddTags.

To create listeners for your load balancer, use CreateListener. To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

For limit information, see Limits for Your Application Load Balancer in the Application Load Balancers Guide and Limits for Your Network Load Balancer in the Network Load Balancers Guide.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple load balancers with the same settings, each call succeeds.

For more information, see Application Load Balancers in the Application Load Balancers Guide and Network Load Balancers in the Network Load Balancers Guide.

", + "CreateListener": "

Creates a listener for the specified Application Load Balancer or Network Load Balancer.

You can create up to 10 listeners per load balancer.

To update a listener, use ModifyListener. When you are finished with a listener, you can delete it using DeleteListener. If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer.

For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide.

", + "CreateLoadBalancer": "

Creates an Application Load Balancer or a Network Load Balancer.

When you create a load balancer, you can specify security groups, subnets, IP address type, and tags. Alternatively, you can do so later using SetSecurityGroups, SetSubnets, SetIpAddressType, and AddTags.

To create listeners for your load balancer, use CreateListener. To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Limits for Your Application Load Balancer in the Application Load Balancers Guide and Limits for Your Network Load Balancer in the Network Load Balancers Guide.

For more information, see Application Load Balancers in the Application Load Balancers Guide and Network Load Balancers in the Network Load Balancers Guide.

", "CreateRule": "

Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.

Rules are evaluated in priority order, from the lowest value to the highest value. When the condition for a rule is met, the specified action is taken. If no conditions are met, the action for the default rule is taken. For more information, see Listener Rules in the Application Load Balancers Guide.

To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule.

", - "CreateTargetGroup": "

Creates a target group.

To register targets with the target group, use RegisterTargets. To update the health check settings for the target group, use ModifyTargetGroup. To monitor the health of targets in the target group, use DescribeTargetHealth.

To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule.

To delete a target group, use DeleteTargetGroup.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple target groups with the same settings, each call succeeds.

For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide or Target Groups for Your Network Load Balancers in the Network Load Balancers Guide.

", + "CreateTargetGroup": "

Creates a target group.

To register targets with the target group, use RegisterTargets. To update the health check settings for the target group, use ModifyTargetGroup. To monitor the health of targets in the target group, use DescribeTargetHealth.

To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule.

To delete a target group, use DeleteTargetGroup.

For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide or Target Groups for Your Network Load Balancers in the Network Load Balancers Guide.

", "DeleteListener": "

Deletes the specified listener.

Alternatively, your listener is deleted when you delete the load balancer it is attached to using DeleteLoadBalancer.

", "DeleteLoadBalancer": "

Deletes the specified Application Load Balancer or Network Load Balancer and its attached listeners.

You can't delete a load balancer if deletion protection is enabled. If the load balancer does not exist or has already been deleted, the call succeeds.

Deleting a load balancer does not affect its registered targets. For example, your EC2 instances continue to run and are still registered to their target groups. If you no longer need these EC2 instances, you can stop or terminate them.

", "DeleteRule": "

Deletes the specified rule.

", @@ -493,7 +493,7 @@ } }, "InvalidTargetException": { - "base": "

The specified target does not exist, is not in the same VPC as the target group, or has an unsupported instance type.

", + "base": "

The specified target does not exist or is not in the same VPC as the target group.

", "refs": { } }, @@ -781,7 +781,7 @@ "Name": { "base": null, "refs": { - "Limit$Name": "

The name of the limit. The possible values are:

" + "Limit$Name": "

The name of the limit. The possible values are:

" } }, "OperationNotPermittedException": { @@ -1092,8 +1092,8 @@ "SubnetMappings": { "base": null, "refs": { - "CreateLoadBalancerInput$SubnetMappings": "

The IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet.

", - "SetSubnetsInput$SubnetMappings": "

The IDs of the subnets. You must specify subnets from at least two Availability Zones. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

You cannot specify Elastic IP addresses for your subnets.

" + "CreateLoadBalancerInput$SubnetMappings": "

The IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Network Load Balancers] You can specify one Elastic IP address per subnet.

[Application Load Balancers] You cannot specify Elastic IP addresses for your subnets.

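A sketch of attaching one Elastic IP address per subnet when creating a Network Load Balancer (preview request/Send pattern assumed; LoadBalancerTypeEnumNetwork is an assumption about the generated enum name, and the IDs are placeholders):

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	elbv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := elbv2.New(cfg)

	req := svc.CreateLoadBalancerRequest(&elbv2.CreateLoadBalancerInput{
		Name: aws.String("my-nlb"),              // placeholder
		Type: elbv2.LoadBalancerTypeEnumNetwork, // assumed enum name
		SubnetMappings: []elbv2.SubnetMapping{{
			SubnetId:     aws.String("subnet-0123456789abcdef0"),   // placeholder
			AllocationId: aws.String("eipalloc-0123456789abcdef0"), // one EIP per subnet
		}},
	})
	if _, err := req.Send(); err != nil {
		panic(err)
	}
}
```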
", + "SetSubnetsInput$SubnetMappings": "

The IDs of the subnets. You must specify subnets from at least two Availability Zones. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

The load balancer is allocated one static IP address per subnet. You cannot specify your own Elastic IP addresses.

" } }, "SubnetNotFoundException": { @@ -1104,7 +1104,7 @@ "Subnets": { "base": null, "refs": { - "CreateLoadBalancerInput$Subnets": "

The IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

", + "CreateLoadBalancerInput$Subnets": "

The IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

", "SetSubnetsInput$Subnets": "

The IDs of the subnets. You must specify subnets from at least two Availability Zones. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

" } }, @@ -1209,7 +1209,7 @@ "TargetGroupAttributeKey": { "base": null, "refs": { - "TargetGroupAttribute$Key": "

The name of the attribute.

" + "TargetGroupAttribute$Key": "

The name of the attribute.

" } }, "TargetGroupAttributeValue": { diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index f743b6ce443..bfad3f2c8bc 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -192,7 +192,8 @@ {"shape":"AlreadyExistsException"}, {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"}, - {"shape":"ResourceNumberLimitExceededException"} + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"ConcurrentModificationException"} ] }, "CreatePartition":{ @@ -254,9 +255,11 @@ "errors":[ {"shape":"AlreadyExistsException"}, {"shape":"InvalidInputException"}, + {"shape":"IdempotentParameterMismatchException"}, {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"}, - {"shape":"ResourceNumberLimitExceededException"} + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"ConcurrentModificationException"} ] }, "CreateUserDefinedFunction":{ @@ -401,7 +404,8 @@ "errors":[ {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentModificationException"} ] }, "DeleteUserDefinedFunction":{ @@ -951,7 +955,8 @@ {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentModificationException"} ] }, "UpdateClassifier":{ @@ -1058,7 +1063,8 @@ {"shape":"InvalidInputException"}, {"shape":"EntityNotFoundException"}, {"shape":"InternalServiceException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentModificationException"} ] }, "UpdatePartition":{ @@ -1104,7 +1110,8 @@ {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentModificationException"} ] }, "UpdateUserDefinedFunction":{ @@ -1786,13 +1793,15 @@ "type":"structure", "members":{ "DagNodes":{"shape":"DagNodes"}, - "DagEdges":{"shape":"DagEdges"} + "DagEdges":{"shape":"DagEdges"}, + "Language":{"shape":"Language"} } }, "CreateScriptResponse":{ "type":"structure", "members":{ - "PythonScript":{"shape":"PythonScript"} + "PythonScript":{"shape":"PythonScript"}, + "ScalaCode":{"shape":"ScalaCode"} } }, "CreateTableRequest":{ @@ -2467,13 +2476,15 @@ "Mapping":{"shape":"MappingList"}, "Source":{"shape":"CatalogEntry"}, "Sinks":{"shape":"CatalogEntries"}, - "Location":{"shape":"Location"} + "Location":{"shape":"Location"}, + "Language":{"shape":"Language"} } }, "GetPlanResponse":{ "type":"structure", "members":{ - "PythonScript":{"shape":"PythonScript"} + "PythonScript":{"shape":"PythonScript"}, + "ScalaCode":{"shape":"ScalaCode"} } }, "GetTableRequest":{ @@ -2778,6 +2789,13 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "Language":{ + "type":"string", + "enum":[ + "PYTHON", + "SCALA" + ] + }, "LastCrawlInfo":{ "type":"structure", "members":{ @@ -2828,7 +2846,10 @@ }, "Logical":{ "type":"string", - "enum":["AND"] + "enum":[ + "AND", + "ANY" + ] }, "LogicalOperator":{ "type":"string", @@ -3083,6 +3104,7 @@ "type":"list", "member":{"shape":"S3Target"} }, + "ScalaCode":{"type":"string"}, "Schedule":{ "type":"structure", "members":{ diff --git a/models/apis/glue/2017-03-31/docs-2.json 
b/models/apis/glue/2017-03-31/docs-2.json index e26336ff09e..25d92cba365 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -7,7 +7,7 @@ "BatchDeletePartition": "

Deletes one or more partitions in a batch operation.

", "BatchDeleteTable": "

Deletes multiple tables at once.

", "BatchGetPartition": "

Retrieves partitions in a batch request.

", - "BatchStopJobRun": "

Stops a batch of job runs for a given job.

", + "BatchStopJobRun": "

Stops one or more job runs for a specified Job.

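A sketch of calling it and splitting the response into its two lists (preview request/Send pattern assumed; the job name and run IDs are placeholders supplied by the caller):

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue"
)

func stopRuns(svc *glue.Glue, jobName string, runIDs []string) error {
	req := svc.BatchStopJobRunRequest(&glue.BatchStopJobRunInput{
		JobName:   aws.String(jobName),
		JobRunIds: runIDs,
	})
	resp, err := req.Send()
	if err != nil {
		return err
	}
	for _, s := range resp.SuccessfulSubmissions {
		if s.JobRunId != nil {
			fmt.Println("submitted for stop:", *s.JobRunId)
		}
	}
	for _, e := range resp.Errors {
		if e.JobRunId != nil {
			fmt.Println("failed to stop:", *e.JobRunId)
		}
	}
	return nil
}
```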
", "CreateClassifier": "

Creates a classifier in the user's account. This may be either a GrokClassifier or an XMLClassifier.

", "CreateConnection": "

Creates a connection definition in the Data Catalog.

", "CreateCrawler": "

Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in either the s3Targets or the jdbcTargets field.

", @@ -15,7 +15,7 @@ "CreateDevEndpoint": "

Creates a new DevEndpoint.

", "CreateJob": "

Creates a new job.

", "CreatePartition": "

Creates a new partition.

", - "CreateScript": "

Transforms a directed acyclic graph (DAG) into a Python script.

", + "CreateScript": "

Transforms a directed acyclic graph (DAG) into code.

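With the new Language member, the same DAG can now be rendered as Scala instead of Python. A sketch (preview request/Send pattern assumed; LanguageScala and the CodeGenNode/CodeGenEdge element types are assumptions about the generated names for this model's shapes):

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue"
)

func generateScala(svc *glue.Glue, nodes []glue.CodeGenNode, edges []glue.CodeGenEdge) error {
	req := svc.CreateScriptRequest(&glue.CreateScriptInput{
		DagNodes: nodes,
		DagEdges: edges,
		Language: glue.LanguageScala, // assumed enum name (PYTHON/SCALA)
	})
	resp, err := req.Send()
	if err != nil {
		return err
	}
	// Depending on Language, one of PythonScript or ScalaCode is populated.
	if resp.ScalaCode != nil {
		fmt.Println(*resp.ScalaCode)
	}
	return nil
}
```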
", "CreateTable": "

Creates a new table definition in the Data Catalog.

", "CreateTrigger": "

Creates a new trigger.

", "CreateUserDefinedFunction": "

Creates a new function definition in the Data Catalog.

", @@ -24,10 +24,10 @@ "DeleteCrawler": "

Removes a specified crawler from the Data Catalog, unless the crawler state is RUNNING.

", "DeleteDatabase": "

Removes a specified Database from a Data Catalog.

", "DeleteDevEndpoint": "

Deletes a specified DevEndpoint.

", - "DeleteJob": "

Deletes a specified job.

", + "DeleteJob": "

Deletes a specified job. If the job is not found, no exception is thrown.

", "DeletePartition": "

Deletes a specified partition.

", "DeleteTable": "

Removes a table definition from the Data Catalog.

", - "DeleteTrigger": "

Deletes a specified trigger.

", + "DeleteTrigger": "

Deletes a specified trigger. If the trigger is not found, no exception is thrown.

", "DeleteUserDefinedFunction": "

Deletes an existing function definition from the Data Catalog.

", "GetCatalogImportStatus": "

Retrieves the status of a migration operation.

", "GetClassifier": "

Retrieve a classifier by name.

", @@ -49,7 +49,7 @@ "GetMapping": "

Creates mappings.

", "GetPartition": "

Retrieves information about a specified partition.

", "GetPartitions": "

Retrieves information about the partitions in a table.

", - "GetPlan": "

Gets a Python script to perform a specified mapping.

", + "GetPlan": "

Gets code to perform a specified mapping.

", "GetTable": "

Retrieves the Table definition in a Data Catalog for a specified table.

", "GetTableVersions": "

Retrieves a list of strings that identify available versions of a specified table.

", "GetTables": "

Retrieves the definitions of some or all of the tables in a given Database.

", @@ -62,7 +62,7 @@ "StartCrawler": "

Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, does nothing.

", "StartCrawlerSchedule": "

Changes the schedule state of the specified crawler to SCHEDULED, unless the crawler is already running or the schedule state is already SCHEDULED.

", "StartJobRun": "

Runs a job.

", - "StartTrigger": "

Starts an existing trigger.

", + "StartTrigger": "

Starts an existing trigger. See Triggering Jobs for information about how different types of triggers are started.

", "StopCrawler": "

If the specified crawler is running, stops the crawl.

", "StopCrawlerSchedule": "

Sets the schedule state of the specified crawler to NOT_SCHEDULED, but does not stop the crawler if it is already running.

", "StopTrigger": "

Stops a specified trigger.

", @@ -106,7 +106,7 @@ "AttemptCount": { "base": null, "refs": { - "JobRun$Attempt": "

The number or the attempt to run this job.

" + "JobRun$Attempt": "

The number of the attempt to run this job.

" } }, "BatchCreatePartitionRequest": { @@ -179,7 +179,7 @@ } }, "BatchStopJobRunError": { - "base": "

Details about the job run and the error that occurred while trying to submit it for stopping.

", + "base": "

Records an error that occurred when attempting to stop a specified JobRun.

", "refs": { "BatchStopJobRunErrorList$member": null } @@ -187,13 +187,13 @@ "BatchStopJobRunErrorList": { "base": null, "refs": { - "BatchStopJobRunResponse$Errors": "

A list containing the job run Ids and details of the error that occurred for each job run while submitting to stop.

" + "BatchStopJobRunResponse$Errors": "

A list of the errors that were encountered in trying to stop JobRuns, including the JobRunId for which each error was encountered and details about the error.

" } }, "BatchStopJobRunJobRunIdList": { "base": null, "refs": { - "BatchStopJobRunRequest$JobRunIds": "

A list of job run Ids of the given job to be stopped.

" + "BatchStopJobRunRequest$JobRunIds": "

A list of the JobRunIds that should be stopped for that Job.

" } }, "BatchStopJobRunRequest": { @@ -207,7 +207,7 @@ } }, "BatchStopJobRunSuccessfulSubmission": { - "base": "

Details about the job run which is submitted successfully for stopping.

", + "base": "

Records a successful request to stop a specified JobRun.

", "refs": { "BatchStopJobRunSuccessfulSubmissionList$member": null } @@ -215,7 +215,7 @@ "BatchStopJobRunSuccessfulSubmissionList": { "base": null, "refs": { - "BatchStopJobRunResponse$SuccessfulSubmissions": "

A list of job runs which are successfully submitted for stopping.

" + "BatchStopJobRunResponse$SuccessfulSubmissions": "

A list of the JobRuns that were successfully submitted for stopping.

" } }, "Boolean": { @@ -231,7 +231,7 @@ "BooleanValue": { "base": null, "refs": { - "GetJobRunRequest$PredecessorsIncluded": "

A list of the predecessor runs to return as well.

", + "GetJobRunRequest$PredecessorsIncluded": "

True if a list of predecessor runs should be returned.

", "UpdateDevEndpointRequest$UpdateEtlLibraries": "

True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.

" } }, @@ -512,9 +512,9 @@ "CrawlerConfiguration": { "base": null, "refs": { - "Crawler$Configuration": "

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

", - "CreateCrawlerRequest$Configuration": "

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition.

", - "UpdateCrawlerRequest$Configuration": "

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

" + "Crawler$Configuration": "

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

", + "CreateCrawlerRequest$Configuration": "

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

", + "UpdateCrawlerRequest$Configuration": "

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'
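For anyone generating that Configuration value from code rather than hand-escaping it, here is a stdlib-only sketch; the struct shape simply mirrors the documented example JSON and is not an official SDK type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// crawlerConfiguration mirrors the documented example; json.Number keeps
// the Version literal as 1.0 rather than collapsing it to 1.
type crawlerConfiguration struct {
	Version       json.Number `json:"Version"`
	CrawlerOutput struct {
		Partitions struct {
			AddOrUpdateBehavior string `json:"AddOrUpdateBehavior"`
		} `json:"Partitions"`
	} `json:"CrawlerOutput"`
}

func main() {
	var cfg crawlerConfiguration
	cfg.Version = json.Number("1.0")
	cfg.CrawlerOutput.Partitions.AddOrUpdateBehavior = "InheritFromTable"

	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Pass string(b) as the Configuration field of a create/update
	// crawler request.
	fmt.Println(string(b))
}
```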

" } }, "CrawlerList": { @@ -923,7 +923,7 @@ "ErrorDetail": { "base": "

Contains details about an error.

", "refs": { - "BatchStopJobRunError$ErrorDetail": "

The details of the error that occurred.

", + "BatchStopJobRunError$ErrorDetail": "

Specifies details about the error that was encountered.

", "ErrorByName$value": null, "PartitionError$ErrorDetail": "

Details about the partition error.

", "TableError$ErrorDetail": "

Detail about the error.

" @@ -966,12 +966,12 @@ "GenericMap": { "base": null, "refs": { - "Action$Arguments": "

Arguments to be passed to the job.

", - "CreateJobRequest$DefaultArguments": "

The default parameters for this job.

", - "Job$DefaultArguments": "

The default parameters for this job.

", - "JobRun$Arguments": "

The job arguments associated with this run.

", - "JobUpdate$DefaultArguments": "

The default parameters for this job.

", - "StartJobRunRequest$Arguments": "

Specific arguments for this job run.

" + "Action$Arguments": "

Arguments to be passed to the job.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

", + "CreateJobRequest$DefaultArguments": "

The default arguments for this job.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

", + "Job$DefaultArguments": "

The default arguments for this job, specified as name-value pairs.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

", + "JobRun$Arguments": "

The job arguments associated with this run. These override equivalent default arguments set for the job.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

", + "JobUpdate$DefaultArguments": "

The default arguments for this job.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.

", + "StartJobRunRequest$Arguments": "

The job arguments specifically for this run. They override the equivalent default arguments set for the job itself.

You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.

For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.

For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
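As a concrete illustration of these name-value pairs, a small Go fragment follows; "--my-input-path" is a made-up script argument, while "--TempDir" and "--job-bookmark-option" are among the parameters covered by the Special Parameters documentation.

```go
// Arguments/DefaultArguments are plain name-value pairs.
var jobArgs = map[string]string{
	// Consumed by your own job-execution script (hypothetical name):
	"--my-input-path": "s3://example-bucket/input/",
	// Consumed by AWS Glue itself:
	"--TempDir":             "s3://example-bucket/temp/",
	"--job-bookmark-option": "job-bookmark-enable",
}
```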

" } }, "GenericString": { @@ -991,7 +991,7 @@ "CreateDevEndpointResponse$ExtraPythonLibsS3Path": "

Path(s) to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint.

", "CreateDevEndpointResponse$ExtraJarsS3Path": "

Path to one or more Java Jars in an S3 bucket that will be loaded in your DevEndpoint.

", "CreateDevEndpointResponse$FailureReason": "

The reason for a current failure in this DevEndpoint.

", - "CreateTriggerRequest$Schedule": "

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

", + "CreateTriggerRequest$Schedule": "

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

This field is required when the trigger type is SCHEDULED.
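For reference, AWS cron expressions carry six fields, cron(Minutes Hours Day-of-month Month Day-of-week Year), with ? standing in for whichever day field is unused. The first constant below is the expression from the text; the second is an illustrative extra.

```go
const (
	dailyAt1215UTC  = "cron(15 12 * * ? *)"     // every day at 12:15 UTC
	weekdayMornings = "cron(0 8 ? * MON-FRI *)" // 08:00 UTC, Monday through Friday
)
```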

", "DeleteDevEndpointRequest$EndpointName": "

The name of the DevEndpoint.

", "DevEndpoint$EndpointName": "

The name of the DevEndpoint.

", "DevEndpoint$SubnetId": "

The subnet ID for this DevEndpoint.

", @@ -1018,10 +1018,10 @@ "GetJobsResponse$NextToken": "

A continuation token, if not all jobs have yet been returned.

", "GetTriggersRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetTriggersResponse$NextToken": "

A continuation token, if not all the requested triggers have yet been returned.

", - "JobCommand$Name": "

The name of this job command.

", + "JobCommand$Name": "

The name of the job command: this must be glueetl.

", "StringList$member": null, "Trigger$Schedule": "

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

", - "TriggerUpdate$Schedule": "

An updated cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

", + "TriggerUpdate$Schedule": "

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

", "UpdateDevEndpointRequest$EndpointName": "

The name of the DevEndpoint to be updated.

", "UpdateDevEndpointRequest$PublicKey": "

The public key for the DevEndpoint to use.

" } @@ -1335,16 +1335,16 @@ "IdString": { "base": null, "refs": { - "BatchStopJobRunError$JobRunId": "

The job run Id.

", + "BatchStopJobRunError$JobRunId": "

The JobRunId of the JobRun in question.

", "BatchStopJobRunJobRunIdList$member": null, - "BatchStopJobRunSuccessfulSubmission$JobRunId": "

The job run Id.

", + "BatchStopJobRunSuccessfulSubmission$JobRunId": "

The JobRunId of the JobRun in question.

", "GetJobRunRequest$RunId": "

The ID of the job run.

", "JobRun$Id": "

The ID of this job run.

", - "JobRun$PreviousRunId": "

The ID of the previous run of this job.

", - "Predecessor$RunId": "

The job-run ID of the precessor job run.

", - "StartJobRunRequest$JobRunId": "

The ID of the job run to start.

", + "JobRun$PreviousRunId": "

The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.

", + "Predecessor$RunId": "

The job-run ID of the predecessor job run.

", + "StartJobRunRequest$JobRunId": "

The ID of a previous JobRun to retry.

", "StartJobRunResponse$JobRunId": "

The ID assigned to this job run.

", - "Trigger$Id": "

The trigger ID.

" + "Trigger$Id": "

Reserved for future use.

" } }, "IdempotentParameterMismatchException": { @@ -1381,16 +1381,16 @@ "CreateDevEndpointRequest$NumberOfNodes": "

The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint.

", "CreateDevEndpointResponse$ZeppelinRemoteSparkInterpreterPort": "

The Apache Zeppelin port for the remote Apache Spark interpreter.

", "CreateDevEndpointResponse$NumberOfNodes": "

The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.

", - "CreateJobRequest$AllocatedCapacity": "

The number of capacity units allocated to this job.

", + "CreateJobRequest$AllocatedCapacity": "

The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", "DevEndpoint$ZeppelinRemoteSparkInterpreterPort": "

The Apache Zeppelin port for the remote Apache Spark interpreter.

", "DevEndpoint$NumberOfNodes": "

The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.

", - "Job$AllocatedCapacity": "

The number of capacity units allocated to this job.

", + "Job$AllocatedCapacity": "

The number of AWS Glue data processing units (DPUs) allocated to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", "JobBookmarkEntry$Version": "

Version of the job.

", "JobBookmarkEntry$Run": "

The run ID number.

", "JobBookmarkEntry$Attempt": "

The attempt ID number.

", - "JobRun$AllocatedCapacity": "

The amount of infrastructure capacity allocated to this job run.

", - "JobUpdate$AllocatedCapacity": "

The number of capacity units allocated to this job.

", - "StartJobRunRequest$AllocatedCapacity": "

The infrastructure capacity to allocate to this job.

" + "JobRun$AllocatedCapacity": "

The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", + "JobUpdate$AllocatedCapacity": "

The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", + "StartJobRunRequest$AllocatedCapacity": "

The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.
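Tying the DPU setting to the job-argument fields above, here is a minimal sketch of starting a run, reusing the glue client from the BatchStopJobRun sketch earlier and again assuming the preview's request/Send pattern; the job name is a placeholder.

```go
// Start a run with an explicit DPU allocation and one run-specific argument.
req := svc.StartJobRunRequest(&glue.StartJobRunInput{
	JobName:           aws.String("example-etl-job"),
	AllocatedCapacity: aws.Int64(10), // DPUs: 2-100, default 10
	Arguments: map[string]string{
		"--TempDir": "s3://example-bucket/temp/",
	},
})
resp, err := req.Send()
if err != nil {
	panic(err)
}
fmt.Println("started run:", *resp.JobRunId)
```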

" } }, "InternalServiceException": { @@ -1416,7 +1416,7 @@ } }, "Job": { - "base": "

Specifies a job in the Data Catalog.

", + "base": "

Specifies a job.

", "refs": { "GetJobResponse$Job": "

The requested job definition.

", "JobList$member": null @@ -1433,7 +1433,7 @@ "refs": { "CreateJobRequest$Command": "

The JobCommand that executes this job.

", "Job$Command": "

The JobCommand that executes this job.

", - "JobUpdate$Command": "

The JobCommand that executes this job.

" + "JobUpdate$Command": "

The JobCommand that executes this job (required).

" } }, "JobList": { @@ -1465,12 +1465,12 @@ "JobRunState": { "base": null, "refs": { - "Condition$State": "

The condition state.

", + "Condition$State": "

The condition state. Currently, the values supported are SUCCEEDED, STOPPED and FAILED.

", "JobRun$JobRunState": "

The current state of the job run.

" } }, "JobUpdate": { - "base": "

Specifies information used to update an existing job.

", + "base": "

Specifies information used to update an existing job. Note that the previous job definition will be completely overwritten by this information.

", "refs": { "UpdateJobRequest$JobUpdate": "

Specifies the values with which to update the job.

" } @@ -1487,6 +1487,13 @@ "ParametersMap$key": null } }, + "Language": { + "base": null, + "refs": { + "CreateScriptRequest$Language": "

The programming language of the resulting code from the DAG.

", + "GetPlanRequest$Language": "

The programming language of the code to perform the mapping.

" + } + }, "LastCrawlInfo": { "base": "

Status and error information about the most recent crawl.

", "refs": { @@ -1566,7 +1573,7 @@ "MaxConcurrentRuns": { "base": null, "refs": { - "ExecutionProperty$MaxConcurrentRuns": "

The maximum number of concurrent runs allowed for a job.

" + "ExecutionProperty$MaxConcurrentRuns": "

The maximum number of concurrent runs allowed for a job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.

" } }, "MaxRetries": { @@ -1625,15 +1632,15 @@ "BatchDeleteTableRequest$DatabaseName": "

The name of the catalog database where the tables to delete reside.

", "BatchGetPartitionRequest$DatabaseName": "

The name of the catalog database where the partitions reside.

", "BatchGetPartitionRequest$TableName": "

The name of the partitions' table.

", - "BatchStopJobRunError$JobName": "

The name of the job.

", - "BatchStopJobRunRequest$JobName": "

The name of the job whose job runs are to be stopped.

", - "BatchStopJobRunSuccessfulSubmission$JobName": "

The name of the job.

", + "BatchStopJobRunError$JobName": "

The name of the Job in question.

", + "BatchStopJobRunRequest$JobName": "

The name of the Job in question.

", + "BatchStopJobRunSuccessfulSubmission$JobName": "

The name of the Job in question.

", "CatalogEntry$DatabaseName": "

The database in which the table metadata resides.

", "CatalogEntry$TableName": "

The name of the table in question.

", "CatalogImportStatus$ImportedBy": "

The name of the person who initiated the migration.

", "ClassifierNameList$member": null, "Column$Name": "

The name of the Column.

", - "Condition$JobName": "

The name of the job in question.

", + "Condition$JobName": "

The name of the Job whose JobRuns this condition applies to, and on which this trigger waits.

", "Connection$Name": "

The name of the connection definition.

", "Connection$LastUpdatedBy": "

The user, group or role that last updated this connection definition.

", "ConnectionInput$Name": "

The name of the connection.

", @@ -1642,13 +1649,13 @@ "CrawlerNameList$member": null, "CreateCrawlerRequest$Name": "

Name of the new crawler.

", "CreateGrokClassifierRequest$Name": "

The name of the new classifier.

", - "CreateJobRequest$Name": "

The name you assign to this job.

", - "CreateJobResponse$Name": "

The unique name of the new job that has been created.

", + "CreateJobRequest$Name": "

The name you assign to this job. It must be unique in your account.

", + "CreateJobResponse$Name": "

The unique name that was provided.

", "CreatePartitionRequest$DatabaseName": "

The name of the metadata database in which the partition is to be created.

", "CreatePartitionRequest$TableName": "

The name of the metadata table in which the partition is to be created.

", "CreateTableRequest$DatabaseName": "

The catalog database in which to create the new table.

", - "CreateTriggerRequest$Name": "

The name to assign to the new trigger.

", - "CreateTriggerResponse$Name": "

The name assigned to the new trigger.

", + "CreateTriggerRequest$Name": "

The name of the trigger.

", + "CreateTriggerResponse$Name": "

The name of the trigger.

", "CreateUserDefinedFunctionRequest$DatabaseName": "

The name of the catalog database in which to create the function.

", "CreateXMLClassifierRequest$Name": "

The name of the classifier.

", "Database$Name": "

Name of the database.

", @@ -1687,14 +1694,14 @@ "GetTableVersionsRequest$TableName": "

The name of the table.

", "GetTablesRequest$DatabaseName": "

The database in the catalog whose tables to list.

", "GetTriggerRequest$Name": "

The name of the trigger to retrieve.

", - "GetTriggersRequest$DependentJobName": "

The name of the job for which to retrieve triggers.

", + "GetTriggersRequest$DependentJobName": "

The name of the job for which to retrieve triggers. The trigger that can start this job will be returned, and if there is no such trigger, all triggers will be returned.

", "GetUserDefinedFunctionRequest$DatabaseName": "

The name of the catalog database where the function is located.

", "GetUserDefinedFunctionRequest$FunctionName": "

The name of the function.

", "GetUserDefinedFunctionsRequest$DatabaseName": "

The name of the catalog database where the functions are located.

", "GetUserDefinedFunctionsRequest$Pattern": "

An optional function-name pattern string that filters the function definitions returned.

", "GrokClassifier$Name": "

The name of the classifier.

", "Job$Name": "

The name you assign to this job.

", - "JobRun$TriggerName": "

The name of the trigger for this job run.

", + "JobRun$TriggerName": "

The name of the trigger that started this job run.

", "JobRun$JobName": "

The name of the job being run.

", "MatchCriteria$member": null, "NameStringList$member": null, @@ -1724,7 +1731,7 @@ "TableInput$Name": "

Name of the table.

", "TableInput$Owner": "

Owner of the table.

", "Trigger$Name": "

Name of the trigger.

", - "TriggerUpdate$Name": "

The name of the trigger.

", + "TriggerUpdate$Name": "

Reserved for future use.

", "UpdateConnectionRequest$Name": "

The name of the connection definition to update.

", "UpdateCrawlerRequest$Name": "

Name of the new crawler.

", "UpdateCrawlerScheduleRequest$CrawlerName": "

Name of the crawler whose schedule to update.

", @@ -1905,7 +1912,7 @@ } }, "Predecessor": { - "base": "

A job run that preceded this one.

", + "base": "

A job run that was used in the predicate of a conditional trigger that triggered this job run.

", "refs": { "PredecessorList$member": null } @@ -1919,8 +1926,8 @@ "Predicate": { "base": "

Defines the predicate of the trigger, which determines when it fires.

", "refs": { - "CreateTriggerRequest$Predicate": "

A predicate to specify when the new trigger should fire.

", - "Trigger$Predicate": "

The predicate of this trigger.

", + "CreateTriggerRequest$Predicate": "

A predicate to specify when the new trigger should fire.

This field is required when the trigger type is CONDITIONAL.

", + "Trigger$Predicate": "

The predicate of this trigger, which defines when it will fire.

", "TriggerUpdate$Predicate": "

The predicate of this trigger, which defines when it will fire.

" } }, @@ -1998,17 +2005,17 @@ "RoleString": { "base": null, "refs": { - "CreateJobRequest$Role": "

The role associated with this job.

", - "Job$Role": "

The role associated with this job.

", - "JobUpdate$Role": "

The role associated with this job.

" + "CreateJobRequest$Role": "

The name of the IAM role associated with this job.

", + "Job$Role": "

The name of the IAM role associated with this job.

", + "JobUpdate$Role": "

The name of the IAM role associated with this job (required).

" } }, "RowTag": { "base": null, "refs": { - "CreateXMLClassifierRequest$RowTag": "

The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot be an empty element. It must contain child elements representing fields in the record.

", - "UpdateXMLClassifierRequest$RowTag": "

The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot be an empty element. It must contain child elements representing fields in the record.

", - "XMLClassifier$RowTag": "

The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot be an empty element. It must contain child elements representing fields in the record.

" + "CreateXMLClassifierRequest$RowTag": "

The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by />). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a=\"A\" item_b=\"B\"></row> is okay, but <row item_a=\"A\" item_b=\"B\" /> is not).

", + "UpdateXMLClassifierRequest$RowTag": "

The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by />). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a=\"A\" item_b=\"B\"></row> is okay, but <row item_a=\"A\" item_b=\"B\" /> is not).

", + "XMLClassifier$RowTag": "

The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by />). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a=\"A\" item_b=\"B\"></row> is okay, but <row item_a=\"A\" item_b=\"B\" /> is not).
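To make the self-closing restriction concrete, here are the two row elements from the text embedded in a minimal document; with RowTag set to row, the first parses and the second does not. The <rows> wrapper is illustrative only.

```go
const (
	parsesOK     = `<rows><row item_a="A" item_b="B"></row></rows>`
	failsToParse = `<rows><row item_a="A" item_b="B"/></rows>` // self-closing: rejected
)
```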

" } }, "S3Target": { @@ -2023,6 +2030,13 @@ "CrawlerTargets$S3Targets": "

Specifies Amazon S3 targets.

" } }, + "ScalaCode": { + "base": null, + "refs": { + "CreateScriptResponse$ScalaCode": "

The Scala code generated from the DAG.

", + "GetPlanResponse$ScalaCode": "

Scala code to perform the mapping.

" + } + }, "Schedule": { "base": "

A scheduling object using a cron statement to schedule an event.

", "refs": { @@ -2068,7 +2082,7 @@ "ScriptLocationString": { "base": null, "refs": { - "JobCommand$ScriptLocation": "

Specifies the location of a script that executes a job.

" + "JobCommand$ScriptLocation": "

Specifies the S3 path to a script that executes a job (required).

" } }, "SecurityGroupIdList": { @@ -2342,7 +2356,7 @@ } }, "TriggerUpdate": { - "base": "

A structure used to provide information used to updata a trigger.

", + "base": "

A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.

", "refs": { "UpdateTriggerRequest$TriggerUpdate": "

The new values with which to update the trigger.

" } diff --git a/models/apis/inspector/2016-02-16/api-2.json b/models/apis/inspector/2016-02-16/api-2.json index b24199e766d..7ffa9c16f1d 100644 --- a/models/apis/inspector/2016-02-16/api-2.json +++ b/models/apis/inspector/2016-02-16/api-2.json @@ -576,7 +576,8 @@ "type":"string", "enum":[ "HEALTHY", - "UNHEALTHY" + "UNHEALTHY", + "UNKNOWN" ] }, "AgentHealthCode":{ @@ -617,8 +618,14 @@ "type":"structure", "required":["agentId"], "members":{ + "hostname":{"shape":"Hostname"}, "agentId":{"shape":"AgentId"}, - "autoScalingGroup":{"shape":"AutoScalingGroup"} + "autoScalingGroup":{"shape":"AutoScalingGroup"}, + "agentHealth":{"shape":"AgentHealth"}, + "agentVersion":{"shape":"AgentVersion"}, + "operatingSystem":{"shape":"OperatingSystem"}, + "kernelVersion":{"shape":"KernelVersion"}, + "ipv4Address":{"shape":"Ipv4Address"} } }, "AgentPreviewList":{ @@ -627,6 +634,11 @@ "max":100, "min":0 }, + "AgentVersion":{ + "type":"string", + "max":128, + "min":1 + }, "AgentsAlreadyRunningAssessmentException":{ "type":"structure", "required":[ @@ -653,6 +665,7 @@ "max":300, "min":1 }, + "ArnCount":{"type":"integer"}, "AssessmentRulesPackageArnList":{ "type":"list", "member":{"shape":"Arn"}, @@ -889,6 +902,7 @@ "durationInSeconds", "rulesPackageArns", "userAttributesForFindings", + "assessmentRunCount", "createdAt" ], "members":{ @@ -898,6 +912,8 @@ "durationInSeconds":{"shape":"AssessmentRunDuration"}, "rulesPackageArns":{"shape":"AssessmentTemplateRulesPackageArnList"}, "userAttributesForFindings":{"shape":"UserAttributeList"}, + "lastAssessmentRunArn":{"shape":"Arn"}, + "assessmentRunCount":{"shape":"ArnCount"}, "createdAt":{"shape":"Timestamp"} } }, @@ -1482,6 +1498,11 @@ "max":50, "min":0 }, + "KernelVersion":{ + "type":"string", + "max":128, + "min":1 + }, "LimitExceededErrorCode":{ "type":"string", "enum":[ @@ -1706,6 +1727,11 @@ "type":"integer", "min":0 }, + "OperatingSystem":{ + "type":"string", + "max":256, + "min":1 + }, "PaginationToken":{ "type":"string", "max":300, diff --git a/models/apis/inspector/2016-02-16/docs-2.json b/models/apis/inspector/2016-02-16/docs-2.json index 59f2134e271..831a3851c99 100644 --- a/models/apis/inspector/2016-02-16/docs-2.json +++ b/models/apis/inspector/2016-02-16/docs-2.json @@ -3,8 +3,8 @@ "service": "Amazon Inspector

Amazon Inspector enables you to analyze the behavior of your AWS resources and to identify potential security issues. For more information, see Amazon Inspector User Guide.

", "operations": { "AddAttributesToFindings": "

Assigns attributes (key and value pairs) to the findings that are specified by the ARNs of the findings.

", - "CreateAssessmentTarget": "

Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Amazon Inspector Assessment Targets.

", - "CreateAssessmentTemplate": "

Creates an assessment template for the assessment target that is specified by the ARN of the assessment target.

", + "CreateAssessmentTarget": "

Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. If the service-linked role isn’t already registered, this action also creates and registers it, granting Amazon Inspector access to the AWS services needed to perform security assessments. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Amazon Inspector Assessment Targets.

", + "CreateAssessmentTemplate": "

Creates an assessment template for the assessment target that is specified by the ARN of the assessment target. If the service-linked role isn’t already registered, this action also creates and registers it, granting Amazon Inspector access to the AWS services needed to perform security assessments.

", "CreateResourceGroup": "

Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Amazon Inspector assessment target. The created resource group is then used to create an Amazon Inspector assessment target. For more information, see CreateAssessmentTarget.

", "DeleteAssessmentRun": "

Deletes the assessment run that is specified by the ARN of the assessment run.

", "DeleteAssessmentTarget": "

Deletes the assessment target that is specified by the ARN of the assessment target.

", @@ -27,7 +27,7 @@ "ListRulesPackages": "

Lists all available Amazon Inspector rules packages.

", "ListTagsForResource": "

Lists all tags associated with an assessment template.

", "PreviewAgents": "

Previews the agents installed on the EC2 instances that are part of the specified assessment target.
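This release adds hostname, health, version, OS, kernel, and IP details to each AgentPreview (see the inspector model diff above and the field docs below), so a preview call can now report far more than the agent ID. A minimal sketch, assuming the preview request/Send pattern in service/inspector, with svc built the same way as the glue client earlier; the target ARN is the sample ARN used elsewhere in this changelog.

```go
req := svc.PreviewAgentsRequest(&inspector.PreviewAgentsInput{
	PreviewAgentsArn: aws.String("arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq"),
})
resp, err := req.Send()
if err != nil {
	panic(err)
}
for _, ap := range resp.AgentPreviews {
	// Only agentId is required; the new fields may be unset.
	if ap.Hostname != nil && ap.AgentVersion != nil {
		fmt.Println(*ap.AgentId, *ap.Hostname, *ap.AgentVersion, ap.AgentHealth)
	}
}
```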

", - "RegisterCrossAccountAccessRole": "

Registers the IAM role that Amazon Inspector uses to list your EC2 instances at the start of the assessment run or when you call the PreviewAgents action.

", + "RegisterCrossAccountAccessRole": "

Registers the IAM role that grants Amazon Inspector access to the AWS services needed to perform security assessments.

", "RemoveAttributesFromFindings": "

Removes entire attributes (key and value pairs) from the findings that are specified by the ARNs of the findings where an attribute with the specified key exists.

", "SetTagsForResource": "

Sets tags (key and value pairs) to the assessment template that is specified by the ARN of the assessment template.

", "StartAssessmentRun": "

Starts the assessment run specified by the ARN of the assessment template. For this API to function properly, you must not exceed the limit of running up to 500 concurrent agents per AWS account.

", @@ -87,6 +87,7 @@ "base": null, "refs": { "AgentHealthList$member": null, + "AgentPreview$agentHealth": "

The health status of the Amazon Inspector Agent.

", "AssessmentRunAgent$agentHealth": "

The current health state of the agent.

" } }, @@ -137,6 +138,12 @@ "PreviewAgentsResponse$agentPreviews": "

The resulting list of agents.

" } }, + "AgentVersion": { + "base": null, + "refs": { + "AgentPreview$agentVersion": "

The version of the Amazon Inspector Agent.

" + } + }, "AgentsAlreadyRunningAssessmentException": { "base": "

You started an assessment run, but one of the instances is already participating in another assessment run.

", "refs": { @@ -163,6 +170,7 @@ "AssessmentTarget$resourceGroupArn": "

The ARN that specifies the resource group that is associated with the assessment target.

", "AssessmentTemplate$arn": "

The ARN of the assessment template.

", "AssessmentTemplate$assessmentTargetArn": "

The ARN of the assessment target that corresponds to this assessment template.

", + "AssessmentTemplate$lastAssessmentRunArn": "

The Amazon Resource Name (ARN) of the most recent assessment run associated with this assessment template. This value exists only when the value of assessmentRunCount is greater than zero.

", "AssessmentTemplateRulesPackageArnList$member": null, "BatchDescribeArnList$member": null, "CreateAssessmentTargetRequest$resourceGroupArn": "

The ARN that specifies the resource group that is used to create the assessment target.

", @@ -187,7 +195,7 @@ "ListReturnedArnList$member": null, "ListTagsForResourceRequest$resourceArn": "

The ARN that specifies the assessment template whose tags you want to list.

", "PreviewAgentsRequest$previewAgentsArn": "

The ARN of the assessment target whose agents you want to preview.

", - "RegisterCrossAccountAccessRoleRequest$roleArn": "

The ARN of the IAM role that Amazon Inspector uses to list your EC2 instances during the assessment run or when you call the PreviewAgents action.

", + "RegisterCrossAccountAccessRoleRequest$roleArn": "

The ARN of the IAM role that grants Amazon Inspector access to the AWS services needed to perform security assessments.

", "ResourceGroup$arn": "

The ARN of the resource group.

", "RulesPackage$arn": "

The ARN of the rules package.

", "SetTagsForResourceRequest$resourceArn": "

The ARN of the assessment template that you want to set tags to.

", @@ -204,6 +212,12 @@ "UpdateAssessmentTargetRequest$resourceGroupArn": "

The ARN of the resource group that is used to specify the new resource group to associate with the assessment target.

" } }, + "ArnCount": { + "base": null, + "refs": { + "AssessmentTemplate$assessmentRunCount": "

The number of existing assessment runs associated with this assessment template. This value can be zero or a positive integer.

" + } + }, "AssessmentRulesPackageArnList": { "base": null, "refs": { @@ -692,6 +706,7 @@ "Hostname": { "base": null, "refs": { + "AgentPreview$hostname": "

The hostname of the EC2 instance on which the Amazon Inspector Agent is installed.

", "AssetAttributes$hostname": "

The hostname of the EC2 instance where the finding is generated.

" } }, @@ -746,6 +761,7 @@ "Ipv4Address": { "base": null, "refs": { + "AgentPreview$ipv4Address": "

The IP address of the EC2 instance on which the Amazon Inspector Agent is installed.

", "Ipv4AddressList$member": null } }, @@ -755,6 +771,12 @@ "AssetAttributes$ipv4Addresses": "

The list of IP v4 addresses of the EC2 instance where the finding is generated.

" } }, + "KernelVersion": { + "base": null, + "refs": { + "AgentPreview$kernelVersion": "

The kernel version of the operating system running on the EC2 instance on which the Amazon Inspector Agent is installed.

" + } + }, "LimitExceededErrorCode": { "base": null, "refs": { @@ -941,6 +963,12 @@ "InspectorServiceAttributes$schemaVersion": "

The schema version of this data type.

" } }, + "OperatingSystem": { + "base": null, + "refs": { + "AgentPreview$operatingSystem": "

The operating system running on the EC2 instance on which the Amazon Inspector Agent is installed.

" + } + }, "PaginationToken": { "base": null, "refs": { diff --git a/models/apis/inspector/2016-02-16/examples-1.json b/models/apis/inspector/2016-02-16/examples-1.json index d06decc470e..05b541f0456 100644 --- a/models/apis/inspector/2016-02-16/examples-1.json +++ b/models/apis/inspector/2016-02-16/examples-1.json @@ -280,6 +280,7 @@ { "name": "ExampleAssessmentTemplate", "arn": "arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-4r1V2mAw", + "assessmentRunCount": 0, "assessmentTargetArn": "arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq", "createdAt": "1458074191.844", "durationInSeconds": 3600, diff --git a/models/apis/kinesisanalytics/2015-08-14/api-2.json b/models/apis/kinesisanalytics/2015-08-14/api-2.json index b9513c73c17..6af805aa381 100644 --- a/models/apis/kinesisanalytics/2015-08-14/api-2.json +++ b/models/apis/kinesisanalytics/2015-08-14/api-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"Kinesis Analytics", "serviceFullName":"Amazon Kinesis Analytics", + "serviceId":"Kinesis Analytics", "signatureVersion":"v4", "targetPrefix":"KinesisAnalytics_20150814", "timestampFormat":"unixTimestamp", @@ -958,6 +959,31 @@ "RoleARNUpdate":{"shape":"RoleARN"} } }, + "LambdaOutput":{ + "type":"structure", + "required":[ + "ResourceARN", + "RoleARN" + ], + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "RoleARN":{"shape":"RoleARN"} + } + }, + "LambdaOutputDescription":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "RoleARN":{"shape":"RoleARN"} + } + }, + "LambdaOutputUpdate":{ + "type":"structure", + "members":{ + "ResourceARNUpdate":{"shape":"ResourceARN"}, + "RoleARNUpdate":{"shape":"RoleARN"} + } + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -1011,6 +1037,7 @@ "Name":{"shape":"InAppStreamName"}, "KinesisStreamsOutput":{"shape":"KinesisStreamsOutput"}, "KinesisFirehoseOutput":{"shape":"KinesisFirehoseOutput"}, + "LambdaOutput":{"shape":"LambdaOutput"}, "DestinationSchema":{"shape":"DestinationSchema"} } }, @@ -1021,6 +1048,7 @@ "Name":{"shape":"InAppStreamName"}, "KinesisStreamsOutputDescription":{"shape":"KinesisStreamsOutputDescription"}, "KinesisFirehoseOutputDescription":{"shape":"KinesisFirehoseOutputDescription"}, + "LambdaOutputDescription":{"shape":"LambdaOutputDescription"}, "DestinationSchema":{"shape":"DestinationSchema"} } }, @@ -1036,6 +1064,7 @@ "NameUpdate":{"shape":"InAppStreamName"}, "KinesisStreamsOutputUpdate":{"shape":"KinesisStreamsOutputUpdate"}, "KinesisFirehoseOutputUpdate":{"shape":"KinesisFirehoseOutputUpdate"}, + "LambdaOutputUpdate":{"shape":"LambdaOutputUpdate"}, "DestinationSchemaUpdate":{"shape":"DestinationSchema"} } }, diff --git a/models/apis/kinesisanalytics/2015-08-14/docs-2.json b/models/apis/kinesisanalytics/2015-08-14/docs-2.json index b2f5dbd8969..cda8e9926e9 100644 --- a/models/apis/kinesisanalytics/2015-08-14/docs-2.json +++ b/models/apis/kinesisanalytics/2015-08-14/docs-2.json @@ -5,16 +5,16 @@ "AddApplicationCloudWatchLoggingOption": "

Adds a CloudWatch log stream to monitor application configuration errors. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Working with Amazon CloudWatch Logs.

", "AddApplicationInput": "

Adds a streaming source to your Amazon Kinesis application. For conceptual information, see Configuring Application Input.

You can add a streaming source either when you create an application or you can use this operation to add a streaming source after you create an application. For more information, see CreateApplication.

Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.

This operation requires permissions to perform the kinesisanalytics:AddApplicationInput action.

", "AddApplicationInputProcessingConfiguration": "

Adds an InputProcessingConfiguration to an application. An input processor preprocesses records on the input stream before the application's SQL code executes. Currently, the only input processor available is AWS Lambda.

", - "AddApplicationOutput": "

Adds an external destination to your Amazon Kinesis Analytics application.

If you want Amazon Kinesis Analytics to deliver data from an in-application stream within your application to an external destination (such as an Amazon Kinesis stream or a Firehose delivery stream), you add the relevant configuration to your application using this operation. You can configure one or more outputs for your application. Each output configuration maps an in-application stream and an external destination.

You can use one of the output configurations to deliver data from your in-application error stream to an external destination so that you can analyze the errors. For conceptual information, see Understanding Application Output (Destination).

Note that any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.

For the limits on the number of application inputs and outputs you can configure, see Limits.

This operation requires permissions to perform the kinesisanalytics:AddApplicationOutput action.

", + "AddApplicationOutput": "

Adds an external destination to your Amazon Kinesis Analytics application.

If you want Amazon Kinesis Analytics to deliver data from an in-application stream within your application to an external destination (such as an Amazon Kinesis stream, an Amazon Kinesis Firehose delivery stream, or an AWS Lambda function), you add the relevant configuration to your application using this operation. You can configure one or more outputs for your application. Each output configuration maps an in-application stream and an external destination.

You can use one of the output configurations to deliver data from your in-application error stream to an external destination so that you can analyze the errors. For conceptual information, see Understanding Application Output (Destination).

Note that any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.

For the limits on the number of application inputs and outputs you can configure, see Limits.

This operation requires permissions to perform the kinesisanalytics:AddApplicationOutput action.
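With the LambdaOutput shapes added in the kinesisanalytics model above, an output configuration can now point at a Lambda function. A minimal sketch, assuming the preview request/Send pattern, generated type names, and enum constant naming in service/kinesisanalytics; all names and ARNs are placeholders, and svc is assumed to be a kinesisanalytics client.

```go
req := svc.AddApplicationOutputRequest(&kinesisanalytics.AddApplicationOutputInput{
	ApplicationName:             aws.String("example-app"),
	CurrentApplicationVersionId: aws.Int64(1),
	Output: &kinesisanalytics.Output{
		Name: aws.String("DESTINATION_SQL_STREAM"),
		// LambdaOutput requires both ResourceARN and RoleARN (see the model above).
		LambdaOutput: &kinesisanalytics.LambdaOutput{
			ResourceARN: aws.String("arn:aws:lambda:us-west-2:123456789012:function:example-fn"),
			RoleARN:     aws.String("arn:aws:iam::123456789012:role/example-ka-role"),
		},
		DestinationSchema: &kinesisanalytics.DestinationSchema{
			RecordFormatType: kinesisanalytics.RecordFormatTypeJson, // assumed enum constant name
		},
	},
})
if _, err := req.Send(); err != nil {
	panic(err)
}
```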

", "AddApplicationReferenceDataSource": "

Adds a reference data source to an existing application.

Amazon Kinesis Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), name of the in-application table to create, and the necessary mapping information that describes how data in Amazon S3 object maps to columns in the resulting in-application table.

For conceptual information, see Configuring Application Input. For the limits on data sources you can add to your application, see Limits.

This operation requires permissions to perform the kinesisanalytics:AddApplicationOutput action.

", - "CreateApplication": "

Creates an Amazon Kinesis Analytics application. You can configure each application with one streaming source as input, application code to process the input, and up to five streaming destinations where you want Amazon Kinesis Analytics to write the output data from your application. For an overview, see How it Works.

In the input configuration, you map the streaming source to an in-application stream, which you can think of as a constantly updating table. In the mapping, you must provide a schema for the in-application stream and map each data column in the in-application stream to a data element in the streaming source.

Your application code is one or more SQL statements that read input data, transform it, and generate output. Your application code can create one or more SQL artifacts like SQL streams or pumps.

In the output configuration, you can configure the application to write data from in-application streams created in your applications to up to five streaming destinations.

To read data from your source stream or write data to destination streams, Amazon Kinesis Analytics needs your permissions. You grant these permissions by creating IAM roles. This operation requires permissions to perform the kinesisanalytics:CreateApplication action.

For introductory exercises to create an Amazon Kinesis Analytics application, see Getting Started.

", + "CreateApplication": "

Creates an Amazon Kinesis Analytics application. You can configure each application with one streaming source as input, application code to process the input, and up to three destinations where you want Amazon Kinesis Analytics to write the output data from your application. For an overview, see How it Works.

In the input configuration, you map the streaming source to an in-application stream, which you can think of as a constantly updating table. In the mapping, you must provide a schema for the in-application stream and map each data column in the in-application stream to a data element in the streaming source.

Your application code is one or more SQL statements that read input data, transform it, and generate output. Your application code can create one or more SQL artifacts like SQL streams or pumps.

In the output configuration, you can configure the application to write data from in-application streams created in your applications to up to three destinations.

To read data from your source stream or write data to destination streams, Amazon Kinesis Analytics needs your permissions. You grant these permissions by creating IAM roles. This operation requires permissions to perform the kinesisanalytics:CreateApplication action.

For introductory exercises to create an Amazon Kinesis Analytics application, see Getting Started.

", "DeleteApplication": "

Deletes the specified application. Amazon Kinesis Analytics halts application execution and deletes the application, including any application artifacts (such as in-application streams, reference table, and application code).

This operation requires permissions to perform the kinesisanalytics:DeleteApplication action.

", "DeleteApplicationCloudWatchLoggingOption": "

Deletes a CloudWatch log stream from an application. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Working with Amazon CloudWatch Logs.

", "DeleteApplicationInputProcessingConfiguration": "

Deletes an InputProcessingConfiguration from an input.

", "DeleteApplicationOutput": "

Deletes output destination configuration from your application configuration. Amazon Kinesis Analytics will no longer write data from the corresponding in-application stream to the external output destination.

This operation requires permissions to perform the kinesisanalytics:DeleteApplicationOutput action.

", "DeleteApplicationReferenceDataSource": "

Deletes a reference data source configuration from the specified application configuration.

If the application is running, Amazon Kinesis Analytics immediately removes the in-application table that you created using the AddApplicationReferenceDataSource operation.

This operation requires permissions to perform the kinesisanalytics.DeleteApplicationReferenceDataSource action.

", "DescribeApplication": "

Returns information about a specific Amazon Kinesis Analytics application.

If you want to retrieve a list of all applications in your account, use the ListApplications operation.

This operation requires permissions to perform the kinesisanalytics:DescribeApplication action. You can use DescribeApplication to get the current application versionId, which you need to call other operations such as Update.

", - "DiscoverInputSchema": "

Infers a schema by evaluating sample records on the specified streaming source (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream). In the response, the operation returns the inferred schema and also the sample records that the operation used to infer the schema.

You can use the inferred schema when configuring a streaming source for your application. For conceptual information, see Configuring Application Input. Note that when you create an application using the Amazon Kinesis Analytics console, the console uses this operation to infer a schema and show it in the console user interface.

This operation requires permissions to perform the kinesisanalytics:DiscoverInputSchema action.

", + "DiscoverInputSchema": "

Infers a schema by evaluating sample records on the specified streaming source (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream) or S3 object. In the response, the operation returns the inferred schema and also the sample records that the operation used to infer the schema.

You can use the inferred schema when configuring a streaming source for your application. For conceptual information, see Configuring Application Input. Note that when you create an application using the Amazon Kinesis Analytics console, the console uses this operation to infer a schema and show it in the console user interface.

This operation requires permissions to perform the kinesisanalytics:DiscoverInputSchema action.

", "ListApplications": "

Returns a list of Amazon Kinesis Analytics applications in your account. For each application, the response includes the application name, Amazon Resource Name (ARN), and status. If the response returns the HasMoreApplications value as true, you can send another request by adding the ExclusiveStartApplicationName in the request body, and set the value of this to the last application name from the previous response.

If you want detailed information about a specific application, use DescribeApplication.

This operation requires permissions to perform the kinesisanalytics:ListApplications action.

", "StartApplication": "

Starts the specified Amazon Kinesis Analytics application. After creating an application, you must exclusively call this operation to start your application.

After the application starts, it begins consuming the input data, processes it, and writes the output to the configured destination.

The application status must be READY for you to start an application. You can get the application status in the console or using the DescribeApplication operation.

After you start the application, you can stop the application from processing the input by calling the StopApplication operation.

This operation requires permissions to perform the kinesisanalytics:StartApplication action.

", "StopApplication": "

Stops the application from processing input data. You can stop an application only if it is in the running state. You can use the DescribeApplication operation to find the application state. After the application is stopped, Amazon Kinesis Analytics stops reading data from the input, the application stops processing data, and there is no output written to the destination.

This operation requires permissions to perform the kinesisanalytics:StopApplication action.

", @@ -76,7 +76,7 @@ "refs": { "ApplicationDetail$ApplicationCode": "

Returns the application code that you provided to perform data analysis on any of the in-application streams in your application.

", "ApplicationUpdate$ApplicationCodeUpdate": "

Describes application code updates.

", - "CreateApplicationRequest$ApplicationCode": "

One or more SQL statements that read input data, transform it, and generate output. For example, you can write a SQL statement that reads data from one in-application stream, generates a running average of the number of advertisement clicks by vendor, and insert resulting rows in another in-application stream using pumps. For more inforamtion about the typical pattern, see Application Code.

You can provide such series of SQL statements, where output of one statement can be used as the input for the next statement. You store intermediate results by creating in-application streams and pumps.

Note that the application code must create the streams with names specified in the Outputs. For example, if your Outputs defines output streams named ExampleOutputStream1 and ExampleOutputStream2, then your application code must create these streams.

" + "CreateApplicationRequest$ApplicationCode": "

One or more SQL statements that read input data, transform it, and generate output. For example, you can write a SQL statement that reads data from one in-application stream, generates a running average of the number of advertisement clicks by vendor, and inserts resulting rows in another in-application stream using pumps. For more information about the typical pattern, see Application Code.

You can provide a series of SQL statements, where the output of one statement can be used as the input for the next statement. You store intermediate results by creating in-application streams and pumps.

Note that the application code must create the streams with names specified in the Outputs. For example, if your Outputs defines output streams named ExampleOutputStream1 and ExampleOutputStream2, then your application code must create these streams.

" } }, "ApplicationDescription": { @@ -147,7 +147,7 @@ "AddApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "

The version ID of the Kinesis Analytics application.

", "AddApplicationInputProcessingConfigurationRequest$CurrentApplicationVersionId": "

Version of the application to which you want to add the input processing configuration. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.

", "AddApplicationInputRequest$CurrentApplicationVersionId": "

Current version of your Amazon Kinesis Analytics application. You can use the DescribeApplication operation to find the current application version.

", - "AddApplicationOutputRequest$CurrentApplicationVersionId": "

Version of the application to which you want add the output configuration. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.

", + "AddApplicationOutputRequest$CurrentApplicationVersionId": "

Version of the application to which you want to add the output configuration. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.

", "AddApplicationReferenceDataSourceRequest$CurrentApplicationVersionId": "

Version of the application for which you are adding the reference data source. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException is returned.

", "ApplicationDetail$ApplicationVersionId": "

Provides the current application version.

", "DeleteApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "

The version ID of the Kinesis Analytics application.

", @@ -166,7 +166,7 @@ "BucketARN": { "base": null, "refs": { - "S3Configuration$BucketARN": null, + "S3Configuration$BucketARN": "

ARN of the S3 bucket that contains the data.

", "S3ReferenceDataSource$BucketARN": "

Amazon Resource Name (ARN) of the S3 bucket.

", "S3ReferenceDataSourceDescription$BucketARN": "

Amazon Resource Name (ARN) of the S3 bucket.

", "S3ReferenceDataSourceUpdate$BucketARNUpdate": "

Amazon Resource Name (ARN) of the S3 bucket.

" @@ -298,9 +298,9 @@ "DestinationSchema": { "base": "

Describes the data format when records are written to the destination. For more information, see Configuring Application Output.

", "refs": { - "Output$DestinationSchema": null, + "Output$DestinationSchema": "

Describes the data format when records are written to the destination. For more information, see Configuring Application Output.

", "OutputDescription$DestinationSchema": "

Data format used for writing data to the destination.

", - "OutputUpdate$DestinationSchemaUpdate": null + "OutputUpdate$DestinationSchemaUpdate": "

Describes the data format when records are written to the destination. For more information, see Configuring Application Output.

" } }, "DiscoverInputSchemaRequest": { @@ -331,7 +331,7 @@ "FileKey": { "base": null, "refs": { - "S3Configuration$FileKey": null, + "S3Configuration$FileKey": "

The name of the object that contains the data.

", "S3ReferenceDataSource$FileKey": "

Object key name containing reference data.

", "S3ReferenceDataSourceDescription$FileKey": "

Amazon S3 object key name.

", "S3ReferenceDataSourceUpdate$FileKeyUpdate": "

Object key name.

" @@ -340,11 +340,11 @@ "Id": { "base": null, "refs": { - "AddApplicationInputProcessingConfigurationRequest$InputId": "

The ID of the input configuration to which to add the input configuration. You can get a list of the input IDs for an application using the DescribeApplication operation.

", + "AddApplicationInputProcessingConfigurationRequest$InputId": "

The ID of the input configuration to add the input processing configuration to. You can get a list of the input IDs for an application using the DescribeApplication operation.

", "CloudWatchLoggingOptionDescription$CloudWatchLoggingOptionId": "

ID of the CloudWatch logging option description.

", "CloudWatchLoggingOptionUpdate$CloudWatchLoggingOptionId": "

ID of the CloudWatch logging option to update.

", - "DeleteApplicationCloudWatchLoggingOptionRequest$CloudWatchLoggingOptionId": "

The CloudWatchLoggingOptionId of the CloudWatch logging option to delete. You can use the DescribeApplication operation to get the CloudWatchLoggingOptionId.

", - "DeleteApplicationInputProcessingConfigurationRequest$InputId": "

The ID of the input configuration from which to delete the input configuration. You can get a list of the input IDs for an application using the DescribeApplication operation.

", + "DeleteApplicationCloudWatchLoggingOptionRequest$CloudWatchLoggingOptionId": "

The CloudWatchLoggingOptionId of the CloudWatch logging option to delete. You can get the CloudWatchLoggingOptionId by using the DescribeApplication operation.

", + "DeleteApplicationInputProcessingConfigurationRequest$InputId": "

The ID of the input configuration from which to delete the input processing configuration. You can get a list of the input IDs for an application by using the DescribeApplication operation.

", "DeleteApplicationOutputRequest$OutputId": "

The ID of the configuration to delete. Each output configuration that is added to the application, either when the application is created or later using the AddApplicationOutput operation, has a unique ID. You need to provide the ID to uniquely identify the output configuration that you want to delete from the application configuration. You can use the DescribeApplication operation to get the specific OutputId.

", "DeleteApplicationReferenceDataSourceRequest$ReferenceId": "

ID of the reference data source. When you add a reference data source to your application using the AddApplicationReferenceDataSource, Amazon Kinesis Analytics assigns an ID. You can use the DescribeApplication operation to get the reference ID.

", "InputConfiguration$Id": "

Input source ID. You can get this ID by calling the DescribeApplication operation.

", @@ -360,7 +360,7 @@ "base": null, "refs": { "InAppStreamNames$member": null, - "Input$NamePrefix": "

Name prefix to use when creating in-application stream. Suppose you specify a prefix \"MyInApplicationStream\". Amazon Kinesis Analytics will then create one or more (as per the InputParallelism count you specified) in-application streams with names \"MyInApplicationStream_001\", \"MyInApplicationStream_002\" and so on.

", + "Input$NamePrefix": "

Name prefix to use when creating an in-application stream. Suppose that you specify a prefix \"MyInApplicationStream\". Amazon Kinesis Analytics then creates one or more (as per the InputParallelism count you specified) in-application streams with the names \"MyInApplicationStream_001\", \"MyInApplicationStream_002\", and so on.
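
A minimal Go sketch of such an input (the ARNs and the parallelism count are placeholders; shape and field names follow this service model):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisanalytics"
)

func main() {
	// With Count set to 2, Amazon Kinesis Analytics creates
	// "MyInApplicationStream_001" and "MyInApplicationStream_002".
	in := kinesisanalytics.Input{
		NamePrefix: aws.String("MyInApplicationStream"),
		InputParallelism: &kinesisanalytics.InputParallelism{
			Count: aws.Int64(2),
		},
		KinesisStreamsInput: &kinesisanalytics.KinesisStreamsInput{
			ResourceARN: aws.String("arn:aws:kinesis:us-west-2:111122223333:stream/example"), // placeholder
			RoleARN:     aws.String("arn:aws:iam::111122223333:role/example-role"),           // placeholder
		},
	}
	fmt.Println(in)
}
```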

", "InputDescription$NamePrefix": "

In-application name prefix.

", "InputUpdate$NamePrefixUpdate": "

Name prefix for in-application streams that Amazon Kinesis Analytics creates for the specific streaming source.

", "Output$Name": "

Name of the in-application stream.

", @@ -414,13 +414,13 @@ } }, "InputLambdaProcessor": { - "base": "

An object that contains the ARN of the AWS Lambda function that is used to preprocess records in the stream, and the ARN of the IAM role used to access the AWS Lambda function.

", + "base": "

An object that contains the Amazon Resource Name (ARN) of the AWS Lambda function that is used to preprocess records in the stream, and the ARN of the IAM role that is used to access the AWS Lambda function.

", "refs": { - "InputProcessingConfiguration$InputLambdaProcessor": "

The InputLambdaProcessor that is used to preprocess the records in the stream prior to being processed by your application code.

" + "InputProcessingConfiguration$InputLambdaProcessor": "

The InputLambdaProcessor that is used to preprocess the records in the stream before they are processed by your application code.

" } }, "InputLambdaProcessorDescription": { - "base": "

An object that contains the ARN of the AWS Lambda function that is used to preprocess records in the stream, and the ARN of the IAM role used to access the AWS Lambda expression.

", + "base": "

An object that contains the Amazon Resource Name (ARN) of the AWS Lambda function that is used to preprocess records in the stream, and the ARN of the IAM role that is used to access the AWS Lambda function.

", "refs": { "InputProcessingConfigurationDescription$InputLambdaProcessorDescription": "

Provides configuration information about the associated InputLambdaProcessorDescription.

" } @@ -434,7 +434,7 @@ "InputParallelism": { "base": "

Describes the number of in-application streams to create for a given streaming source. For information about parallelism, see Configuring Application Input.

", "refs": { - "Input$InputParallelism": "

Describes the number of in-application streams to create.

Data from your source will be routed to these in-application input streams.

(see Configuring Application Input.

", + "Input$InputParallelism": "

Describes the number of in-application streams to create.

Data from your source is routed to these in-application input streams.

For more information, see Configuring Application Input.

", "InputDescription$InputParallelism": "

Describes the configured parallelism (number of in-application streams mapped to the streaming source).

" } }, @@ -452,11 +452,11 @@ } }, "InputProcessingConfiguration": { - "base": "

Provides a description of a processor that is used to preprocess the records in the stream prior to being processed by your application code. Currently, the only input processor available is AWS Lambda.

", + "base": "

Provides a description of a processor that is used to preprocess the records in the stream before they are processed by your application code. Currently, the only input processor available is AWS Lambda.

", "refs": { "AddApplicationInputProcessingConfigurationRequest$InputProcessingConfiguration": "

The InputProcessingConfiguration to add to the application.

", "DiscoverInputSchemaRequest$InputProcessingConfiguration": "

The InputProcessingConfiguration to use to preprocess the records before discovering the schema of the records.

", - "Input$InputProcessingConfiguration": "

The InputProcessingConfiguration for the Input. An input processor transforms records as they are received from the stream, before the application's SQL code executes. Currently, the only input processing configuration available is InputLambdaProcessor.

" + "Input$InputProcessingConfiguration": "

The InputProcessingConfiguration for the input. An input processor transforms records as they are received from the stream, before the application's SQL code executes. Currently, the only input processing configuration available is InputLambdaProcessor.

" } }, "InputProcessingConfigurationDescription": { @@ -480,7 +480,7 @@ "InputStartingPosition": { "base": null, "refs": { - "InputStartingPositionConfiguration$InputStartingPosition": "

The starting position on the stream.

" + "InputStartingPositionConfiguration$InputStartingPosition": "

The starting position on the stream.

" } }, "InputStartingPositionConfiguration": { @@ -506,7 +506,7 @@ "Inputs": { "base": null, "refs": { - "CreateApplicationRequest$Inputs": "

Use this parameter to configure the application input.

You can configure your application to receive input from a single streaming source. In this configuration, you map this streaming source to an in-application stream that is created. Your application code can then query the in-application stream like a table (you can think of it as a constantly updating table).

For the streaming source, you provide its Amazon Resource Name (ARN) and format of data on the stream (for example, JSON, CSV, etc). You also must provide an IAM role that Amazon Kinesis Analytics can assume to read this stream on your behalf.

To create the in-application stream, you need to specify a schema to transform your data into a schematized version used in SQL. In the schema, you provide the necessary mapping of the data elements in the streaming source to record columns in the in-app stream.

" + "CreateApplicationRequest$Inputs": "

Use this parameter to configure the application input.

You can configure your application to receive input from a single streaming source. In this configuration, you map this streaming source to an in-application stream that is created. Your application code can then query the in-application stream like a table (you can think of it as a constantly updating table).

For the streaming source, you provide its Amazon Resource Name (ARN) and the format of the data on the stream (for example, JSON, CSV, etc.). You also must provide an IAM role that Amazon Kinesis Analytics can assume to read this stream on your behalf.

To create the in-application stream, you need to specify a schema to transform your data into a schematized version used in SQL. In the schema, you provide the necessary mapping of the data elements in the streaming source to record columns in the in-application stream.

" } }, "InvalidApplicationConfigurationException": { @@ -526,21 +526,21 @@ } }, "KinesisFirehoseInput": { - "base": "

Identifies an Amazon Kinesis Firehose delivery stream as the streaming source. You provide the Firehose delivery stream's Amazon Resource Name (ARN) and an IAM role ARN that enables Amazon Kinesis Analytics to access the stream on your behalf.

", + "base": "

Identifies an Amazon Kinesis Firehose delivery stream as the streaming source. You provide the delivery stream's Amazon Resource Name (ARN) and an IAM role ARN that enables Amazon Kinesis Analytics to access the stream on your behalf.

", "refs": { - "Input$KinesisFirehoseInput": "

If the streaming source is an Amazon Kinesis Firehose delivery stream, identifies the Firehose delivery stream's ARN and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

Note: Either KinesisStreamsInput or KinesisFirehoseInput is required.

" + "Input$KinesisFirehoseInput": "

If the streaming source is an Amazon Kinesis Firehose delivery stream, identifies the delivery stream's ARN and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

Note: Either KinesisStreamsInput or KinesisFirehoseInput is required.

" } }, "KinesisFirehoseInputDescription": { "base": "

Describes the Amazon Kinesis Firehose delivery stream that is configured as the streaming source in the application input configuration.

", "refs": { - "InputDescription$KinesisFirehoseInputDescription": "

If an Amazon Kinesis Firehose delivery stream is configured as a streaming source, provides the Firehose delivery stream's Amazon Resource Name (ARN) and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

" + "InputDescription$KinesisFirehoseInputDescription": "

If an Amazon Kinesis Firehose delivery stream is configured as a streaming source, provides the delivery stream's ARN and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

" } }, "KinesisFirehoseInputUpdate": { "base": "

When updating application input configuration, provides information about an Amazon Kinesis Firehose delivery stream as the streaming source.

", "refs": { - "InputUpdate$KinesisFirehoseInputUpdate": "

If an Amazon Kinesis Firehose delivery stream is the streaming source to be updated, provides an updated stream Amazon Resource Name (ARN) and IAM role ARN.

" + "InputUpdate$KinesisFirehoseInputUpdate": "

If an Amazon Kinesis Firehose delivery stream is the streaming source to be updated, provides an updated stream ARN and IAM role ARN.

" } }, "KinesisFirehoseOutput": { @@ -558,11 +558,11 @@ "KinesisFirehoseOutputUpdate": { "base": "

When updating an output configuration using the UpdateApplication operation, provides information about an Amazon Kinesis Firehose delivery stream configured as the destination.

", "refs": { - "OutputUpdate$KinesisFirehoseOutputUpdate": "

Describes a Amazon Kinesis Firehose delivery stream as the destination for the output.

" + "OutputUpdate$KinesisFirehoseOutputUpdate": "

Describes an Amazon Kinesis Firehose delivery stream as the destination for the output.

" } }, "KinesisStreamsInput": { - "base": "

Identifies an Amazon Kinesis stream as the streaming source. You provide the stream's ARN and an IAM role ARN that enables Amazon Kinesis Analytics to access the stream on your behalf.

", + "base": "

Identifies an Amazon Kinesis stream as the streaming source. You provide the stream's Amazon Resource Name (ARN) and an IAM role ARN that enables Amazon Kinesis Analytics to access the stream on your behalf.

", "refs": { "Input$KinesisStreamsInput": "

If the streaming source is an Amazon Kinesis stream, identifies the stream's Amazon Resource Name (ARN) and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

Note: Either KinesisStreamsInput or KinesisFirehoseInput is required.

" } @@ -570,17 +570,17 @@ "KinesisStreamsInputDescription": { "base": "

Describes the Amazon Kinesis stream that is configured as the streaming source in the application input configuration.

", "refs": { - "InputDescription$KinesisStreamsInputDescription": "

If an Amazon Kinesis stream is configured as streaming source, provides Amazon Kinesis stream's ARN and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

" + "InputDescription$KinesisStreamsInputDescription": "

If an Amazon Kinesis stream is configured as the streaming source, provides the Amazon Kinesis stream's Amazon Resource Name (ARN) and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

" } }, "KinesisStreamsInputUpdate": { "base": "

When updating application input configuration, provides information about an Amazon Kinesis stream as the streaming source.

", "refs": { - "InputUpdate$KinesisStreamsInputUpdate": "

If a Amazon Kinesis stream is the streaming source to be updated, provides an updated stream ARN and IAM role ARN.

" + "InputUpdate$KinesisStreamsInputUpdate": "

If an Amazon Kinesis stream is the streaming source to be updated, provides an updated stream Amazon Resource Name (ARN) and IAM role ARN.

" } }, "KinesisStreamsOutput": { - "base": "

When configuring application output, identifies a Amazon Kinesis stream as the destination. You provide the stream Amazon Resource Name (ARN) and also an IAM role ARN that Amazon Kinesis Analytics can use to write to the stream on your behalf.

", + "base": "

When configuring application output, identifies an Amazon Kinesis stream as the destination. You provide the stream Amazon Resource Name (ARN) and also an IAM role ARN that Amazon Kinesis Analytics can use to write to the stream on your behalf.

", "refs": { "Output$KinesisStreamsOutput": "

Identifies an Amazon Kinesis stream as the destination.

" } @@ -597,6 +597,24 @@ "OutputUpdate$KinesisStreamsOutputUpdate": "

Describes an Amazon Kinesis stream as the destination for the output.

" } }, + "LambdaOutput": { + "base": "

When configuring application output, identifies an AWS Lambda function as the destination. You provide the function Amazon Resource Name (ARN) and also an IAM role ARN that Amazon Kinesis Analytics can use to write to the function on your behalf.
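
A minimal Go sketch, assuming the generated shape names match this model (the ARNs are placeholders, and the output's DestinationSchema is elided for brevity):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisanalytics"
)

func main() {
	// Route the in-application stream "ExampleOutputStream1" to an AWS
	// Lambda function as the destination.
	out := kinesisanalytics.Output{
		Name: aws.String("ExampleOutputStream1"),
		LambdaOutput: &kinesisanalytics.LambdaOutput{
			ResourceARN: aws.String("arn:aws:lambda:us-west-2:111122223333:function:example-fn"), // placeholder
			RoleARN:     aws.String("arn:aws:iam::111122223333:role/example-role"),               // placeholder
		},
	}
	fmt.Println(out)
}
```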

", + "refs": { + "Output$LambdaOutput": "

Identifies an AWS Lambda function as the destination.

" + } + }, + "LambdaOutputDescription": { + "base": "

For an application output, describes the AWS Lambda function configured as its destination.

", + "refs": { + "OutputDescription$LambdaOutputDescription": "

Describes the AWS Lambda function configured as the destination where output is written.

" + } + }, + "LambdaOutputUpdate": { + "base": "

When updating an output configuration using the UpdateApplication operation, provides information about an AWS Lambda function configured as the destination.

", + "refs": { + "OutputUpdate$LambdaOutputUpdate": "

Describes an AWS Lambda function as the destination for the output.

" + } + }, "LimitExceededException": { "base": "

Exceeded the number of applications allowed.

", "refs": { @@ -629,13 +647,13 @@ "MappingParameters": { "base": "

When configuring application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.

", "refs": { - "RecordFormat$MappingParameters": null + "RecordFormat$MappingParameters": "

When configuring application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.

" } }, "Output": { "base": "

Describes application output configuration in which you identify an in-application stream and a destination where you want the in-application stream data to be written. The destination can be an Amazon Kinesis stream or an Amazon Kinesis Firehose delivery stream.

For limits on how many destinations an application can write to, and for other limitations, see Limits.

", "refs": { - "AddApplicationOutputRequest$Output": "

An array of objects, each describing one output configuration. In the output configuration, you specify the name of an in-application stream, a destination (that is, an Amazon Kinesis stream or an Amazon Kinesis Firehose delivery stream), and record the formation to use when writing to the destination.

", + "AddApplicationOutputRequest$Output": "

An array of objects, each describing one output configuration. In the output configuration, you specify the name of an in-application stream, a destination (that is, an Amazon Kinesis stream, an Amazon Kinesis Firehose delivery stream, or an AWS Lambda function), and the record format to use when writing to the destination.

", "Outputs$member": null } }, @@ -666,7 +684,7 @@ "Outputs": { "base": null, "refs": { - "CreateApplicationRequest$Outputs": "

You can configure application output to write data from any of the in-application streams to up to five destinations.

These destinations can be Amazon Kinesis streams, Amazon Kinesis Firehose delivery streams, or both.

In the configuration, you specify the in-application stream name, the destination stream Amazon Resource Name (ARN), and the format to use when writing data. You must also provide an IAM role that Amazon Kinesis Analytics can assume to write to the destination stream on your behalf.

In the output configuration, you also provide the output stream Amazon Resource Name (ARN) and the format of data in the stream (for example, JSON, CSV). You also must provide an IAM role that Amazon Kinesis Analytics can assume to write to this stream on your behalf.

" + "CreateApplicationRequest$Outputs": "

You can configure application output to write data from any of the in-application streams to up to three destinations.

These destinations can be Amazon Kinesis streams, Amazon Kinesis Firehose delivery streams, AWS Lambda destinations, or any combination of the three.

In the configuration, you specify the in-application stream name, the destination stream or Lambda function Amazon Resource Name (ARN), and the format to use when writing data. You must also provide an IAM role that Amazon Kinesis Analytics can assume to write to the destination stream or Lambda function on your behalf.

In the output configuration, you also provide the output stream or Lambda function ARN. For stream destinations, you provide the format of the data in the stream (for example, JSON, CSV). You also must provide an IAM role that Amazon Kinesis Analytics can assume to write to the stream or Lambda function on your behalf.

" } }, "ParsedInputRecord": { @@ -821,10 +839,10 @@ "DiscoverInputSchemaRequest$ResourceARN": "

Amazon Resource Name (ARN) of the streaming source.

", "InputLambdaProcessor$ResourceARN": "

The ARN of the AWS Lambda function that operates on records in the stream.

", "InputLambdaProcessorDescription$ResourceARN": "

The ARN of the AWS Lambda function that is used to preprocess the records in the stream.

", - "InputLambdaProcessorUpdate$ResourceARNUpdate": "

The ARN of the new AWS Lambda function that is used to preprocess the records in the stream.

", - "KinesisFirehoseInput$ResourceARN": "

ARN of the input Firehose delivery stream.

", + "InputLambdaProcessorUpdate$ResourceARNUpdate": "

The Amazon Resource Name (ARN) of the new AWS Lambda function that is used to preprocess the records in the stream.

", + "KinesisFirehoseInput$ResourceARN": "

ARN of the input delivery stream.

", "KinesisFirehoseInputDescription$ResourceARN": "

Amazon Resource Name (ARN) of the Amazon Kinesis Firehose delivery stream.

", - "KinesisFirehoseInputUpdate$ResourceARNUpdate": "

ARN of the input Amazon Kinesis Firehose delivery stream to read.

", + "KinesisFirehoseInputUpdate$ResourceARNUpdate": "

Amazon Resource Name (ARN) of the input Amazon Kinesis Firehose delivery stream to read.

", "KinesisFirehoseOutput$ResourceARN": "

ARN of the destination Amazon Kinesis Firehose delivery stream to write to.

", "KinesisFirehoseOutputDescription$ResourceARN": "

Amazon Resource Name (ARN) of the Amazon Kinesis Firehose delivery stream.

", "KinesisFirehoseOutputUpdate$ResourceARNUpdate": "

Amazon Resource Name (ARN) of the Amazon Kinesis Firehose delivery stream to write to.

", @@ -833,7 +851,10 @@ "KinesisStreamsInputUpdate$ResourceARNUpdate": "

Amazon Resource Name (ARN) of the input Amazon Kinesis stream to read.

", "KinesisStreamsOutput$ResourceARN": "

ARN of the destination Amazon Kinesis stream to write to.

", "KinesisStreamsOutputDescription$ResourceARN": "

Amazon Resource Name (ARN) of the Amazon Kinesis stream.

", - "KinesisStreamsOutputUpdate$ResourceARNUpdate": "

Amazon Resource Name (ARN) of the Amazon Kinesis stream where you want to write the output.

" + "KinesisStreamsOutputUpdate$ResourceARNUpdate": "

Amazon Resource Name (ARN) of the Amazon Kinesis stream where you want to write the output.

", + "LambdaOutput$ResourceARN": "

Amazon Resource Name (ARN) of the destination Lambda function to write to.

", + "LambdaOutputDescription$ResourceARN": "

Amazon Resource Name (ARN) of the destination Lambda function.

", + "LambdaOutputUpdate$ResourceARNUpdate": "

Amazon Resource Name (ARN) of the destination Lambda function.

" } }, "ResourceInUseException": { @@ -858,12 +879,12 @@ "CloudWatchLoggingOptionDescription$RoleARN": "

IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role used must have the PutLogEvents policy action enabled.

", "CloudWatchLoggingOptionUpdate$RoleARNUpdate": "

IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role used must have the PutLogEvents policy action enabled.

", "DiscoverInputSchemaRequest$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf.

", - "InputLambdaProcessor$RoleARN": "

The ARN of the IAM role used to access the AWS Lambda function.

", - "InputLambdaProcessorDescription$RoleARN": "

The ARN of the IAM role used to access the AWS Lambda function.

", - "InputLambdaProcessorUpdate$RoleARNUpdate": "

The ARN of the new IAM role used to access the AWS Lambda function.

", + "InputLambdaProcessor$RoleARN": "

The ARN of the IAM role that is used to access the AWS Lambda function.

", + "InputLambdaProcessorDescription$RoleARN": "

The ARN of the IAM role that is used to access the AWS Lambda function.

", + "InputLambdaProcessorUpdate$RoleARNUpdate": "

The ARN of the new IAM role that is used to access the AWS Lambda function.

", "KinesisFirehoseInput$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to make sure the role has necessary permissions to access the stream.

", "KinesisFirehoseInputDescription$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics assumes to access the stream.

", - "KinesisFirehoseInputUpdate$RoleARNUpdate": "

Amazon Resource Name (ARN) of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant necessary permissions to this role.

", + "KinesisFirehoseInputUpdate$RoleARNUpdate": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant the necessary permissions to this role.

", "KinesisFirehoseOutput$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to write to the destination stream on your behalf. You need to grant the necessary permissions to this role.

", "KinesisFirehoseOutputDescription$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream.

", "KinesisFirehoseOutputUpdate$RoleARNUpdate": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant necessary permissions to this role.

", @@ -873,22 +894,25 @@ "KinesisStreamsOutput$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to write to the destination stream on your behalf. You need to grant the necessary permissions to this role.

", "KinesisStreamsOutputDescription$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream.

", "KinesisStreamsOutputUpdate$RoleARNUpdate": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant the necessary permissions to this role.

", - "S3Configuration$RoleARN": null, + "LambdaOutput$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to write to the destination function on your behalf. You need to grant the necessary permissions to this role.

", + "LambdaOutputDescription$RoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to write to the destination function.

", + "LambdaOutputUpdate$RoleARNUpdate": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to write to the destination function on your behalf. You need to grant the necessary permissions to this role.

", + "S3Configuration$RoleARN": "

IAM ARN of the role used to access the data.

", "S3ReferenceDataSource$ReferenceRoleARN": "

ARN of the IAM role that the service can assume to read data on your behalf. This role must have permission for the s3:GetObject action on the object and a trust policy that allows the Amazon Kinesis Analytics service principal to assume this role.

", "S3ReferenceDataSourceDescription$ReferenceRoleARN": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to read the Amazon S3 object on your behalf to populate the in-application reference table.

", "S3ReferenceDataSourceUpdate$ReferenceRoleARNUpdate": "

ARN of the IAM role that Amazon Kinesis Analytics can assume to read the Amazon S3 object and populate the in-application reference table.

" } }, "S3Configuration": { - "base": null, + "base": "

Provides a description of an Amazon S3 data source, including the Amazon Resource Name (ARN) of the S3 bucket, the ARN of the IAM role that is used to access the bucket, and the name of the S3 object that contains the data.

", "refs": { - "DiscoverInputSchemaRequest$S3Configuration": null + "DiscoverInputSchemaRequest$S3Configuration": "

Specify this parameter to discover a schema from data in an S3 object.
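
For example, a hedged Go sketch of a DiscoverInputSchema request that reads sample records from an S3 object (the bucket ARN, file key, and role ARN are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisanalytics"
)

func main() {
	// Discover the schema from a sample object in S3 instead of from the
	// live streaming source.
	input := &kinesisanalytics.DiscoverInputSchemaInput{
		S3Configuration: &kinesisanalytics.S3Configuration{
			BucketARN: aws.String("arn:aws:s3:::example-bucket"),                 // placeholder
			FileKey:   aws.String("sample-records.json"),                         // placeholder
			RoleARN:   aws.String("arn:aws:iam::111122223333:role/example-role"), // placeholder
		},
	}
	fmt.Println(input)
}
```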

" } }, "S3ReferenceDataSource": { "base": "

Identifies the S3 bucket and object that contains the reference data. Also identifies the IAM role Amazon Kinesis Analytics can assume to read this object on your behalf.

An Amazon Kinesis Analytics application loads reference data only once. If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.

", "refs": { - "ReferenceDataSource$S3ReferenceDataSource": null + "ReferenceDataSource$S3ReferenceDataSource": "

Identifies the S3 bucket and object that contains the reference data. Also identifies the IAM role Amazon Kinesis Analytics can assume to read this object on your behalf. An Amazon Kinesis Analytics application loads reference data only once. If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.

" } }, "S3ReferenceDataSourceDescription": { @@ -914,9 +938,9 @@ "DiscoverInputSchemaResponse$InputSchema": "

Schema inferred from the streaming source. It identifies the format of the data in the streaming source and how each data element maps to corresponding columns in the in-application stream that you can create.

", "Input$InputSchema": "

Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.

Also used to describe the format of the reference data source.

", "InputDescription$InputSchema": "

Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.

", - "ReferenceDataSource$ReferenceSchema": null, - "ReferenceDataSourceDescription$ReferenceSchema": null, - "ReferenceDataSourceUpdate$ReferenceSchemaUpdate": null + "ReferenceDataSource$ReferenceSchema": "

Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream.

", + "ReferenceDataSourceDescription$ReferenceSchema": "

Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream.

", + "ReferenceDataSourceUpdate$ReferenceSchemaUpdate": "

Describes the format of the data in the streaming source, and how each data element maps to corresponding columns created in the in-application stream.

" } }, "StartApplicationRequest": { @@ -942,8 +966,8 @@ "Timestamp": { "base": null, "refs": { - "ApplicationDetail$CreateTimestamp": "

Timestamp when the application version was created.

", - "ApplicationDetail$LastUpdateTimestamp": "

Timestamp when the application was last updated.

", + "ApplicationDetail$CreateTimestamp": "

Time stamp when the application version was created.

", + "ApplicationDetail$LastUpdateTimestamp": "

Time stamp when the application was last updated.

", "DeleteApplicationRequest$CreateTimestamp": "

You can use the DescribeApplication operation to get this value.

" } }, diff --git a/models/apis/kms/2014-11-01/docs-2.json b/models/apis/kms/2014-11-01/docs-2.json index 935845654db..4630c94b3e2 100644 --- a/models/apis/kms/2014-11-01/docs-2.json +++ b/models/apis/kms/2014-11-01/docs-2.json @@ -88,7 +88,7 @@ "BooleanType": { "base": null, "refs": { - "CreateKeyRequest$BypassPolicyLockoutSafetyCheck": "

A flag to indicate whether to bypass the key policy lockout safety check.

Setting this value to true increases the likelihood that the CMK becomes unmanageable. Do not set this value to true indiscriminately.

For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

Use this parameter only when you include a policy in the request and you intend to prevent the principal that is making the request from making a subsequent PutKeyPolicy request on the CMK.

The default value is false.

", + "CreateKeyRequest$BypassPolicyLockoutSafetyCheck": "

A flag to indicate whether to bypass the key policy lockout safety check.

Setting this value to true increases the risk that the CMK becomes unmanageable. Do not set this value to true indiscriminately.

For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

Use this parameter only when you include a policy in the request and you intend to prevent the principal that is making the request from making a subsequent PutKeyPolicy request on the CMK.

The default value is false.
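
A short Go sketch of the safe default (the inline policy document is a placeholder; the snippet assumes the preview SDK's pointer helpers):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	// Keep the lockout safety check enabled (the default) when supplying a
	// custom key policy; only set the flag to true when you intend to lock
	// the requesting principal out of PutKeyPolicy.
	input := &kms.CreateKeyInput{
		Policy:                         aws.String(`{"Version":"2012-10-17","Statement":[]}`), // placeholder policy
		BypassPolicyLockoutSafetyCheck: aws.Bool(false),
	}
	fmt.Println(input)
}
```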

", "GetKeyRotationStatusResponse$KeyRotationEnabled": "

A Boolean value that specifies whether key rotation is enabled.

", "KeyMetadata$Enabled": "

Specifies whether the CMK is enabled. When KeyState is Enabled this value is true, otherwise it is false.

", "ListAliasesResponse$Truncated": "

A flag that indicates whether there are more items in the list. When this value is true, the list in this response is truncated. To get more items, pass the value of the NextMarker element in this response to the Marker parameter in a subsequent request.

", @@ -96,7 +96,7 @@ "ListKeyPoliciesResponse$Truncated": "

A flag that indicates whether there are more items in the list. When this value is true, the list in this response is truncated. To get more items, pass the value of the NextMarker element in this response to the Marker parameter in a subsequent request.

", "ListKeysResponse$Truncated": "

A flag that indicates whether there are more items in the list. When this value is true, the list in this response is truncated. To get more items, pass the value of the NextMarker element in this response to the Marker parameter in a subsequent request.

", "ListResourceTagsResponse$Truncated": "

A flag that indicates whether there are more items in the list. When this value is true, the list in this response is truncated. To get more items, pass the value of the NextMarker element in this response to the Marker parameter in a subsequent request.

", - "PutKeyPolicyRequest$BypassPolicyLockoutSafetyCheck": "

A flag to indicate whether to bypass the key policy lockout safety check.

Setting this value to true increases the likelihood that the CMK becomes unmanageable. Do not set this value to true indiscriminately.

For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

Use this parameter only when you intend to prevent the principal that is making the request from making a subsequent PutKeyPolicy request on the CMK.

The default value is false.

" + "PutKeyPolicyRequest$BypassPolicyLockoutSafetyCheck": "

A flag to indicate whether to bypass the key policy lockout safety check.

Setting this value to true increases the risk that the CMK becomes unmanageable. Do not set this value to true indiscriminately.

For more information, refer to the scenario in the Default Key Policy section in the AWS Key Management Service Developer Guide.

Use this parameter only when you intend to prevent the principal that is making the request from making a subsequent PutKeyPolicy request on the CMK.

The default value is false.

" } }, "CancelKeyDeletionRequest": { @@ -721,13 +721,13 @@ "PolicyNameList": { "base": null, "refs": { - "ListKeyPoliciesResponse$PolicyNames": "

A list of policy names. Currently, there is only one policy and it is named \"Default\".

" + "ListKeyPoliciesResponse$PolicyNames": "

A list of key policy names. Currently, there is only one key policy per CMK and it is always named default.

" } }, "PolicyNameType": { "base": null, "refs": { - "GetKeyPolicyRequest$PolicyName": "

Specifies the name of the policy. The only valid name is default. To get the names of key policies, use ListKeyPolicies.

", + "GetKeyPolicyRequest$PolicyName": "

Specifies the name of the key policy. The only valid name is default. To get the names of key policies, use ListKeyPolicies.

", "PolicyNameList$member": null, "PutKeyPolicyRequest$PolicyName": "

The name of the key policy. The only valid value is default.

" } @@ -735,9 +735,9 @@ "PolicyType": { "base": null, "refs": { - "CreateKeyRequest$Policy": "

The key policy to attach to the CMK.

If you specify a policy and do not set BypassPolicyLockoutSafetyCheck to true, the policy must meet the following criteria:

If you do not specify a policy, AWS KMS attaches a default key policy to the CMK. For more information, see Default Key Policy in the AWS Key Management Service Developer Guide.

The policy size limit is 32 kilobytes (32768 bytes).

", - "GetKeyPolicyResponse$Policy": "

A policy document in JSON format.

", - "PutKeyPolicyRequest$Policy": "

The key policy to attach to the CMK.

If you do not set BypassPolicyLockoutSafetyCheck to true, the policy must meet the following criteria:

The policy size limit is 32 kilobytes (32768 bytes).

" + "CreateKeyRequest$Policy": "

The key policy to attach to the CMK.

If you provide a key policy, it must meet the following criteria:

If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. For more information, see Default Key Policy in the AWS Key Management Service Developer Guide.

The key policy size limit is 32 kilobytes (32768 bytes).

", + "GetKeyPolicyResponse$Policy": "

A key policy document in JSON format.

", + "PutKeyPolicyRequest$Policy": "

The key policy to attach to the CMK.

The key policy must meet the following criteria:

The key policy size limit is 32 kilobytes (32768 bytes).

" } }, "PrincipalIdType": { diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index 4520f990368..4cc8ff0adc0 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -2047,6 +2047,7 @@ "DBInstanceClass":{"shape":"String"}, "AvailabilityZone":{"shape":"String"}, "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, "Iops":{"shape":"IntegerOptional"}, "OptionGroupName":{"shape":"String"}, diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 9c9ec8340e8..1516d8b6ff6 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -16,7 +16,7 @@ "CreateDBClusterParameterGroup": "

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

", "CreateDBClusterSnapshot": "

Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

", "CreateDBInstance": "

Creates a new DB instance.

", - "CreateDBInstanceReadReplica": "

Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, or PostgreSQL.

Amazon Aurora does not support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

The source DB instance must have backup retention enabled.

For more information, see Working with PostgreSQL, MySQL, and MariaDB Read Replicas.

", + "CreateDBInstanceReadReplica": "

Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, or PostgreSQL. For more information, see Working with PostgreSQL, MySQL, and MariaDB Read Replicas.

Amazon Aurora does not support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

Your source DB instance must have backup retention enabled.

", "CreateDBParameterGroup": "

Creates a new DB parameter group.

A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

", "CreateDBSecurityGroup": "

Creates a new DB security group. DB security groups control access to a DB instance.

", "CreateDBSnapshot": "

Creates a DBSnapshot. The source DBInstance must be in the \"available\" state.

", @@ -78,7 +78,7 @@ "PromoteReadReplica": "

Promotes a Read Replica DB instance to a standalone DB instance.

We recommend that you enable automated backups on your Read Replica before promoting the Read Replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.

", "PromoteReadReplicaDBCluster": "

Promotes a Read Replica DB cluster to a standalone DB cluster.

", "PurchaseReservedDBInstancesOffering": "

Purchases a reserved DB instance offering.

", - "RebootDBInstance": "

Rebooting a DB instance restarts the database engine service. A reboot also applies to the DB instance any modifications to the associated DB parameter group that were pending. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot is conducted through a failover. An Amazon RDS event is created when the reboot is completed.

If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs.

The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions.

", + "RebootDBInstance": "

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

For more information about rebooting, see Rebooting a DB Instance.

", "RemoveRoleFromDBCluster": "

Disassociates an Identity and Access Management (IAM) role from an Aurora DB cluster. For more information, see Authorizing Amazon Aurora to Access Other AWS Services On Your Behalf.

", "RemoveSourceIdentifierFromSubscription": "

Removes a source identifier from an existing RDS event notification subscription.

", "RemoveTagsFromResource": "

Removes metadata tags from an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

", @@ -91,8 +91,8 @@ "RestoreDBInstanceFromS3": "

Amazon Relational Database Service (Amazon RDS) supports importing MySQL databases by using backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS MySQL DB Instance.

", "RestoreDBInstanceToPointInTime": "

Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property.

The target database is created with most of the original configuration, but in a system-selected availability zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.

", "RevokeDBSecurityGroupIngress": "

Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

", - "StartDBInstance": "

Starts a DB instance that was stopped using the AWS console, the stop-db-instance AWS CLI command, or the StopDBInstance action. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.

", - "StopDBInstance": "

Stops a DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.

" + "StartDBInstance": "

Starts a DB instance that was stopped using the AWS console, the stop-db-instance AWS CLI command, or the StopDBInstance action. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.

This command does not apply to Aurora MySQL and Aurora PostgreSQL.

", + "StopDBInstance": "

Stops a DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.

This command does not apply to Aurora MySQL and Aurora PostgreSQL.

" }, "shapes": { "AccountAttributesMessage": { @@ -283,6 +283,7 @@ "CreateDBInstanceMessage$CopyTagsToSnapshot": "

True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.

", "CreateDBInstanceMessage$EnableIAMDatabaseAuthentication": "

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see CreateDBCluster.

MySQL

Default: false

", "CreateDBInstanceMessage$EnablePerformanceInsights": "

True to enable Performance Insights for the DB instance, and otherwise false.

", + "CreateDBInstanceReadReplicaMessage$MultiAZ": "

Specifies whether the Read Replica is in a Multi-AZ deployment.
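
A brief Go sketch using the newly added field (the instance identifiers are placeholders; field names follow this service model):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

func main() {
	// Request a Read Replica that is deployed across multiple Availability
	// Zones by setting the new MultiAZ flag.
	input := &rds.CreateDBInstanceReadReplicaInput{
		DBInstanceIdentifier:       aws.String("example-replica"), // placeholder
		SourceDBInstanceIdentifier: aws.String("example-source"),  // placeholder
		MultiAZ:                    aws.Bool(true),
	}
	fmt.Println(input)
}
```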

", "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

Indicates that minor engine upgrades are applied automatically to the Read Replica during the maintenance window.

Default: Inherits from the source DB instance

", "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance is private.

", "CreateDBInstanceReadReplicaMessage$CopyTagsToSnapshot": "

True to copy all tags from the Read Replica to snapshots of the Read Replica, and otherwise false. The default is false.

", @@ -296,7 +297,7 @@ "DescribeReservedDBInstancesMessage$MultiAZ": "

The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

", "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

", "ModifyDBClusterMessage$EnableIAMDatabaseAuthentication": "

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

", - "ModifyDBInstanceMessage$MultiAZ": "

Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

Constraints: Cannot be specified if the DB instance is a Read Replica.

", + "ModifyDBInstanceMessage$MultiAZ": "

Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

", "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

", "ModifyDBInstanceMessage$CopyTagsToSnapshot": "

True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.

", "ModifyDBInstanceMessage$PubliclyAccessible": "

Boolean value that indicates if the DB instance has a publicly resolvable DNS name. Set to True to make the DB instance Internet-facing with a publicly resolvable DNS name, which resolves to a public IP address. Set to False to make the DB instance internal with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be true in order for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

Default: false

", @@ -1294,7 +1295,7 @@ "DoubleRangeList": { "base": null, "refs": { - "ValidStorageOptions$IopsToStorageRatio": "

The valid range of Provisioned IOPS to gigabytes of storage multiplier. For example, 3-10, which means that provisioned IOPS can be between 3 and 10 times storage.

" + "ValidStorageOptions$IopsToStorageRatio": "

The valid range of the Provisioned IOPS to gibibytes of storage multiplier. For example, 3-10, which means that provisioned IOPS can be between 3 and 10 times the allocated storage.

" } }, "DownloadDBLogFilePortionDetails": { @@ -1492,13 +1493,13 @@ "Integer": { "base": null, "refs": { - "DBClusterSnapshot$AllocatedStorage": "

Specifies the allocated storage size in gigabytes (GB).

", + "DBClusterSnapshot$AllocatedStorage": "

Specifies the allocated storage size in gibibytes (GiB).

", "DBClusterSnapshot$Port": "

Specifies the port that the DB cluster was listening on at the time of the snapshot.

", "DBClusterSnapshot$PercentProgress": "

Specifies the percentage of the estimated data that has been transferred.

", - "DBInstance$AllocatedStorage": "

Specifies the allocated storage size specified in gigabytes.

", + "DBInstance$AllocatedStorage": "

Specifies the allocated storage size specified in gibibytes.

", "DBInstance$BackupRetentionPeriod": "

Specifies the number of days for which automatic DB snapshots are retained.

", "DBInstance$DbInstancePort": "

Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.

", - "DBSnapshot$AllocatedStorage": "

Specifies the allocated storage size in gigabytes (GB).

", + "DBSnapshot$AllocatedStorage": "

Specifies the allocated storage size in gibibytes (GiB).

", "DBSnapshot$Port": "

Specifies the port that the database engine was listening on at the time of the snapshot.

", "DBSnapshot$PercentProgress": "

The percentage of the estimated data that has been transferred.

", "DownloadDBLogFilePortionMessage$NumberOfLines": "

The number of lines to download. If the number of lines specified results in a file over 1 MB in size, the file is truncated at 1 MB in size.

If the NumberOfLines parameter is specified, then the block of lines returned can be from the beginning or the end of the log file, depending on the value of the Marker parameter.

", @@ -1514,17 +1515,17 @@ "base": null, "refs": { "CreateDBClusterMessage$BackupRetentionPeriod": "

The number of days for which automated backups are retained. You must specify a minimum value of 1.

Default: 1

Constraints:

", - "CreateDBClusterMessage$Port": "

The port number on which the instances in the DB cluster accept connections.

Default: 3306

", - "CreateDBInstanceMessage$AllocatedStorage": "

The amount of storage (in gigabytes) to be initially allocated for the DB instance.

Type: Integer

Amazon Aurora

Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

MySQL

Constraints to the amount of storage for each storage type are the following:

MariaDB

Constraints to the amount of storage for each storage type are the following:

PostgreSQL

Constraints to the amount of storage for each storage type are the following:

Oracle

Constraints to the amount of storage for each storage type are the following:

SQL Server

Constraints to the amount of storage for each storage type are the following:

", + "CreateDBClusterMessage$Port": "

The port number on which the instances in the DB cluster accept connections.

Default: 3306 if the engine is set to aurora, or 5432 if it is set to aurora-postgresql.

", + "CreateDBInstanceMessage$AllocatedStorage": "

The amount of storage (in gibibytes) to allocate for the DB instance.

Type: Integer

Amazon Aurora

Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

MySQL

Constraints to the amount of storage for each storage type are the following:

MariaDB

Constraints to the amount of storage for each storage type are the following:

PostgreSQL

Constraints to the amount of storage for each storage type are the following:

Oracle

Constraints to the amount of storage for each storage type are the following:

SQL Server

Constraints to the amount of storage for each storage type are the following:

", "CreateDBInstanceMessage$BackupRetentionPeriod": "

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

Default: 1

Constraints:

", "CreateDBInstanceMessage$Port": "

The port number on which the database accepts connections.

MySQL

Default: 3306

Valid Values: 1150-65535

Type: Integer

MariaDB

Default: 3306

Valid Values: 1150-65535

Type: Integer

PostgreSQL

Default: 5432

Valid Values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid Values: 1150-65535

SQL Server

Default: 1433

Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

Amazon Aurora

Default: 3306

Valid Values: 1150-65535

Type: Integer

", - "CreateDBInstanceMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance.

Constraints: Must be a multiple between 3 and 10 of the storage amount for the DB instance. Must also be an integer multiple of 1000. For example, if the size of your DB instance is 500 GB, then your Iops value can be 2000, 3000, 4000, or 5000.

", + "CreateDBInstanceMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance.

Constraints: Must be a multiple between 1 and 50 of the storage amount for the DB instance. Must also be an integer multiple of 1000. For example, if the size of your DB instance is 500 GiB, then your Iops value can be 2000, 3000, 4000, or 5000.
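A minimal sketch of how these constraints combine in a CreateDBInstance call, written in Go against this SDK's preview Request/Send pattern (the identifier, instance class, and credentials are placeholders, and the external.LoadDefaultAWSConfig helper is assumed for configuration loading):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := rds.New(cfg)

	// 500 GiB of io1 storage with 5,000 IOPS: a 10x ratio, inside the
	// documented 1-50 range, and an integer multiple of 1,000.
	req := svc.CreateDBInstanceRequest(&rds.CreateDBInstanceInput{
		DBInstanceIdentifier: aws.String("example-mysql"), // placeholder
		DBInstanceClass:      aws.String("db.m4.large"),
		Engine:               aws.String("mysql"),
		AllocatedStorage:     aws.Int64(500),
		StorageType:          aws.String("io1"),
		Iops:                 aws.Int64(5000),
		MasterUsername:       aws.String("admin"),     // placeholder
		MasterUserPassword:   aws.String("change-me"), // placeholder
	})
	resp, err := req.Send()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("creating:", *resp.DBInstance.DBInstanceArn)
}
```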

", "CreateDBInstanceMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

", "CreateDBInstanceMessage$PromotionTier": "

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

Default: 1

Valid Values: 0 - 15

", "CreateDBInstanceReadReplicaMessage$Port": "

The port number that the DB instance uses for connections.

Default: Inherits from the source DB instance

Valid Values: 1150-65535

", "CreateDBInstanceReadReplicaMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

", "CreateDBInstanceReadReplicaMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the Read Replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

", - "DBCluster$AllocatedStorage": "

For all database engines except Amazon Aurora, AllocatedStorage specifies the allocated storage size in gigabytes (GB). For Aurora, AllocatedStorage always returns 1, because Aurora DB cluster storage size is not fixed, but instead automatically adjusts as needed.

", + "DBCluster$AllocatedStorage": "

For all database engines except Amazon Aurora, AllocatedStorage specifies the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage always returns 1, because Aurora DB cluster storage size is not fixed, but instead automatically adjusts as needed.

", "DBCluster$BackupRetentionPeriod": "

Specifies the number of days for which automatic DB snapshots are retained.

", "DBCluster$Port": "

Specifies the port that the database engine is listening on.

", "DBClusterMember$PromotionTier": "

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

", @@ -1559,9 +1560,9 @@ "DescribeSourceRegionsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "ModifyDBClusterMessage$BackupRetentionPeriod": "

The number of days for which automated backups are retained. You must specify a minimum value of 1.

Default: 1

Constraints:

", "ModifyDBClusterMessage$Port": "

The port number on which the DB cluster accepts connections.

Constraints: Value must be 1150-65535

Default: The same port as the original DB cluster.

", - "ModifyDBInstanceMessage$AllocatedStorage": "

The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request.

MySQL

Default: Uses existing setting

Valid Values: 5-6144

Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

Type: Integer

MariaDB

Default: Uses existing setting

Valid Values: 5-6144

Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

Type: Integer

PostgreSQL

Default: Uses existing setting

Valid Values: 5-6144

Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

Type: Integer

Oracle

Default: Uses existing setting

Valid Values: 10-6144

Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

SQL Server

Cannot be modified.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

", + "ModifyDBInstanceMessage$AllocatedStorage": "

The new amount of storage (in gibibytes) to allocate for the DB instance.

For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

For the valid values for allocated storage for each engine, see CreateDBInstance.

", "ModifyDBInstanceMessage$BackupRetentionPeriod": "

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

Constraints:

", - "ModifyDBInstanceMessage$Iops": "

The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

Default: Uses existing setting

Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

SQL Server

Setting the IOPS value for the SQL Server database engine is not supported.

Type: Integer

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

", + "ModifyDBInstanceMessage$Iops": "

The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

Default: Uses existing setting
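Since AllocatedStorage and Iops now share the same 10% rule and the same maintenance-window semantics, scaling them together in one ModifyDBInstance call is the natural pattern. A minimal Go sketch under the same preview Request/Send assumptions (identifier and current sizes are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := rds.New(cfg)

	// Grow storage and IOPS in one call. Both new values are more than
	// 10% above the assumed current values (500 GiB / 5,000 IOPS), so
	// neither is silently rounded up.
	req := svc.ModifyDBInstanceRequest(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("example-mysql"), // placeholder
		AllocatedStorage:     aws.Int64(600),
		Iops:                 aws.Int64(6000),
		ApplyImmediately:     aws.Bool(true), // otherwise waits for the maintenance window
	})
	resp, err := req.Send()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("instance status:", *resp.DBInstance.DBInstanceStatus)
}
```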

", "ModifyDBInstanceMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

", "ModifyDBInstanceMessage$DBPortNumber": "

The port number on which the database accepts connections.

The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.

MySQL

Default: 3306

Valid Values: 1150-65535

MariaDB

Default: 3306

Valid Values: 1150-65535

PostgreSQL

Default: 5432

Valid Values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid Values: 1150-65535

SQL Server

Default: 1433

Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

Amazon Aurora

Default: 3306

Valid Values: 1150-65535

", "ModifyDBInstanceMessage$PromotionTier": "

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster.

Default: 1

Valid Values: 0 - 15

", @@ -2048,7 +2049,7 @@ "RangeList": { "base": null, "refs": { - "ValidStorageOptions$StorageSize": "

The valid range of storage in gigabytes. For example, 100 to 6144.

", + "ValidStorageOptions$StorageSize": "

The valid range of storage in gibibytes. For example, 100 to 16384.

", "ValidStorageOptions$ProvisionedIops": "

The valid range of provisioned IOPS. For example, 1000-20000.
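These ValidStorageOptions ranges can be inspected before attempting a modification. A sketch of reading them back, assuming the DescribeValidDBInstanceModifications operation (which returns this shape) is available in the preview client; the instance identifier is a placeholder and the ranges are printed generically:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := rds.New(cfg)

	req := svc.DescribeValidDBInstanceModificationsRequest(&rds.DescribeValidDBInstanceModificationsInput{
		DBInstanceIdentifier: aws.String("example-mysql"), // placeholder
	})
	resp, err := req.Send()
	if err != nil {
		log.Fatal(err)
	}
	for _, opt := range resp.ValidDBInstanceModificationsMessage.Storage {
		// Each entry carries the valid storage-size, IOPS, and
		// IOPS-to-storage-ratio ranges for one storage type.
		fmt.Printf("%s: size=%v iops=%v ratio=%v\n",
			*opt.StorageType, opt.StorageSize, opt.ProvisionedIops, opt.IopsToStorageRatio)
	}
}
```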

" } }, @@ -2427,7 +2428,7 @@ "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window.

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

", "CreateDBInstanceMessage$DBParameterGroupName": "

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

", "CreateDBInstanceMessage$PreferredBackupWindow": "

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see The Backup Window.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred DB Instance Maintenance Window.

Constraints:

", - "CreateDBInstanceMessage$EngineVersion": "

The version number of the database engine to use.

The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster. For more information, see CreateDBCluster.

MariaDB

Microsoft SQL Server 2016

Microsoft SQL Server 2014

Microsoft SQL Server 2012

Microsoft SQL Server 2008 R2

MySQL

Oracle 12c

Oracle 11g

PostgreSQL

", + "CreateDBInstanceMessage$EngineVersion": "

The version number of the database engine to use.

The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster. For more information, see CreateDBCluster.

MariaDB

Microsoft SQL Server 2017

Microsoft SQL Server 2016

Microsoft SQL Server 2014

Microsoft SQL Server 2012

Microsoft SQL Server 2008 R2

MySQL

Oracle 12c

Oracle 11g

PostgreSQL

", "CreateDBInstanceMessage$LicenseModel": "

License model information for this DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

", "CreateDBInstanceMessage$OptionGroupName": "

Indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance.

", "CreateDBInstanceMessage$CharacterSetName": "

For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

Amazon Aurora

Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster.

", @@ -2747,7 +2748,7 @@ "ModifyDBInstanceMessage$LicenseModel": "

The license model for the DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

", "ModifyDBInstanceMessage$OptionGroupName": "

Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance.

", "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or during the next maintenance window if you set Apply Immediately to false. This value is stored as a lowercase string.

Constraints:

Example: mydbinstance

", - "ModifyDBInstanceMessage$StorageType": "

Specifies the storage type to be associated with the DB instance.

Valid values: standard | gp2 | io1

If you specify io1, you must also include a value for the Iops parameter.

Default: io1 if the Iops parameter is specified, otherwise standard

", + "ModifyDBInstanceMessage$StorageType": "

Specifies the storage type to be associated with the DB instance.

If you specify Provisioned IOPS (io1), you must also include a value for the Iops parameter.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

Valid values: standard | gp2 | io1

Default: io1 if the Iops parameter is specified, otherwise standard

", "ModifyDBInstanceMessage$TdeCredentialArn": "

The ARN from the key store with which to associate the instance for TDE encryption.

", "ModifyDBInstanceMessage$TdeCredentialPassword": "

The password for the given ARN from the key store in order to access the device.

", "ModifyDBInstanceMessage$CACertificateIdentifier": "

Indicates the certificate that needs to be associated with the instance.

", @@ -2901,7 +2902,7 @@ "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

The DB subnet group name to use for the new instance.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

", "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

License model information for the restored DB instance.

Default: Same as source.

Valid values: license-included | bring-your-own-license | general-public-license

", "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

The database name for the restored DB instance.

This parameter doesn't apply to the MySQL, PostgreSQL, or MariaDB engines.

", - "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

The database engine to use for the new instance.

Default: The same as source

Constraint: Must be compatible with the engine of the source. You can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot.

Valid Values:

", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

The database engine to use for the new instance.

Default: The same as source

Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot.

Valid Values:

", "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

The name of the option group to be used for the restored DB instance.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance.

", "RestoreDBInstanceFromDBSnapshotMessage$StorageType": "

Specifies the storage type to be associated with the DB instance.

Valid values: standard | gp2 | io1

If you specify io1, you must also include a value for the Iops parameter.

Default: io1 if the Iops parameter is specified, otherwise standard

", "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialArn": "

The ARN from the key store with which to associate the instance for TDE encryption.

", diff --git a/models/apis/route53/2013-04-01/api-2.json b/models/apis/route53/2013-04-01/api-2.json index a86f6591337..73a434ed9bd 100644 --- a/models/apis/route53/2013-04-01/api-2.json +++ b/models/apis/route53/2013-04-01/api-2.json @@ -220,6 +220,7 @@ "errors":[ {"shape":"NoSuchTrafficPolicy"}, {"shape":"InvalidInput"}, + {"shape":"TooManyTrafficPolicyVersionsForCurrentPolicy"}, {"shape":"ConcurrentModification"}, {"shape":"InvalidTrafficPolicyDocument"} ] @@ -3435,6 +3436,14 @@ "error":{"httpStatusCode":400}, "exception":true }, + "TooManyTrafficPolicyVersionsForCurrentPolicy":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, "TooManyVPCAssociationAuthorizations":{ "type":"structure", "members":{ diff --git a/models/apis/route53/2013-04-01/docs-2.json b/models/apis/route53/2013-04-01/docs-2.json index 2b3c6e6f776..edce14294ab 100644 --- a/models/apis/route53/2013-04-01/docs-2.json +++ b/models/apis/route53/2013-04-01/docs-2.json @@ -546,6 +546,7 @@ "TooManyHostedZones$message": "

Descriptive message for the error response.

", "TooManyTrafficPolicies$message": "

Descriptive message for the error response.

", "TooManyTrafficPolicyInstances$message": "

Descriptive message for the error response.

", + "TooManyTrafficPolicyVersionsForCurrentPolicy$message": "

Descriptive message for the error response.

", "TooManyVPCAssociationAuthorizations$message": "

Descriptive message for the error response.

", "TrafficPolicyAlreadyExists$message": "

Descriptive message for the error response.

", "TrafficPolicyInUse$message": "

Descriptive message for the error response.

", @@ -1823,6 +1824,11 @@ "refs": { } }, + "TooManyTrafficPolicyVersionsForCurrentPolicy": { + "base": "

This traffic policy version can't be created because you've reached the limit of 1000 on the number of versions that you can create for the current traffic policy.

To create more traffic policy versions, you can use GetTrafficPolicy to get the traffic policy document for a specified traffic policy version, and then use CreateTrafficPolicy to create a new traffic policy using the traffic policy document.
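A sketch of that fallback in Go under the preview's Request/Send pattern; the policy ID, version, names, and document are placeholders, and the error code is assumed to match the shape name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/route53"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := route53.New(cfg)

	policyID := "example-policy-id"                    // placeholder
	doc := `{"AWSPolicyFormatVersion":"2015-10-01"}`   // placeholder document

	// Try to add one more version to the existing policy.
	_, err = svc.CreateTrafficPolicyVersionRequest(&route53.CreateTrafficPolicyVersionInput{
		Id:       aws.String(policyID),
		Document: aws.String(doc),
	}).Send()
	if err == nil {
		return
	}
	aerr, ok := err.(awserr.Error)
	if !ok || aerr.Code() != "TooManyTrafficPolicyVersionsForCurrentPolicy" {
		log.Fatal(err)
	}

	// At the 1000-version limit: fetch the version we want to keep and
	// start a fresh policy from its document, as the error text suggests.
	got, err := svc.GetTrafficPolicyRequest(&route53.GetTrafficPolicyInput{
		Id:      aws.String(policyID),
		Version: aws.Int64(1000), // placeholder version
	}).Send()
	if err != nil {
		log.Fatal(err)
	}
	_, err = svc.CreateTrafficPolicyRequest(&route53.CreateTrafficPolicyInput{
		Name:     aws.String("example-policy-v2"), // placeholder
		Document: got.TrafficPolicy.Document,
	}).Send()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created replacement traffic policy")
}
```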

", + "refs": { + } + }, "TooManyVPCAssociationAuthorizations": { "base": "

You've created the maximum number of authorizations that can be created for the specified hosted zone. To authorize another VPC to be associated with the hosted zone, submit a DeleteVPCAssociationAuthorization request to remove an existing authorization. To get a list of existing authorizations, submit a ListVPCAssociationAuthorizations request.

", "refs": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index bd3f986684b..4fb502642ab 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -372,11 +372,6 @@ "Environment":{"shape":"EnvironmentMap"} } }, - "ContainerDefinitionList":{ - "type":"list", - "member":{"shape":"ContainerDefinition"}, - "max":5 - }, "ContainerHostname":{ "type":"string", "max":63, @@ -434,7 +429,6 @@ "members":{ "ModelName":{"shape":"ModelName"}, "PrimaryContainer":{"shape":"ContainerDefinition"}, - "SupplementalContainers":{"shape":"ContainerDefinitionList"}, "ExecutionRoleArn":{"shape":"RoleArn"}, "Tags":{"shape":"TagList"} } @@ -627,7 +621,6 @@ "required":[ "ModelName", "PrimaryContainer", - "SupplementalContainers", "ExecutionRoleArn", "CreationTime", "ModelArn" @@ -635,7 +628,6 @@ "members":{ "ModelName":{"shape":"ModelName"}, "PrimaryContainer":{"shape":"ContainerDefinition"}, - "SupplementalContainers":{"shape":"ContainerDefinitionList"}, "ExecutionRoleArn":{"shape":"RoleArn"}, "CreationTime":{"shape":"Timestamp"}, "ModelArn":{"shape":"ModelArn"} diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 739ea29c922..483a2159fe6 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -5,7 +5,7 @@ "AddTags": "

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, models, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

", "CreateEndpointConfig": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API only if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define one or more ProductionVariants, each of which identifies a model. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.
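The 2:1 weighting works out to 2/(2+1), so model A gets two thirds of the traffic. A minimal Go sketch of such a two-variant configuration, assuming the preview's value-typed slices and string-backed enums; names, models, and instance type are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sagemaker.New(cfg)

	// Weight 2 for model A and 1 for model B: A receives two thirds of
	// the traffic, B the remaining third.
	req := svc.CreateEndpointConfigRequest(&sagemaker.CreateEndpointConfigInput{
		EndpointConfigName: aws.String("example-config"), // placeholder
		ProductionVariants: []sagemaker.ProductionVariant{
			{
				VariantName:          aws.String("model-a"),
				ModelName:            aws.String("model-a"), // placeholder model
				InstanceType:         sagemaker.ProductionVariantInstanceType("ml.m4.xlarge"),
				InitialInstanceCount: aws.Int64(1),
				InitialVariantWeight: aws.Float64(2),
			},
			{
				VariantName:          aws.String("model-b"),
				ModelName:            aws.String("model-b"), // placeholder model
				InstanceType:         sagemaker.ProductionVariantInstanceType("ml.m4.xlarge"),
				InitialInstanceCount: aws.Int64(1),
				InitialVariantWeight: aws.Float64(1),
			},
		},
	})
	resp, err := req.Send()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("endpoint config:", *resp.EndpointConfigArn)
}
```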

", - "CreateModel": "

Creates a model in Amazon SageMaker. In the request, you name the model and describe one or more containers. For each container, you specify the docker image containing inference code, artifacts (from prior training), and custom environment map that the inference code uses when you deploy the model into production.

Use this API to create a model only if you want to use Amazon SageMaker hosting services. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API.

Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

In the CreateModel request, you must define at least one container with the PrimaryContainer parameter. You can optionally specify additional containers with the SupplementalContainers parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other AWS resources, you grant necessary permissions via this role.

", + "CreateModel": "

Creates a model in Amazon SageMaker. In the request, you name the model and describe one or more containers. For each container, you specify the Docker image containing inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model into production.

Use this API to create a model only if you want to use Amazon SageMaker hosting services. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API.

Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and the Docker image for deployment on ML compute hosting instances. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code accesses any other AWS resources, you grant the necessary permissions via this role.
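With SupplementalContainers removed in this release, a model reduces to a single primary container plus an execution role. A minimal Go sketch under the preview's Request/Send pattern; the model name, image URI, artifact location, and role ARN are all placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sagemaker.New(cfg)

	// One primary container: inference image, trained artifacts, and an
	// environment map the inference code can read at runtime.
	req := svc.CreateModelRequest(&sagemaker.CreateModelInput{
		ModelName: aws.String("example-model"), // placeholder
		PrimaryContainer: &sagemaker.ContainerDefinition{
			Image:        aws.String("123456789012.dkr.ecr.us-west-2.amazonaws.com/inference:latest"), // placeholder
			ModelDataUrl: aws.String("s3://example-bucket/model.tar.gz"),                               // placeholder
			Environment:  map[string]string{"LOG_LEVEL": "info"},
		},
		ExecutionRoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleSageMakerRole"), // placeholder
	})
	resp, err := req.Send()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("model:", *resp.ModelArn)
}
```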

", "CreateNotebookInstance": "

Creates an Amazon SageMaker notebook instance. A notebook instance is an ML compute instance running the Jupyter Notebook App.

In a CreateNotebookInstance request, you specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Optional) If you specified SubnetId, creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN).

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

", "CreatePresignedNotebookInstanceUrl": "

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

", "CreateTrainingJob": "

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a deep learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

For more information about Amazon SageMaker, see How It Works.

", @@ -25,7 +25,7 @@ "ListNotebookInstances": "

Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region.

", "ListTags": "

Returns the tags for the specified Amazon SageMaker resource.

", "ListTrainingJobs": "

Lists training jobs.

", - "StartNotebookInstance": "

Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService (is this same as \"Running\" in the console?) before you can connect to your Jupyter notebook.

", + "StartNotebookInstance": "

Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook.

", "StopNotebookInstance": "

Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves the ML storage volume.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

", "StopTrainingJob": "

Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of the training are not lost.

Training algorithms provided by Amazon SageMaker save the intermediate results of a model training job. This intermediate data is a valid model artifact. You can use the model artifacts that are saved when Amazon SageMaker stops a training job to create a model.

When it receives a StopTrainingJob request, Amazon SageMaker changes the status of the job to Stopping. After Amazon SageMaker stops the job, it sets the status to Stopped.
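A sketch of stopping a job and polling it through the Stopping state, assuming the preview's Request/Send pattern and string-backed status enums; the job name and polling interval are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sagemaker.New(cfg)
	name := aws.String("example-training-job") // placeholder

	if _, err := svc.StopTrainingJobRequest(&sagemaker.StopTrainingJobInput{
		TrainingJobName: name,
	}).Send(); err != nil {
		log.Fatal(err)
	}

	// The job moves to Stopping, then (after the SIGTERM grace window of
	// up to 120 seconds) to Stopped; poll until it leaves Stopping.
	for {
		out, err := svc.DescribeTrainingJobRequest(&sagemaker.DescribeTrainingJobInput{
			TrainingJobName: name,
		}).Send()
		if err != nil {
			log.Fatal(err)
		}
		status := string(out.TrainingJobStatus)
		fmt.Println("status:", status)
		if status != "Stopping" {
			break
		}
		time.Sleep(15 * time.Second)
	}
}
```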

", "UpdateEndpoint": "

Deploys the new EndpointConfig specified in the request, switches to using the newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

", @@ -77,18 +77,10 @@ "ContainerDefinition": { "base": "

Describes the container, as part of model definition.

", "refs": { - "ContainerDefinitionList$member": null, "CreateModelInput$PrimaryContainer": "

The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed into production.

", "DescribeModelOutput$PrimaryContainer": "

The location of the primary inference code, associated artifacts, and custom environment map that the inference code uses when it is deployed in production.

" } }, - "ContainerDefinitionList": { - "base": null, - "refs": { - "CreateModelInput$SupplementalContainers": "

The additional optional containers to deploy.

", - "DescribeModelOutput$SupplementalContainers": "

The description of additional optional containers that you defined when creating the model.

" - } - }, "ContainerHostname": { "base": null, "refs": { diff --git a/models/apis/sms/2016-10-24/docs-2.json b/models/apis/sms/2016-10-24/docs-2.json index a4d9d522238..47e843f7570 100644 --- a/models/apis/sms/2016-10-24/docs-2.json +++ b/models/apis/sms/2016-10-24/docs-2.json @@ -54,7 +54,7 @@ } }, "ConnectorStatus": { - "base": "Status of on-premise Connector", + "base": "Status of on-premises Connector", "refs": { "Connector$status": null } diff --git a/models/apis/snowball/2016-06-30/docs-2.json b/models/apis/snowball/2016-06-30/docs-2.json index 194f4fe9661..bfbc33686c9 100755 --- a/models/apis/snowball/2016-06-30/docs-2.json +++ b/models/apis/snowball/2016-06-30/docs-2.json @@ -35,7 +35,7 @@ "Address$AddressId": "

The unique ID for an address.

", "ClusterMetadata$AddressId": "

The automatically generated ID for a specific address.

", "ClusterMetadata$ForwardingAddressId": "

The ID of the address that you want a cluster shipped to, after it is shipped to its primary address. This field is not supported in most regions.

", - "CreateClusterRequest$AddressId": "

The ID for the address that you want the cluster shipped to.>

", + "CreateClusterRequest$AddressId": "

The ID for the address that you want the cluster shipped to.

", "CreateClusterRequest$ForwardingAddressId": "

The forwarding address ID for a cluster. This field is not supported in most regions.

", "CreateJobRequest$AddressId": "

The ID for the address that you want the Snowball shipped to.

", "CreateJobRequest$ForwardingAddressId": "

The forwarding address ID for a job. This field is not supported in most regions.

", @@ -476,8 +476,8 @@ "Shipment": { "base": "

The Status and TrackingNumber information for an inbound or outbound shipment.

", "refs": { - "ShippingDetails$InboundShipment": "

The Status and TrackingNumber values for a Snowball being delivered to the address that you specified for a particular job.

", - "ShippingDetails$OutboundShipment": "

The Status and TrackingNumber values for a Snowball being returned to AWS for a particular job.

" + "ShippingDetails$InboundShipment": "

The Status and TrackingNumber values for a Snowball being returned to AWS for a particular job.

", + "ShippingDetails$OutboundShipment": "

The Status and TrackingNumber values for a Snowball being delivered to the address that you specified for a particular job.
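The corrected pairing is easiest to see when reading the fields back from a job description. A minimal Go sketch, assuming the preview's Request/Send pattern; the job ID is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/snowball"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := snowball.New(cfg)

	req := svc.DescribeJobRequest(&snowball.DescribeJobInput{
		JobId: aws.String("JID123e4567-e89b-12d3-a456-426655440000"), // placeholder
	})
	resp, err := req.Send()
	if err != nil {
		log.Fatal(err)
	}

	details := resp.JobMetadata.ShippingDetails
	// OutboundShipment tracks the Snowball on its way to you;
	// InboundShipment tracks its return trip to AWS.
	if s := details.OutboundShipment; s != nil && s.TrackingNumber != nil {
		fmt.Println("to you:", *s.TrackingNumber)
	}
	if s := details.InboundShipment; s != nil && s.TrackingNumber != nil {
		fmt.Println("back to AWS:", *s.TrackingNumber)
	}
}
```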

" } }, "ShippingDetails": { diff --git a/models/apis/ssm/2014-11-06/docs-2.json b/models/apis/ssm/2014-11-06/docs-2.json index da6fb58b554..64966e95ec9 100644 --- a/models/apis/ssm/2014-11-06/docs-2.json +++ b/models/apis/ssm/2014-11-06/docs-2.json @@ -2,14 +2,14 @@ "version": "2.0", "service": "AWS Systems Manager

AWS Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon EC2 instance or on-premises machine in your hybrid environment that has been configured for Systems Manager.

This reference is intended to be used with the AWS Systems Manager User Guide.

To get started, verify prerequisites and configure managed instances. For more information, see Systems Manager Prerequisites.

For information about other API actions you can perform on Amazon EC2 instances, see the Amazon EC2 API Reference. For information about how to use a Query API, see Making API Requests.

", "operations": { - "AddTagsToResource": "

Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, Maintenance Windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.

Each resource can have a maximum of 10 tags.

We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and are interpreted strictly as a string of characters.

For more information about tags, see Tagging Your Amazon EC2 Resources in the Amazon EC2 User Guide.

", + "AddTagsToResource": "

Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, Maintenance Windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.

Each resource can have a maximum of 50 tags.

We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and are interpreted strictly as a string of characters.

For more information about tags, see Tagging Your Amazon EC2 Resources in the Amazon EC2 User Guide.
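A sketch of tagging a managed instance with the Owner/Stack keys mentioned above, assuming the preview's value-typed slices and string-backed enums; the resource ID is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// Two of the up-to-50 tags a resource may carry; tagging an existing
	// key overwrites its value.
	req := svc.AddTagsToResourceRequest(&ssm.AddTagsToResourceInput{
		ResourceType: ssm.ResourceTypeForTagging("ManagedInstance"), // string-backed enum assumed
		ResourceId:   aws.String("mi-0123456789abcdef0"),            // placeholder
		Tags: []ssm.Tag{
			{Key: aws.String("Owner"), Value: aws.String("DbAdmin")},
			{Key: aws.String("Stack"), Value: aws.String("Production")},
		},
	})
	if _, err := req.Send(); err != nil {
		log.Fatal(err)
	}
}
```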

", "CancelCommand": "

Attempts to cancel the command specified by the Command ID. There is no guarantee that the command will be terminated and the underlying process stopped.

", "CreateActivation": "

Registers your on-premises server or virtual machine with Amazon EC2 so that you can manage these resources using Run Command. An on-premises server or virtual machine that has been registered with EC2 is called a managed instance. For more information about activations, see Setting Up Systems Manager in Hybrid Environments.

", "CreateAssociation": "

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, the SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

", "CreateAssociationBatch": "

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, the SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

", "CreateDocument": "

Creates a Systems Manager document.

After you create a document, you can use CreateAssociation to associate it with one or more running instances.

", "CreateMaintenanceWindow": "

Creates a new Maintenance Window.

", - "CreatePatchBaseline": "

Creates a patch baseline.

", + "CreatePatchBaseline": "

Creates a patch baseline.

For information about valid key and value pairs in PatchFilters for each supported operating system type, see PatchFilter.
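Because the valid PatchFilter keys depend on the baseline's operating system, a baseline declaration ties the two together. A minimal Go sketch of an Ubuntu baseline using the PRIORITY and SECTION keys, assuming the preview's Request/Send pattern and string-backed enums; the name and filter values are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// PRIORITY and SECTION are only meaningful for Ubuntu, so the
	// baseline pins OperatingSystem to UBUNTU.
	req := svc.CreatePatchBaselineRequest(&ssm.CreatePatchBaselineInput{
		Name:            aws.String("example-ubuntu-baseline"), // placeholder
		OperatingSystem: ssm.OperatingSystem("UBUNTU"),         // string-backed enum assumed
		GlobalFilters: &ssm.PatchFilterGroup{
			PatchFilters: []ssm.PatchFilter{
				{Key: ssm.PatchFilterKey("PRIORITY"), Values: []string{"Required", "Important"}},
				{Key: ssm.PatchFilterKey("SECTION"), Values: []string{"libs"}},
			},
		},
	})
	resp, err := req.Send()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("baseline:", *resp.BaselineId)
}
```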

", "CreateResourceDataSync": "

Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use ListResourceDataSync.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Configuring Resource Data Sync for Inventory.

", "DeleteActivation": "

Deletes an activation. You are not required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. Deleting an activation does not de-register managed instances. You must manually de-register managed instances.

", "DeleteAssociation": "

Disassociates the specified Systems Manager document from the specified instance.

When you disassociate a document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.

", @@ -62,7 +62,7 @@ "GetParameter": "

Get information about a parameter by using the parameter name.

", "GetParameterHistory": "

Query a list of all parameters used by the AWS account.

", "GetParameters": "

Get details of a parameter.

", - "GetParametersByPath": "

Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

", + "GetParametersByPath": "

Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

This API action doesn't support filtering by tags.
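Because a page can legitimately contain fewer items than MaxResults, the only reliable stop condition is an empty NextToken. A minimal Go sketch of that loop under the preview's Request/Send pattern; the hierarchy path is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	input := &ssm.GetParametersByPathInput{
		Path:      aws.String("/Finance/Prod"), // placeholder hierarchy
		Recursive: aws.Bool(true),
	}
	// A page may hold anywhere from zero up to MaxResults items, so keep
	// following NextToken until it comes back empty.
	for {
		resp, err := svc.GetParametersByPathRequest(input).Send()
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range resp.Parameters {
			fmt.Println(*p.Name, "=", *p.Value)
		}
		if resp.NextToken == nil || *resp.NextToken == "" {
			break
		}
		input.NextToken = resp.NextToken
	}
}
```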

", "GetPatchBaseline": "

Retrieves information about a patch baseline.

", "GetPatchBaselineForPatchGroup": "

Retrieves the patch baseline that should be used for the specified patch group.

", "ListAssociationVersions": "

Retrieves all versions of an association for a specific association ID.

", @@ -78,7 +78,7 @@ "ListResourceDataSync": "

Lists your resource data sync configurations. Includes information about the last time a sync attempted to start, the last sync status, and the last time a sync successfully completed.

The number of sync configurations might be too large to return using a single call to ListResourceDataSync. You can limit the number of sync configurations returned by using the MaxResults parameter. To determine whether there are more sync configurations to list, check the value of NextToken in the output. If there are more sync configurations to list, you can request them by passing the NextToken returned by the call as the NextToken parameter of a subsequent call.

", "ListTagsForResource": "

Returns a list of the tags assigned to the specified resource.

", "ModifyDocumentPermission": "

Shares a Systems Manager document publicly or privately. If you share a document privately, you must specify the AWS user account IDs for those people who can use the document. If you share a document publicly, you must specify All as the account ID.

", - "PutComplianceItems": "

Registers a compliance type and other compliance details on a designated resource. This action lets you register custom compliance details with a resource. This call overwrites existing compliance information on the resource, so you must provide a full list of compliance items each time that you send the request.

", + "PutComplianceItems": "

Registers a compliance type and other compliance details on a designated resource. This action lets you register custom compliance details with a resource. This call overwrites existing compliance information on the resource, so you must provide a full list of compliance items each time that you send the request.

ComplianceType can be one of the following:

", "PutInventory": "

Bulk update custom inventory items on one or more instances. The request adds an inventory item, if it doesn't already exist, or updates an inventory item, if it does exist.

", "PutParameter": "

Add one or more parameters to the system.

", "RegisterDefaultPatchBaseline": "

Defines the default patch baseline.

", @@ -98,7 +98,7 @@ "UpdateMaintenanceWindowTarget": "

Modifies the target of an existing Maintenance Window. You can't change the target type, but you can change the following:

The target from being an ID target to a Tag target, or a Tag target to an ID target.

IDs for an ID target.

Tags for a Tag target.

Owner.

Name.

Description.

If a parameter is null, then the corresponding field is not modified.

", "UpdateMaintenanceWindowTask": "

Modifies a task assigned to a Maintenance Window. You can't change the task type, but you can change the following values:

Task ARN. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript to AWS-RunShellScript.

Service role ARN.

Task parameters.

Task priority.

Task MaxConcurrency and MaxErrors.

Log location.

If a parameter is null, then the corresponding field is not modified. Also, if you set Replace to true, then all fields required by the RegisterTaskWithMaintenanceWindow action are required for this request. Optional fields that aren't specified are set to null.

", "UpdateManagedInstanceRole": "

Assigns or changes an AWS Identity and Access Management (IAM) role to the managed instance.

", - "UpdatePatchBaseline": "

Modifies an existing patch baseline. Fields not specified in the request are left unchanged.

" + "UpdatePatchBaseline": "

Modifies an existing patch baseline. Fields not specified in the request are left unchanged.

For information about valid key and value pairs in PatchFilters for each supported operating system type, see PatchFilter.

" }, "shapes": { "AccountId": { @@ -1770,7 +1770,7 @@ } }, "DoesNotExistException": { - "base": "

Error returned when the ID specified for a resource (e.g. a Maintenance Window) doesn't exist.

", + "base": "

Error returned when the ID specified for a resource, such as a Maintenance Window or Patch baseline, doesn't exist.

For information about resource limits in Systems Manager, see AWS Systems Manager Limits.

", "refs": { } }, @@ -2045,7 +2045,7 @@ } }, "HierarchyLevelLimitExceededException": { - "base": "

A hierarchy can have a maximum of five levels. For example:

/Finance/Prod/IAD/OS/WinServ2016/license15

For more information, see Working with Systems Manager Parameters.

", + "base": "

A hierarchy can have a maximum of 15 levels. For more information, see Working with Systems Manager Parameters.

", "refs": { } }, @@ -3629,7 +3629,7 @@ "DeleteParameterRequest$Name": "

The name of the parameter to delete.

", "GetParameterHistoryRequest$Name": "

The name of a parameter you want to query.

", "GetParameterRequest$Name": "

The name of the parameter you want to query.

", - "GetParametersByPathRequest$Path": "

The hierarchy for the parameter. Hierarchies start with a forward slash (/) and end with the parameter name. A hierarchy can have a maximum of five levels. For example: /Finance/Prod/IAD/WinServ2016/license15

", + "GetParametersByPathRequest$Path": "

The hierarchy for the parameter. Hierarchies start with a forward slash (/) and end with the parameter name. A hierarchy can have a maximum of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33

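A sketch of reading everything under one branch of such a hierarchy, assuming the generated ssm shapes (the path is illustrative):

// parametersUnder returns all parameters below a hierarchy path,
// following NextToken and decrypting SecureString values.
func parametersUnder(svc *ssm.SSM, path string) ([]ssm.Parameter, error) {
	var out []ssm.Parameter
	input := &ssm.GetParametersByPathInput{
		Path:           aws.String(path), // e.g. "/Finance/Prod/IAD"
		Recursive:      aws.Bool(true),
		WithDecryption: aws.Bool(true),
	}
	for {
		resp, err := svc.GetParametersByPathRequest(input).Send()
		if err != nil {
			return nil, err
		}
		out = append(out, resp.Parameters...)
		if resp.NextToken == nil {
			return out, nil
		}
		input.NextToken = resp.NextToken
	}
}
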
", "Parameter$Name": "

The name of the parameter.

", "ParameterHistory$Name": "

The name of the parameter.

", "ParameterMetadata$Name": "

The parameter name.

", @@ -3827,7 +3827,7 @@ } }, "ParametersFilter": { - "base": "

One or more filters. Use a filter to return a more specific list of results.

", + "base": "

This data type is deprecated. Instead, use ParameterStringFilter.

", "refs": { "ParametersFilterList$member": null } @@ -3954,7 +3954,7 @@ } }, "PatchFilter": { - "base": "

Defines a patch filter.

", + "base": "

Defines a patch filter.

A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY, however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.

Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.

Windows Operating Systems

The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, and MSRC_SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

Supported key: CLASSIFICATION

Supported values:

Supported key: MSRC_SEVERITY

Supported values:

Ubuntu Operating Systems

The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and SECTION. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

Supported key: PRIORITY

Supported values:

Supported key: SECTION

Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.

Amazon Linux Operating Systems

The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

Supported key: CLASSIFICATION

Supported values:

Supported key: SEVERITY

Supported values:

RedHat Enterprise Linux (RHEL) Operating Systems

The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

Supported key: CLASSIFICATION

Supported values:

Supported key: SEVERITY

Supported values:

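As a sketch of the key/value pairing this describes, here is a Windows filter; the PatchFilterKeyMsrcSeverity constant name is assumed from the model's MSRC_SEVERITY value, and the severities shown are typical Windows values:

// A filter that matches Critical and Important Windows patches.
// MSRC_SEVERITY is valid only for Windows operating systems.
var msrcFilter = ssm.PatchFilter{
	Key:    ssm.PatchFilterKeyMsrcSeverity,
	Values: []string{"Critical", "Important"},
}
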
", "refs": { "PatchFilterList$member": null } @@ -3972,7 +3972,7 @@ "PatchFilterKey": { "base": null, "refs": { - "PatchFilter$Key": "

The key for the filter (PRODUCT, CLASSIFICATION, MSRC_SEVERITY, PATCH_ID)

" + "PatchFilter$Key": "

The key for the filter.

See PatchFilter for lists of valid keys for each operating system type.

" } }, "PatchFilterList": { @@ -3990,7 +3990,7 @@ "PatchFilterValueList": { "base": null, "refs": { - "PatchFilter$Values": "

The value for the filter key.

" + "PatchFilter$Values": "

The value for the filter key.

See PatchFilter for lists of valid values for each key based on operating system type.

" } }, "PatchGroup": { @@ -4436,7 +4436,7 @@ } }, "ResourceLimitExceededException": { - "base": "

Error returned when the caller has exceeded the default resource limits (e.g. too many Maintenance Windows have been created).

", + "base": "

Error returned when the caller has exceeded the default resource limits. For example, too many Maintenance Windows or Patch baselines have been created.

For information about resource limits in Systems Manager, see AWS Systems Manager Limits.

", "refs": { } }, @@ -4743,7 +4743,7 @@ "FailureDetails$FailureStage": "

The stage of the Automation execution when the failure occurred. The stages include the following: InputValidation, PreVerification, Invocation, PostVerification.

", "FailureDetails$FailureType": "

The type of Automation failure. Failure types include the following: Action, Permission, Throttling, Verification, Internal.

", "FeatureNotAvailableException$Message": null, - "HierarchyLevelLimitExceededException$message": "

A hierarchy can have a maximum of five levels. For example:

/Finance/Prod/IAD/OS/WinServ2016/license15

For more information, see Working with Systems Manager Parameters.

", + "HierarchyLevelLimitExceededException$message": "

A hierarchy can have a maximum of 15 levels. For more information, see Working with Systems Manager Parameters.

", "HierarchyTypeMismatchException$message": "

Parameter Store does not support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

", "IdempotentParameterMismatch$Message": null, "InstanceInformation$PlatformName": "

The name of the operating system platform running on your instance.

", diff --git a/models/apis/workspaces/2015-04-08/api-2.json b/models/apis/workspaces/2015-04-08/api-2.json index 05e36001c46..a69d2670138 100644 --- a/models/apis/workspaces/2015-04-08/api-2.json +++ b/models/apis/workspaces/2015-04-08/api-2.json @@ -210,7 +210,9 @@ "enum":[ "VALUE", "STANDARD", - "PERFORMANCE" + "PERFORMANCE", + "POWER", + "GRAPHICS" ] }, "ComputeType":{ @@ -441,6 +443,32 @@ "max":25, "min":1 }, + "ModificationResourceEnum":{ + "type":"string", + "enum":[ + "ROOT_VOLUME", + "USER_VOLUME", + "COMPUTE_TYPE" + ] + }, + "ModificationState":{ + "type":"structure", + "members":{ + "Resource":{"shape":"ModificationResourceEnum"}, + "State":{"shape":"ModificationStateEnum"} + } + }, + "ModificationStateEnum":{ + "type":"string", + "enum":[ + "UPDATE_INITIATED", + "UPDATE_IN_PROGRESS" + ] + }, + "ModificationStateList":{ + "type":"list", + "member":{"shape":"ModificationState"} + }, "ModifyWorkspacePropertiesRequest":{ "type":"structure", "required":[ @@ -553,6 +581,13 @@ }, "exception":true }, + "RootStorage":{ + "type":"structure", + "members":{ + "Capacity":{"shape":"NonEmptyString"} + } + }, + "RootVolumeSizeGib":{"type":"integer"}, "RunningMode":{ "type":"string", "enum":[ @@ -693,6 +728,7 @@ "Capacity":{"shape":"NonEmptyString"} } }, + "UserVolumeSizeGib":{"type":"integer"}, "VolumeEncryptionKey":{"type":"string"}, "Workspace":{ "type":"structure", @@ -710,7 +746,8 @@ "VolumeEncryptionKey":{"shape":"VolumeEncryptionKey"}, "UserVolumeEncryptionEnabled":{"shape":"BooleanObject"}, "RootVolumeEncryptionEnabled":{"shape":"BooleanObject"}, - "WorkspaceProperties":{"shape":"WorkspaceProperties"} + "WorkspaceProperties":{"shape":"WorkspaceProperties"}, + "ModificationStates":{"shape":"ModificationStateList"} } }, "WorkspaceBundle":{ @@ -720,6 +757,7 @@ "Name":{"shape":"NonEmptyString"}, "Owner":{"shape":"BundleOwner"}, "Description":{"shape":"Description"}, + "RootStorage":{"shape":"RootStorage"}, "UserStorage":{"shape":"UserStorage"}, "ComputeType":{"shape":"ComputeType"} } @@ -790,7 +828,10 @@ "type":"structure", "members":{ "RunningMode":{"shape":"RunningMode"}, - "RunningModeAutoStopTimeoutInMinutes":{"shape":"RunningModeAutoStopTimeoutInMinutes"} + "RunningModeAutoStopTimeoutInMinutes":{"shape":"RunningModeAutoStopTimeoutInMinutes"}, + "RootVolumeSizeGib":{"shape":"RootVolumeSizeGib"}, + "UserVolumeSizeGib":{"shape":"UserVolumeSizeGib"}, + "ComputeTypeName":{"shape":"Compute"} } }, "WorkspaceRequest":{ @@ -831,6 +872,7 @@ "TERMINATING", "TERMINATED", "SUSPENDED", + "UPDATING", "STOPPING", "STOPPED", "ERROR" diff --git a/models/apis/workspaces/2015-04-08/docs-2.json b/models/apis/workspaces/2015-04-08/docs-2.json index d4c90e4f04b..9c9db9eef23 100644 --- a/models/apis/workspaces/2015-04-08/docs-2.json +++ b/models/apis/workspaces/2015-04-08/docs-2.json @@ -1,21 +1,21 @@ { "version": "2.0", - "service": "Amazon WorkSpaces Service

This reference provides detailed information about the Amazon WorkSpaces operations.

", + "service": "Amazon WorkSpaces Service

Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft Windows desktops for your users.

", "operations": { - "CreateTags": "

Creates tags for a WorkSpace.

", - "CreateWorkspaces": "

Creates one or more WorkSpaces.

This operation is asynchronous and returns before the WorkSpaces are created.

", - "DeleteTags": "

Deletes tags from a WorkSpace.

", - "DescribeTags": "

Describes tags for a WorkSpace.

", - "DescribeWorkspaceBundles": "

Obtains information about the WorkSpace bundles that are available to your account in the specified region.

You can filter the results with either the BundleIds parameter, or the Owner parameter, but not both.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

", - "DescribeWorkspaceDirectories": "

Retrieves information about the AWS Directory Service directories in the region that are registered with Amazon WorkSpaces and are available to your account.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

", - "DescribeWorkspaces": "

Obtains information about the specified WorkSpaces.

Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, can be specified at a time.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

", - "DescribeWorkspacesConnectionStatus": "

Describes the connection status of a specified WorkSpace.

", - "ModifyWorkspaceProperties": "

Modifies the WorkSpace properties, including the running mode and AutoStop time.

", - "RebootWorkspaces": "

Reboots the specified WorkSpaces.

To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE, IMPAIRED, or INOPERABLE.

This operation is asynchronous and returns before the WorkSpaces have rebooted.

", - "RebuildWorkspaces": "

Rebuilds the specified WorkSpaces.

Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. Rebuilding a WorkSpace causes the following to occur:

To be able to rebuild a WorkSpace, the WorkSpace must have a State of AVAILABLE or ERROR.

This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.

", - "StartWorkspaces": "

Starts the specified WorkSpaces. The WorkSpaces must have a running mode of AutoStop and a state of STOPPED.

", - "StopWorkspaces": "

Stops the specified WorkSpaces. The WorkSpaces must have a running mode of AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR.

", - "TerminateWorkspaces": "

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is not maintained and will be destroyed. If you need to archive any user data, contact Amazon Web Services before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated.

" + "CreateTags": "

Creates tags for the specified WorkSpace.

", + "CreateWorkspaces": "

Creates one or more WorkSpaces.

This operation is asynchronous and returns before the WorkSpaces are created.

", + "DeleteTags": "

Deletes the specified tags from a WorkSpace.

", + "DescribeTags": "

Describes the tags for the specified WorkSpace.

", + "DescribeWorkspaceBundles": "

Describes the available WorkSpace bundles.

You can filter the results using either bundle ID or owner, but not both.

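For example, a complete sketch that lists the AWS-provided bundles, assuming this preview SDK's external config loader and the generated workspaces package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/workspaces"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := workspaces.New(cfg)

	// Filter by owner only; bundle IDs cannot be combined with this filter.
	resp, err := svc.DescribeWorkspaceBundlesRequest(&workspaces.DescribeWorkspaceBundlesInput{
		Owner: aws.String("AMAZON"),
	}).Send()
	if err != nil {
		panic(err)
	}
	for _, b := range resp.Bundles {
		fmt.Println(*b.BundleId, *b.Name)
	}
}
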
", + "DescribeWorkspaceDirectories": "

Describes the available AWS Directory Service directories that are registered with Amazon WorkSpaces.

", + "DescribeWorkspaces": "

Describes the specified WorkSpaces.

You can filter the results using bundle ID, directory ID, or owner, but you can specify only one filter at a time.

", + "DescribeWorkspacesConnectionStatus": "

Describes the connection status of the specified WorkSpaces.

", + "ModifyWorkspaceProperties": "

Modifies the specified WorkSpace properties.

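A sketch using the properties added in this change, the volume sizes and compute type; the WorkSpace ID is a placeholder, and the ComputePower constant name is assumed from the new POWER enum value:

// resizeWorkspace grows both volumes and moves to the POWER compute type.
func resizeWorkspace(svc *workspaces.WorkSpaces) error {
	_, err := svc.ModifyWorkspacePropertiesRequest(&workspaces.ModifyWorkspacePropertiesInput{
		WorkspaceId: aws.String("ws-0123456789"), // placeholder WorkSpace ID
		WorkspaceProperties: &workspaces.WorkspaceProperties{
			RootVolumeSizeGib: aws.Int64(175),
			UserVolumeSizeGib: aws.Int64(100),
			ComputeTypeName:   workspaces.ComputePower,
		},
	}).Send()
	return err
}

While such a change is applied, the WorkSpace reports the new UPDATING state, and the ModificationStates list added in this change shows which resource (ROOT_VOLUME, USER_VOLUME, or COMPUTE_TYPE) is in UPDATE_INITIATED or UPDATE_IN_PROGRESS.
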
", + "RebootWorkspaces": "

Reboots the specified WorkSpaces.

You cannot reboot a WorkSpace unless its state is AVAILABLE, IMPAIRED, or INOPERABLE.

This operation is asynchronous and returns before the WorkSpaces have rebooted.

", + "RebuildWorkspaces": "

Rebuilds the specified WorkSpaces.

You cannot rebuild a WorkSpace unless its state is AVAILABLE or ERROR.

Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Rebuild a WorkSpace.

This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.

", + "StartWorkspaces": "

Starts the specified WorkSpaces.

You cannot start a WorkSpace unless it has a running mode of AutoStop and a state of STOPPED.

", + "StopWorkspaces": "

Stops the specified WorkSpaces.

You cannot stop a WorkSpace unless it has a running mode of AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR.

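A sketch of a batch stop, assuming the generated workspaces shapes; per-WorkSpace failures come back in FailedRequests rather than failing the whole call:

// stopAll requests a stop for each AutoStop WorkSpace ID given.
func stopAll(svc *workspaces.WorkSpaces, ids []string) ([]workspaces.FailedWorkspaceChangeRequest, error) {
	reqs := make([]workspaces.StopRequest, 0, len(ids))
	for _, id := range ids {
		reqs = append(reqs, workspaces.StopRequest{WorkspaceId: aws.String(id)})
	}
	resp, err := svc.StopWorkspacesRequest(&workspaces.StopWorkspacesInput{
		StopWorkspaceRequests: reqs,
	}).Send()
	if err != nil {
		return nil, err
	}
	return resp.FailedRequests, nil
}
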
", + "TerminateWorkspaces": "

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact Amazon Web Services before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated.

" }, "shapes": { "ARN": { @@ -38,137 +38,138 @@ "BooleanObject": { "base": null, "refs": { - "DefaultWorkspaceCreationProperties$EnableWorkDocs": "

Specifies if the directory is enabled for Amazon WorkDocs.

", - "DefaultWorkspaceCreationProperties$EnableInternetAccess": "

A public IP address will be attached to all WorkSpaces that are created or rebuilt.

", - "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "

The WorkSpace user is an administrator on the WorkSpace.

", - "Workspace$UserVolumeEncryptionEnabled": "

Specifies whether the data stored on the user volume, or D: drive, is encrypted.

", - "Workspace$RootVolumeEncryptionEnabled": "

Specifies whether the data stored on the root volume, or C: drive, is encrypted.

", - "WorkspaceRequest$UserVolumeEncryptionEnabled": "

Specifies whether the data stored on the user volume, or D: drive, is encrypted.

", - "WorkspaceRequest$RootVolumeEncryptionEnabled": "

Specifies whether the data stored on the root volume, or C: drive, is encrypted.

" + "DefaultWorkspaceCreationProperties$EnableWorkDocs": "

Indicates whether the directory is enabled for Amazon WorkDocs.

", + "DefaultWorkspaceCreationProperties$EnableInternetAccess": "

Indicates whether a public IP address will be attached to all WorkSpaces that are created or rebuilt.

", + "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "

Indicates whether the WorkSpace user is an administrator on the WorkSpace.

", + "Workspace$UserVolumeEncryptionEnabled": "

Indicates whether the data stored on the user volume is encrypted.

", + "Workspace$RootVolumeEncryptionEnabled": "

Indicates whether the data stored on the root volume is encrypted.

", + "WorkspaceRequest$UserVolumeEncryptionEnabled": "

Indicates whether the data stored on the user volume is encrypted.

", + "WorkspaceRequest$RootVolumeEncryptionEnabled": "

Indicates whether the data stored on the root volume is encrypted.

" } }, "BundleId": { "base": null, "refs": { "BundleIdList$member": null, - "DescribeWorkspacesRequest$BundleId": "

The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that are created from this bundle will be retrieved. This parameter cannot be combined with any other filter parameter.

", - "Workspace$BundleId": "

The identifier of the bundle that the WorkSpace was created from.

", + "DescribeWorkspacesRequest$BundleId": "

The ID of the bundle. All WorkSpaces that are created from this bundle are retrieved. This parameter cannot be combined with any other filter.

", + "Workspace$BundleId": "

The identifier of the bundle used to create the WorkSpace.

", "WorkspaceBundle$BundleId": "

The bundle identifier.

", - "WorkspaceRequest$BundleId": "

The identifier of the bundle to create the WorkSpace from. You can use the DescribeWorkspaceBundles operation to obtain a list of the bundles that are available.

" + "WorkspaceRequest$BundleId": "

The identifier of the bundle for the WorkSpace. You can use DescribeWorkspaceBundles to list the available bundles.

" } }, "BundleIdList": { "base": null, "refs": { - "DescribeWorkspaceBundlesRequest$BundleIds": "

An array of strings that contains the identifiers of the bundles to retrieve. This parameter cannot be combined with any other filter parameter.

" + "DescribeWorkspaceBundlesRequest$BundleIds": "

The IDs of the bundles. This parameter cannot be combined with any other filter.

" } }, "BundleList": { "base": null, "refs": { - "DescribeWorkspaceBundlesResult$Bundles": "

An array of structures that contain information about the bundles.

" + "DescribeWorkspaceBundlesResult$Bundles": "

Information about the bundles.

" } }, "BundleOwner": { "base": null, "refs": { - "DescribeWorkspaceBundlesRequest$Owner": "

The owner of the bundles to retrieve. This parameter cannot be combined with any other filter parameter.

This contains one of the following values:

", - "WorkspaceBundle$Owner": "

The owner of the bundle. This contains the owner's account identifier, or AMAZON if the bundle is provided by AWS.

" + "DescribeWorkspaceBundlesRequest$Owner": "

The owner of the bundles. This parameter cannot be combined with any other filter.

Specify AMAZON to describe the bundles provided by AWS or null to describe the bundles that belong to your account.

", + "WorkspaceBundle$Owner": "

The owner of the bundle. This is the account identifier of the owner, or AMAZON if the bundle is provided by AWS.

" } }, "Compute": { "base": null, "refs": { - "ComputeType$Name": "

The name of the compute type for the bundle.

" + "ComputeType$Name": "

The compute type.

", + "WorkspaceProperties$ComputeTypeName": "

The compute type. For more information, see Amazon WorkSpaces Bundles.

" } }, "ComputeType": { - "base": "

Contains information about the compute type of a WorkSpace bundle.

", + "base": "

Information about the compute type.

", "refs": { - "WorkspaceBundle$ComputeType": "

A ComputeType object that specifies the compute type for the bundle.

" + "WorkspaceBundle$ComputeType": "

The compute type. For more information, see Amazon WorkSpaces Bundles.

" } }, "ComputerName": { "base": null, "refs": { - "Workspace$ComputerName": "

The name of the WorkSpace as seen by the operating system.

" + "Workspace$ComputerName": "

The name of the WorkSpace, as seen by the operating system.

" } }, "ConnectionState": { "base": null, "refs": { - "WorkspaceConnectionStatus$ConnectionState": "

The connection state of the WorkSpace. Returns UNKOWN if the WorkSpace is in a Stopped state.

" + "WorkspaceConnectionStatus$ConnectionState": "

The connection state of the WorkSpace. The connection state is unknown if the WorkSpace is stopped.

" } }, "CreateTagsRequest": { - "base": "

The request of the CreateTags operation.

", + "base": null, "refs": { } }, "CreateTagsResult": { - "base": "

The result of the CreateTags operation.

", + "base": null, "refs": { } }, "CreateWorkspacesRequest": { - "base": "

Contains the inputs for the CreateWorkspaces operation.

", + "base": null, "refs": { } }, "CreateWorkspacesResult": { - "base": "

Contains the result of the CreateWorkspaces operation.

", + "base": null, "refs": { } }, "DefaultOu": { "base": null, "refs": { - "DefaultWorkspaceCreationProperties$DefaultOu": "

The organizational unit (OU) in the directory that the WorkSpace machine accounts are placed in.

" + "DefaultWorkspaceCreationProperties$DefaultOu": "

The organizational unit (OU) in the directory for the WorkSpace machine accounts.

" } }, "DefaultWorkspaceCreationProperties": { - "base": "

Contains default WorkSpace creation information.

", + "base": "

Information about defaults used to create a WorkSpace.

", "refs": { - "WorkspaceDirectory$WorkspaceCreationProperties": "

A structure that specifies the default creation properties for all WorkSpaces in the directory.

" + "WorkspaceDirectory$WorkspaceCreationProperties": "

The default creation properties for all WorkSpaces in the directory.

" } }, "DeleteTagsRequest": { - "base": "

The request of the DeleteTags operation.

", + "base": null, "refs": { } }, "DeleteTagsResult": { - "base": "

The result of the DeleteTags operation.

", + "base": null, "refs": { } }, "DescribeTagsRequest": { - "base": "

The request of the DescribeTags operation.

", + "base": null, "refs": { } }, "DescribeTagsResult": { - "base": "

The result of the DescribeTags operation.

", + "base": null, "refs": { } }, "DescribeWorkspaceBundlesRequest": { - "base": "

Contains the inputs for the DescribeWorkspaceBundles operation.

", + "base": null, "refs": { } }, "DescribeWorkspaceBundlesResult": { - "base": "

Contains the results of the DescribeWorkspaceBundles operation.

", + "base": null, "refs": { } }, "DescribeWorkspaceDirectoriesRequest": { - "base": "

Contains the inputs for the DescribeWorkspaceDirectories operation.

", + "base": null, "refs": { } }, "DescribeWorkspaceDirectoriesResult": { - "base": "

Contains the results of the DescribeWorkspaceDirectories operation.

", + "base": null, "refs": { } }, @@ -183,12 +184,12 @@ } }, "DescribeWorkspacesRequest": { - "base": "

Contains the inputs for the DescribeWorkspaces operation.

", + "base": null, "refs": { } }, "DescribeWorkspacesResult": { - "base": "

Contains the results for the DescribeWorkspaces operation.

", + "base": null, "refs": { } }, @@ -197,30 +198,30 @@ "refs": { "FailedCreateWorkspaceRequest$ErrorMessage": "

The textual error message.

", "FailedWorkspaceChangeRequest$ErrorMessage": "

The textual error message.

", - "Workspace$ErrorMessage": "

If the WorkSpace could not be created, this contains a textual error message that describes the failure.

", - "WorkspaceBundle$Description": "

The bundle description.

" + "Workspace$ErrorMessage": "

If the WorkSpace could not be created, contains a textual error message that describes the failure.

", + "WorkspaceBundle$Description": "

A description.

" } }, "DirectoryId": { "base": null, "refs": { - "DescribeWorkspacesRequest$DirectoryId": "

Specifies the directory identifier to which to limit the WorkSpaces. Optionally, you can specify a specific directory user with the UserName parameter. This parameter cannot be combined with any other filter parameter.

", + "DescribeWorkspacesRequest$DirectoryId": "

The ID of the directory. In addition, you can optionally specify a specific directory user (see UserName). This parameter cannot be combined with any other filter.

", "DirectoryIdList$member": null, - "Workspace$DirectoryId": "

The identifier of the AWS Directory Service directory that the WorkSpace belongs to.

", + "Workspace$DirectoryId": "

The identifier of the AWS Directory Service directory for the WorkSpace.

", "WorkspaceDirectory$DirectoryId": "

The directory identifier.

", - "WorkspaceRequest$DirectoryId": "

The identifier of the AWS Directory Service directory to create the WorkSpace in. You can use the DescribeWorkspaceDirectories operation to obtain a list of the directories that are available.

" + "WorkspaceRequest$DirectoryId": "

The identifier of the AWS Directory Service directory for the WorkSpace. You can use DescribeWorkspaceDirectories to list the available directories.

" } }, "DirectoryIdList": { "base": null, "refs": { - "DescribeWorkspaceDirectoriesRequest$DirectoryIds": "

An array of strings that contains the directory identifiers to retrieve information for. If this member is null, all directories are retrieved.

" + "DescribeWorkspaceDirectoriesRequest$DirectoryIds": "

The identifiers of the directories. If the value is null, all directories are retrieved.

" } }, "DirectoryList": { "base": null, "refs": { - "DescribeWorkspaceDirectoriesResult$Directories": "

An array of structures that contain information about the directories.

" + "DescribeWorkspaceDirectoriesResult$Directories": "

Information about the directories.

" } }, "DirectoryName": { @@ -232,7 +233,7 @@ "DnsIpAddresses": { "base": null, "refs": { - "WorkspaceDirectory$DnsIpAddresses": "

An array of strings that contains the IP addresses of the DNS servers for the directory.

" + "WorkspaceDirectory$DnsIpAddresses": "

The IP addresses of the DNS servers for the directory.

" } }, "ErrorType": { @@ -256,7 +257,7 @@ } }, "FailedCreateWorkspaceRequest": { - "base": "

Contains information about a WorkSpace that could not be created.

", + "base": "

Information about a WorkSpace that could not be created.

", "refs": { "FailedCreateWorkspaceRequests$member": null } @@ -264,41 +265,41 @@ "FailedCreateWorkspaceRequests": { "base": null, "refs": { - "CreateWorkspacesResult$FailedRequests": "

An array of structures that represent the WorkSpaces that could not be created.

" + "CreateWorkspacesResult$FailedRequests": "

Information about the WorkSpaces that could not be created.

" } }, "FailedRebootWorkspaceRequests": { "base": null, "refs": { - "RebootWorkspacesResult$FailedRequests": "

An array of structures representing any WorkSpaces that could not be rebooted.

" + "RebootWorkspacesResult$FailedRequests": "

Information about the WorkSpaces that could not be rebooted.

" } }, "FailedRebuildWorkspaceRequests": { "base": null, "refs": { - "RebuildWorkspacesResult$FailedRequests": "

An array of structures representing any WorkSpaces that could not be rebuilt.

" + "RebuildWorkspacesResult$FailedRequests": "

Information about the WorkSpaces that could not be rebuilt.

" } }, "FailedStartWorkspaceRequests": { "base": null, "refs": { - "StartWorkspacesResult$FailedRequests": "

The failed requests.

" + "StartWorkspacesResult$FailedRequests": "

Information about the WorkSpaces that could not be started.

" } }, "FailedStopWorkspaceRequests": { "base": null, "refs": { - "StopWorkspacesResult$FailedRequests": "

The failed requests.

" + "StopWorkspacesResult$FailedRequests": "

Information about the WorkSpaces that could not be stopped.

" } }, "FailedTerminateWorkspaceRequests": { "base": null, "refs": { - "TerminateWorkspacesResult$FailedRequests": "

An array of structures representing any WorkSpaces that could not be terminated.

" + "TerminateWorkspacesResult$FailedRequests": "

Information about the WorkSpaces that could not be terminated.

" } }, "FailedWorkspaceChangeRequest": { - "base": "

Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), or stopped (StopWorkspaces).

", + "base": "

Information about a WorkSpace that could not be rebooted (RebootWorkspaces), rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), or stopped (StopWorkspaces).

", "refs": { "FailedRebootWorkspaceRequests$member": null, "FailedRebuildWorkspaceRequests$member": null, @@ -330,6 +331,30 @@ "DescribeWorkspacesRequest$Limit": "

The maximum number of items to return.

" } }, + "ModificationResourceEnum": { + "base": null, + "refs": { + "ModificationState$Resource": "

The resource.

" + } + }, + "ModificationState": { + "base": "

Information about a WorkSpace modification.

", + "refs": { + "ModificationStateList$member": null + } + }, + "ModificationStateEnum": { + "base": null, + "refs": { + "ModificationState$State": "

The modification state.

" + } + }, + "ModificationStateList": { + "base": null, + "refs": { + "Workspace$ModificationStates": "

The modification states of the WorkSpace.

" + } + }, "ModifyWorkspacePropertiesRequest": { "base": null, "refs": { @@ -343,13 +368,14 @@ "NonEmptyString": { "base": null, "refs": { - "CreateTagsRequest$ResourceId": "

The resource ID of the request.

", - "DeleteTagsRequest$ResourceId": "

The resource ID of the request.

", - "DescribeTagsRequest$ResourceId": "

The resource ID of the request.

", - "ResourceNotFoundException$ResourceId": "

The resource could not be found.

", + "CreateTagsRequest$ResourceId": "

The ID of the resource.

", + "DeleteTagsRequest$ResourceId": "

The ID of the resource.

", + "DescribeTagsRequest$ResourceId": "

The ID of the resource.

", + "ResourceNotFoundException$ResourceId": "

The ID of the resource that could not be found.

", "ResourceUnavailableException$ResourceId": "

The identifier of the resource that is not available.

", + "RootStorage$Capacity": "

The size of the root volume.

", "TagKeyList$member": null, - "UserStorage$Capacity": "

The amount of user storage for the bundle.

", + "UserStorage$Capacity": "

The size of the user storage.

", "WorkspaceBundle$Name": "

The name of the bundle.

" } }, @@ -361,18 +387,18 @@ "PaginationToken": { "base": null, "refs": { - "DescribeWorkspaceBundlesRequest$NextToken": "

The NextToken value from a previous call to this operation. Pass null if this is the first call.

", - "DescribeWorkspaceBundlesResult$NextToken": "

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that time frame.

", - "DescribeWorkspaceDirectoriesRequest$NextToken": "

The NextToken value from a previous call to this operation. Pass null if this is the first call.

", - "DescribeWorkspaceDirectoriesResult$NextToken": "

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that time frame.

", - "DescribeWorkspacesConnectionStatusRequest$NextToken": "

The next token of the request.

", - "DescribeWorkspacesConnectionStatusResult$NextToken": "

The next token of the result.

", - "DescribeWorkspacesRequest$NextToken": "

The NextToken value from a previous call to this operation. Pass null if this is the first call.

", - "DescribeWorkspacesResult$NextToken": "

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that time frame.

" + "DescribeWorkspaceBundlesRequest$NextToken": "

The token for the next set of results. (You received this token from a previous call.)

", + "DescribeWorkspaceBundlesResult$NextToken": "

The token to use to retrieve the next set of results, or null if there are no more results available. This token is valid for one day and must be used within that time frame.

", + "DescribeWorkspaceDirectoriesRequest$NextToken": "

The token for the next set of results. (You received this token from a previous call.)

", + "DescribeWorkspaceDirectoriesResult$NextToken": "

The token to use to retrieve the next set of results, or null if there are no more results available. This token is valid for one day and must be used within that time frame.

", + "DescribeWorkspacesConnectionStatusRequest$NextToken": "

The token for the next set of results. (You received this token from a previous call.)

", + "DescribeWorkspacesConnectionStatusResult$NextToken": "

The token to use to retrieve the next set of results, or null if there are no more results available.

", + "DescribeWorkspacesRequest$NextToken": "

The token for the next set of results. (You received this token from a previous call.)

", + "DescribeWorkspacesResult$NextToken": "

The token to use to retrieve the next set of results, or null if there are no more results available. This token is valid for one day and must be used within that time frame.

" } }, "RebootRequest": { - "base": "

Contains information used with the RebootWorkspaces operation to reboot a WorkSpace.

", + "base": "

Information used to reboot a WorkSpace.

", "refs": { "RebootWorkspaceRequests$member": null } @@ -380,21 +406,21 @@ "RebootWorkspaceRequests": { "base": null, "refs": { - "RebootWorkspacesRequest$RebootWorkspaceRequests": "

An array of structures that specify the WorkSpaces to reboot.

" + "RebootWorkspacesRequest$RebootWorkspaceRequests": "

The WorkSpaces to reboot.

" } }, "RebootWorkspacesRequest": { - "base": "

Contains the inputs for the RebootWorkspaces operation.

", + "base": null, "refs": { } }, "RebootWorkspacesResult": { - "base": "

Contains the results of the RebootWorkspaces operation.

", + "base": null, "refs": { } }, "RebuildRequest": { - "base": "

Contains information used with the RebuildWorkspaces operation to rebuild a WorkSpace.

", + "base": "

Information used to rebuild a WorkSpace.

", "refs": { "RebuildWorkspaceRequests$member": null } @@ -402,16 +428,16 @@ "RebuildWorkspaceRequests": { "base": null, "refs": { - "RebuildWorkspacesRequest$RebuildWorkspaceRequests": "

An array of structures that specify the WorkSpaces to rebuild.

" + "RebuildWorkspacesRequest$RebuildWorkspaceRequests": "

The WorkSpaces to rebuild.

" } }, "RebuildWorkspacesRequest": { - "base": "

Contains the inputs for the RebuildWorkspaces operation.

", + "base": null, "refs": { } }, "RebuildWorkspacesResult": { - "base": "

Contains the results of the RebuildWorkspaces operation.

", + "base": null, "refs": { } }, @@ -436,10 +462,22 @@ "refs": { } }, + "RootStorage": { + "base": "

Information about the root volume for a WorkSpace bundle.

", + "refs": { + "WorkspaceBundle$RootStorage": "

The size of the root volume.

" + } + }, + "RootVolumeSizeGib": { + "base": null, + "refs": { + "WorkspaceProperties$RootVolumeSizeGib": "

The size of the root volume.

" + } + }, "RunningMode": { "base": null, "refs": { - "WorkspaceProperties$RunningMode": "

The running mode of the WorkSpace. AlwaysOn WorkSpaces are billed monthly. AutoStop WorkSpaces are billed by the hour and stopped when no longer being used in order to save on costs.

" + "WorkspaceProperties$RunningMode": "

The running mode. For more information, see Manage the WorkSpace Running Mode.

" } }, "RunningModeAutoStopTimeoutInMinutes": { @@ -451,12 +489,12 @@ "SecurityGroupId": { "base": null, "refs": { - "DefaultWorkspaceCreationProperties$CustomSecurityGroupId": "

The identifier of any custom security groups that are applied to the WorkSpaces when they are created.

", + "DefaultWorkspaceCreationProperties$CustomSecurityGroupId": "

The identifier of any security groups to apply to WorkSpaces when they are created.

", "WorkspaceDirectory$WorkspaceSecurityGroupId": "

The identifier of the security group that is assigned to new WorkSpaces.

" } }, "StartRequest": { - "base": "

Describes the start request.

", + "base": "

Information used to start a WorkSpace.

", "refs": { "StartWorkspaceRequests$member": null } @@ -464,7 +502,7 @@ "StartWorkspaceRequests": { "base": null, "refs": { - "StartWorkspacesRequest$StartWorkspaceRequests": "

The requests.

" + "StartWorkspacesRequest$StartWorkspaceRequests": "

The WorkSpaces to start.

" } }, "StartWorkspacesRequest": { @@ -478,7 +516,7 @@ } }, "StopRequest": { - "base": "

Describes the stop request.

", + "base": "

Information used to stop a WorkSpace.

", "refs": { "StopWorkspaceRequests$member": null } @@ -486,7 +524,7 @@ "StopWorkspaceRequests": { "base": null, "refs": { - "StopWorkspacesRequest$StopWorkspaceRequests": "

The requests.

" + "StopWorkspacesRequest$StopWorkspaceRequests": "

The WorkSpaces to stop.

" } }, "StopWorkspacesRequest": { @@ -503,17 +541,17 @@ "base": null, "refs": { "SubnetIds$member": null, - "Workspace$SubnetId": "

The identifier of the subnet that the WorkSpace is in.

" + "Workspace$SubnetId": "

The identifier of the subnet for the WorkSpace.

" } }, "SubnetIds": { "base": null, "refs": { - "WorkspaceDirectory$SubnetIds": "

An array of strings that contains the identifiers of the subnets used with the directory.

" + "WorkspaceDirectory$SubnetIds": "

The identifiers of the subnets used with the directory.

" } }, "Tag": { - "base": "

Describes the tag of the WorkSpace.

", + "base": "

Information about a tag.

", "refs": { "TagList$member": null } @@ -527,15 +565,15 @@ "TagKeyList": { "base": null, "refs": { - "DeleteTagsRequest$TagKeys": "

The tag keys of the request.

" + "DeleteTagsRequest$TagKeys": "

The tag keys.

" } }, "TagList": { "base": null, "refs": { - "CreateTagsRequest$Tags": "

The tags of the request.

", - "DescribeTagsResult$TagList": "

The list of tags.

", - "WorkspaceRequest$Tags": "

The tags of the WorkSpace request.

" + "CreateTagsRequest$Tags": "

The tags. Each resource can have a maximum of 50 tags.

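A sketch of tagging a single WorkSpace within that limit; the key and value are illustrative:

// tagWorkspace attaches one tag to a WorkSpace.
func tagWorkspace(svc *workspaces.WorkSpaces, workspaceID string) error {
	_, err := svc.CreateTagsRequest(&workspaces.CreateTagsInput{
		ResourceId: aws.String(workspaceID),
		Tags: []workspaces.Tag{
			{Key: aws.String("Department"), Value: aws.String("Finance")},
		},
	}).Send()
	return err
}
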
", + "DescribeTagsResult$TagList": "

The tags.

", + "WorkspaceRequest$Tags": "

The tags for the WorkSpace.

" } }, "TagValue": { @@ -545,7 +583,7 @@ } }, "TerminateRequest": { - "base": "

Contains information used with the TerminateWorkspaces operation to terminate a WorkSpace.

", + "base": "

Information used to terminate a WorkSpace.

", "refs": { "TerminateWorkspaceRequests$member": null } @@ -553,16 +591,16 @@ "TerminateWorkspaceRequests": { "base": null, "refs": { - "TerminateWorkspacesRequest$TerminateWorkspaceRequests": "

An array of structures that specify the WorkSpaces to terminate.

" + "TerminateWorkspacesRequest$TerminateWorkspaceRequests": "

The WorkSpaces to terminate.

" } }, "TerminateWorkspacesRequest": { - "base": "

Contains the inputs for the TerminateWorkspaces operation.

", + "base": null, "refs": { } }, "TerminateWorkspacesResult": { - "base": "

Contains the results of the TerminateWorkspaces operation.

", + "base": null, "refs": { } }, @@ -581,16 +619,22 @@ "UserName": { "base": null, "refs": { - "DescribeWorkspacesRequest$UserName": "

Used with the DirectoryId parameter to specify the directory user for whom to obtain the WorkSpace.

", - "Workspace$UserName": "

The user that the WorkSpace is assigned to.

", + "DescribeWorkspacesRequest$UserName": "

The name of the directory user. You must specify this parameter with DirectoryId.

", + "Workspace$UserName": "

The user for the WorkSpace.

", "WorkspaceDirectory$CustomerUserName": "

The user name for the service account.

", - "WorkspaceRequest$UserName": "

The username that the WorkSpace is assigned to. This username must exist in the AWS Directory Service directory specified by the DirectoryId member.

" + "WorkspaceRequest$UserName": "

The username of the user for the WorkSpace. This username must exist in the AWS Directory Service directory for the WorkSpace.

" } }, "UserStorage": { - "base": "

Contains information about the user storage for a WorkSpace bundle.

", + "base": "

Information about the user storage for a WorkSpace bundle.

", + "refs": { + "WorkspaceBundle$UserStorage": "

The size of the user storage.

" + } + }, + "UserVolumeSizeGib": { + "base": null, "refs": { - "WorkspaceBundle$UserStorage": "

A UserStorage object that specifies the amount of user storage that the bundle contains.

" + "WorkspaceProperties$UserVolumeSizeGib": "

The size of the user storage.

" } }, "VolumeEncryptionKey": { @@ -601,13 +645,13 @@ } }, "Workspace": { - "base": "

Contains information about a WorkSpace.

", + "base": "

Information about a WorkSpace.

", "refs": { "WorkspaceList$member": null } }, "WorkspaceBundle": { - "base": "

Contains information about a WorkSpace bundle.

", + "base": "

Information about a WorkSpace bundle.

", "refs": { "BundleList$member": null } @@ -621,7 +665,7 @@ "WorkspaceConnectionStatusList": { "base": null, "refs": { - "DescribeWorkspacesConnectionStatusResult$WorkspacesConnectionStatus": "

The connection status of the WorkSpace.

" + "DescribeWorkspacesConnectionStatusResult$WorkspacesConnectionStatus": "

Information about the connection status of the WorkSpace.

" } }, "WorkspaceDirectory": { @@ -645,7 +689,7 @@ "WorkspaceErrorCode": { "base": null, "refs": { - "Workspace$ErrorCode": "

If the WorkSpace could not be created, this contains the error code.

" + "Workspace$ErrorCode": "

If the WorkSpace could not be created, contains the error code.

" } }, "WorkspaceId": { @@ -653,11 +697,11 @@ "refs": { "FailedWorkspaceChangeRequest$WorkspaceId": "

The identifier of the WorkSpace.

", "ModifyWorkspacePropertiesRequest$WorkspaceId": "

The ID of the WorkSpace.

", - "RebootRequest$WorkspaceId": "

The identifier of the WorkSpace to reboot.

", - "RebuildRequest$WorkspaceId": "

The identifier of the WorkSpace to rebuild.

", + "RebootRequest$WorkspaceId": "

The identifier of the WorkSpace.

", + "RebuildRequest$WorkspaceId": "

The identifier of the WorkSpace.

", "StartRequest$WorkspaceId": "

The ID of the WorkSpace.

", "StopRequest$WorkspaceId": "

The ID of the WorkSpace.

", - "TerminateRequest$WorkspaceId": "

The identifier of the WorkSpace to terminate.

", + "TerminateRequest$WorkspaceId": "

The identifier of the WorkSpace.

", "Workspace$WorkspaceId": "

The identifier of the WorkSpace.

", "WorkspaceConnectionStatus$WorkspaceId": "

The ID of the WorkSpace.

", "WorkspaceIdList$member": null @@ -666,36 +710,36 @@ "WorkspaceIdList": { "base": null, "refs": { - "DescribeWorkspacesConnectionStatusRequest$WorkspaceIds": "

An array of strings that contain the identifiers of the WorkSpaces.

", - "DescribeWorkspacesRequest$WorkspaceIds": "

An array of strings that contain the identifiers of the WorkSpaces for which to retrieve information. This parameter cannot be combined with any other filter parameter.

Because the CreateWorkspaces operation is asynchronous, the identifier it returns is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information is returned.

" + "DescribeWorkspacesConnectionStatusRequest$WorkspaceIds": "

The identifiers of the WorkSpaces.

", + "DescribeWorkspacesRequest$WorkspaceIds": "

The IDs of the WorkSpaces. This parameter cannot be combined with any other filter.

Because the CreateWorkspaces operation is asynchronous, the identifier it returns is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information is returned.

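One way to handle that window is to poll until the identifier resolves; a sketch, assuming the generated workspaces shapes plus the fmt and time packages (the retry count and interval are arbitrary):

// waitForWorkspace polls until a freshly created WorkSpace ID is visible.
func waitForWorkspace(svc *workspaces.WorkSpaces, id string) (*workspaces.Workspace, error) {
	for attempt := 0; attempt < 30; attempt++ {
		resp, err := svc.DescribeWorkspacesRequest(&workspaces.DescribeWorkspacesInput{
			WorkspaceIds: []string{id}, // cannot be combined with any other filter
		}).Send()
		if err != nil {
			return nil, err
		}
		if len(resp.Workspaces) > 0 {
			return &resp.Workspaces[0], nil
		}
		time.Sleep(10 * time.Second) // identifier not yet propagated
	}
	return nil, fmt.Errorf("workspace %s not visible after polling", id)
}
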
" } }, "WorkspaceList": { "base": null, "refs": { - "CreateWorkspacesResult$PendingRequests": "

An array of structures that represent the WorkSpaces that were created.

Because this operation is asynchronous, the identifier in WorkspaceId is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information will be returned.

", - "DescribeWorkspacesResult$Workspaces": "

An array of structures that contain the information about the WorkSpaces.

Because the CreateWorkspaces operation is asynchronous, some of this information may be incomplete for a newly-created WorkSpace.

" + "CreateWorkspacesResult$PendingRequests": "

Information about the WorkSpaces that were created.

Because this operation is asynchronous, the identifier returned is not immediately available for use with other operations. For example, if you call DescribeWorkspaces before the WorkSpace is created, the information returned can be incomplete.

", + "DescribeWorkspacesResult$Workspaces": "

Information about the WorkSpaces.

Because CreateWorkspaces is an asynchronous operation, some of the returned information could be incomplete.

" } }, "WorkspaceProperties": { - "base": "

Describes the properties of a WorkSpace.

", + "base": "

Information about a WorkSpace.

", "refs": { - "ModifyWorkspacePropertiesRequest$WorkspaceProperties": "

The WorkSpace properties of the request.

", - "Workspace$WorkspaceProperties": null, - "WorkspaceRequest$WorkspaceProperties": null + "ModifyWorkspacePropertiesRequest$WorkspaceProperties": "

The properties of the WorkSpace.

", + "Workspace$WorkspaceProperties": "

The properties of the WorkSpace.

", + "WorkspaceRequest$WorkspaceProperties": "

The WorkSpace properties.

" } }, "WorkspaceRequest": { - "base": "

Contains information about a WorkSpace creation request.

", + "base": "

Information used to create a WorkSpace.

", "refs": { - "FailedCreateWorkspaceRequest$WorkspaceRequest": "

A FailedCreateWorkspaceRequest$WorkspaceRequest object that contains the information about the WorkSpace that could not be created.

", + "FailedCreateWorkspaceRequest$WorkspaceRequest": "

Information about the WorkSpace.

", "WorkspaceRequestList$member": null } }, "WorkspaceRequestList": { "base": null, "refs": { - "CreateWorkspacesRequest$Workspaces": "

An array of structures that specify the WorkSpaces to create.

" + "CreateWorkspacesRequest$Workspaces": "

Information about the WorkSpaces to create.

" } }, "WorkspaceState": { diff --git a/service/applicationdiscoveryservice/api.go b/service/applicationdiscoveryservice/api.go index 4097c25f25f..4c1ad46b31d 100644 --- a/service/applicationdiscoveryservice/api.go +++ b/service/applicationdiscoveryservice/api.go @@ -848,13 +848,13 @@ func (r StartExportTaskRequest) Send() (*StartExportTaskOutput, error) { // // Begins the export of discovered data to an S3 bucket. // -// If you specify agentId in a filter, the task exports up to 72 hours of detailed +// If you specify agentIds in a filter, the task exports up to 72 hours of detailed // data collected by the identified Application Discovery Agent, including network, // process, and performance details. A time range for exported agent data may // be set by using startTime and endTime. Export of detailed agent data is limited // to five concurrently running exports. // -// If you do not include an agentId filter, summary data is exported that includes +// If you do not include an agentIds filter, summary data is exported that includes // both AWS Agentless Discovery Connector data and summary data from AWS Discovery // Agents. Export of summary data is limited to two exports per day. // @@ -1410,7 +1410,7 @@ type CreateTagsInput struct { // {"key": "serverType", "value": "webServer"} // // Tags is a required field - Tags []Tag `locationName:"tags" locationNameList:"item" type:"list" required:"true"` + Tags []Tag `locationName:"tags" type:"list" required:"true"` } // String returns the string representation @@ -1743,7 +1743,7 @@ type DeleteTagsInput struct { // the tags that you want to delete in a key-value format. For example: // // {"key": "serverType", "value": "webServer"} - Tags []Tag `locationName:"tags" locationNameList:"item" type:"list"` + Tags []Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -2289,7 +2289,7 @@ type DescribeTagsOutput struct { // Depending on the input, this is a list of configuration items tagged with // a specific tag, or a list of tags for a specific configuration item. - Tags []ConfigurationTag `locationName:"tags" locationNameList:"item" type:"list"` + Tags []ConfigurationTag `locationName:"tags" type:"list"` } // String returns the string representation @@ -2464,7 +2464,7 @@ type ExportFilter struct { // action. Typically an ADS agentId is in the form o-0123456789abcdef0. // // Values is a required field - Values []string `locationName:"values" locationNameList:"item" type:"list" required:"true"` + Values []string `locationName:"values" type:"list" required:"true"` } // String returns the string representation @@ -2647,7 +2647,7 @@ type Filter struct { // filter name, you could specify Ubuntu for the value. // // Values is a required field - Values []string `locationName:"values" locationNameList:"item" type:"list" required:"true"` + Values []string `locationName:"values" type:"list" required:"true"` } // String returns the string representation @@ -3275,7 +3275,8 @@ type StartExportTaskInput struct { // the most recent data collected by the agent. EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"unix"` - // The file format for the returned export data. Default value is CSV. + // The file format for the returned export data. Default value is CSV. Note:TheGRAPHMLoption + // has been deprecated. 
ExportDataFormat []ExportDataFormat `locationName:"exportDataFormat" type:"list"` // If a filter is present, it selects the single agentId of the Application @@ -3515,7 +3516,7 @@ type TagFilter struct { // Values for the tag filter. // // Values is a required field - Values []string `locationName:"values" locationNameList:"item" type:"list" required:"true"` + Values []string `locationName:"values" type:"list" required:"true"` } // String returns the string representation diff --git a/service/applicationdiscoveryservice/doc.go b/service/applicationdiscoveryservice/doc.go index 77fc842e6cf..fe2b55be70b 100644 --- a/service/applicationdiscoveryservice/doc.go +++ b/service/applicationdiscoveryservice/doc.go @@ -50,9 +50,8 @@ // // Your AWS account must be granted access to Application Discovery Service, // a process called whitelisting. This is true for AWS partners and customers -// alike. To request access, sign up for AWS Application Discovery Service here -// (http://aws.amazon.com/application-discovery/preview/). We send you information -// about how to get started. +// alike. To request access, sign up for AWS Application Discovery Service +// (http://aws.amazon.com/application-discovery/how-to-start/). // // This API reference provides descriptions, syntax, and usage examples for // each of the actions and data types for Application Discovery Service. The diff --git a/service/codebuild/api.go b/service/codebuild/api.go index c05e6e5cb79..d59a38244ab 100644 --- a/service/codebuild/api.go +++ b/service/codebuild/api.go @@ -1876,6 +1876,9 @@ type EnvironmentImage struct { // The name of the Docker image. Name *string `locationName:"name" type:"string"` + + // A list of environment image versions. + Versions []string `locationName:"versions" type:"list"` } // String returns the string representation @@ -1900,6 +1903,12 @@ func (s *EnvironmentImage) SetName(v string) *EnvironmentImage { return s } +// SetVersions sets the Versions field's value. +func (s *EnvironmentImage) SetVersions(v []string) *EnvironmentImage { + s.Versions = v + return s +} + // A set of Docker images that are related by programming language and are managed // by AWS CodeBuild. // Please also see https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/EnvironmentLanguage @@ -2652,10 +2661,7 @@ type Project struct { // The default is 60 minutes. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` - // If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide - // this parameter that identifies the VPC ID and the list of security group - // IDs and subnet IDs. The security groups and subnets must belong to the same - // VPC. You must provide at least one security group and one subnet ID. + // Information about the VPC configuration that AWS CodeBuild will access. VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` // Information about a webhook in GitHub that connects repository events to @@ -3844,10 +3850,7 @@ func (s *UpdateProjectOutput) SetProject(v *Project) *UpdateProjectOutput { return s } -// If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide -// this parameter that identifies the VPC ID and the list of security group -// IDs and subnet IDs. The security groups and subnets must belong to the same -// VPC. You must provide at least one security group and one subnet ID. +// Information about the VPC configuration that AWS CodeBuild will access. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/VpcConfig type VpcConfig struct { _ struct{} `type:"structure"` diff --git a/service/codedeploy/api.go b/service/codedeploy/api.go index a7a1c3bd290..aaab7cee2b0 100644 --- a/service/codedeploy/api.go +++ b/service/codedeploy/api.go @@ -763,6 +763,55 @@ func (c *CodeDeploy) DeleteDeploymentGroupRequest(input *DeleteDeploymentGroupIn return DeleteDeploymentGroupRequest{Request: req, Input: input} } +const opDeleteGitHubAccountToken = "DeleteGitHubAccountToken" + +// DeleteGitHubAccountTokenRequest is a API request type for the DeleteGitHubAccountToken API operation. +type DeleteGitHubAccountTokenRequest struct { + *aws.Request + Input *DeleteGitHubAccountTokenInput +} + +// Send marshals and sends the DeleteGitHubAccountToken API request. +func (r DeleteGitHubAccountTokenRequest) Send() (*DeleteGitHubAccountTokenOutput, error) { + err := r.Request.Send() + if err != nil { + return nil, err + } + + return r.Request.Data.(*DeleteGitHubAccountTokenOutput), nil +} + +// DeleteGitHubAccountTokenRequest returns a request value for making API operation for +// AWS CodeDeploy. +// +// Deletes a GitHub account connection. +// +// // Example sending a request using the DeleteGitHubAccountTokenRequest method. +// req := client.DeleteGitHubAccountTokenRequest(params) +// resp, err := req.Send() +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteGitHubAccountToken +func (c *CodeDeploy) DeleteGitHubAccountTokenRequest(input *DeleteGitHubAccountTokenInput) DeleteGitHubAccountTokenRequest { + op := &aws.Operation{ + Name: opDeleteGitHubAccountToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGitHubAccountTokenInput{} + } + + output := &DeleteGitHubAccountTokenOutput{} + req := c.newRequest(op, input, output) + output.responseMetadata = aws.Response{Request: req} + + return DeleteGitHubAccountTokenRequest{Request: req, Input: input} +} + const opDeregisterOnPremisesInstance = "DeregisterOnPremisesInstance" // DeregisterOnPremisesInstanceRequest is a API request type for the DeregisterOnPremisesInstance API operation. @@ -1892,6 +1941,57 @@ func (c *CodeDeploy) ListOnPremisesInstancesRequest(input *ListOnPremisesInstanc return ListOnPremisesInstancesRequest{Request: req, Input: input} } +const opPutLifecycleEventHookExecutionStatus = "PutLifecycleEventHookExecutionStatus" + +// PutLifecycleEventHookExecutionStatusRequest is a API request type for the PutLifecycleEventHookExecutionStatus API operation. +type PutLifecycleEventHookExecutionStatusRequest struct { + *aws.Request + Input *PutLifecycleEventHookExecutionStatusInput +} + +// Send marshals and sends the PutLifecycleEventHookExecutionStatus API request. +func (r PutLifecycleEventHookExecutionStatusRequest) Send() (*PutLifecycleEventHookExecutionStatusOutput, error) { + err := r.Request.Send() + if err != nil { + return nil, err + } + + return r.Request.Data.(*PutLifecycleEventHookExecutionStatusOutput), nil +} + +// PutLifecycleEventHookExecutionStatusRequest returns a request value for making API operation for +// AWS CodeDeploy. +// +// Sets the result of a Lambda validation function. The function validates one +// or both lifecycle events (BeforeAllowTraffic and AfterAllowTraffic) and returns +// Succeeded or Failed. +// +// // Example sending a request using the PutLifecycleEventHookExecutionStatusRequest method. 
+// req := client.PutLifecycleEventHookExecutionStatusRequest(params) +// resp, err := req.Send() +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/PutLifecycleEventHookExecutionStatus +func (c *CodeDeploy) PutLifecycleEventHookExecutionStatusRequest(input *PutLifecycleEventHookExecutionStatusInput) PutLifecycleEventHookExecutionStatusRequest { + op := &aws.Operation{ + Name: opPutLifecycleEventHookExecutionStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutLifecycleEventHookExecutionStatusInput{} + } + + output := &PutLifecycleEventHookExecutionStatusOutput{} + req := c.newRequest(op, input, output) + output.responseMetadata = aws.Response{Request: req} + + return PutLifecycleEventHookExecutionStatusRequest{Request: req, Input: input} +} + const opRegisterApplicationRevision = "RegisterApplicationRevision" // RegisterApplicationRevisionRequest is a API request type for the RegisterApplicationRevision API operation. @@ -2419,6 +2519,10 @@ type ApplicationInfo struct { // The application name. ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + // The destination platform type for deployment of the application (Lambda or + // Server). + ComputePlatform ComputePlatform `locationName:"computePlatform" type:"string" enum:"true"` + // The time at which the application was created. CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` @@ -2452,6 +2556,12 @@ func (s *ApplicationInfo) SetApplicationName(v string) *ApplicationInfo { return s } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *ApplicationInfo) SetComputePlatform(v ComputePlatform) *ApplicationInfo { + s.ComputePlatform = v + return s +} + // SetCreateTime sets the CreateTime field's value. func (s *ApplicationInfo) SetCreateTime(v time.Time) *ApplicationInfo { s.CreateTime = &v @@ -2655,7 +2765,9 @@ type BatchGetApplicationsInput struct { _ struct{} `type:"structure"` // A list of application names separated by spaces. - ApplicationNames []string `locationName:"applicationNames" type:"list"` + // + // ApplicationNames is a required field + ApplicationNames []string `locationName:"applicationNames" type:"list" required:"true"` } // String returns the string representation @@ -2668,6 +2780,20 @@ func (s BatchGetApplicationsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetApplicationsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchGetApplicationsInput"} + + if s.ApplicationNames == nil { + invalidParams.Add(aws.NewErrParamRequired("ApplicationNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetApplicationNames sets the ApplicationNames field's value. func (s *BatchGetApplicationsInput) SetApplicationNames(v []string) *BatchGetApplicationsInput { s.ApplicationNames = v @@ -2910,7 +3036,9 @@ type BatchGetDeploymentsInput struct { _ struct{} `type:"structure"` // A list of deployment IDs, separated by spaces. 
-	DeploymentIds []string `locationName:"deploymentIds" type:"list"`
+	//
+	// DeploymentIds is a required field
+	DeploymentIds []string `locationName:"deploymentIds" type:"list" required:"true"`
 }
 
 // String returns the string representation
@@ -2923,6 +3051,20 @@ func (s BatchGetDeploymentsInput) GoString() string {
 	return s.String()
 }
 
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetDeploymentsInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "BatchGetDeploymentsInput"}
+
+	if s.DeploymentIds == nil {
+		invalidParams.Add(aws.NewErrParamRequired("DeploymentIds"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
 // SetDeploymentIds sets the DeploymentIds field's value.
 func (s *BatchGetDeploymentsInput) SetDeploymentIds(v []string) *BatchGetDeploymentsInput {
 	s.DeploymentIds = v
@@ -2967,7 +3109,9 @@ type BatchGetOnPremisesInstancesInput struct {
 	_ struct{} `type:"structure"`
 
 	// The names of the on-premises instances about which to get information.
-	InstanceNames []string `locationName:"instanceNames" type:"list"`
+	//
+	// InstanceNames is a required field
+	InstanceNames []string `locationName:"instanceNames" type:"list" required:"true"`
 }
 
 // String returns the string representation
@@ -2980,6 +3124,20 @@ func (s BatchGetOnPremisesInstancesInput) GoString() string {
 	return s.String()
 }
 
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetOnPremisesInstancesInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "BatchGetOnPremisesInstancesInput"}
+
+	if s.InstanceNames == nil {
+		invalidParams.Add(aws.NewErrParamRequired("InstanceNames"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
 // SetInstanceNames sets the InstanceNames field's value.
 func (s *BatchGetOnPremisesInstancesInput) SetInstanceNames(v []string) *BatchGetOnPremisesInstancesInput {
 	s.InstanceNames = v
@@ -3163,6 +3321,9 @@ type CreateApplicationInput struct {
 	//
 	// ApplicationName is a required field
 	ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"`
+
+	// The destination platform type for the deployment (Lambda or Server).
+	ComputePlatform ComputePlatform `locationName:"computePlatform" type:"string" enum:"true"`
 }
 
 // String returns the string representation
@@ -3198,6 +3359,12 @@ func (s *CreateApplicationInput) SetApplicationName(v string) *CreateApplication
 	return s
 }
 
+// SetComputePlatform sets the ComputePlatform field's value.
+func (s *CreateApplicationInput) SetComputePlatform(v ComputePlatform) *CreateApplicationInput {
+	s.ComputePlatform = v
+	return s
+}
+
 // Represents the output of a CreateApplication operation.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateApplicationOutput
 type CreateApplicationOutput struct {
@@ -3235,6 +3402,9 @@ func (s *CreateApplicationOutput) SetApplicationId(v string) *CreateApplicationO
 type CreateDeploymentConfigInput struct {
 	_ struct{} `type:"structure"`
 
+	// The destination platform type for the deployment (Lambda or Server).
+	ComputePlatform ComputePlatform `locationName:"computePlatform" type:"string" enum:"true"`
+
 	// The name of the deployment configuration to create.
// // DeploymentConfigName is a required field @@ -3259,9 +3429,10 @@ type CreateDeploymentConfigInput struct { // // For example, to set a minimum of 95% healthy instance, specify a type of // FLEET_PERCENT and a value of 95. - // - // MinimumHealthyHosts is a required field - MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure" required:"true"` + MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` + + // The configuration that specifies how the deployment traffic will be routed. + TrafficRoutingConfig *TrafficRoutingConfig `locationName:"trafficRoutingConfig" type:"structure"` } // String returns the string representation @@ -3285,16 +3456,18 @@ func (s *CreateDeploymentConfigInput) Validate() error { invalidParams.Add(aws.NewErrParamMinLen("DeploymentConfigName", 1)) } - if s.MinimumHealthyHosts == nil { - invalidParams.Add(aws.NewErrParamRequired("MinimumHealthyHosts")) - } - if invalidParams.Len() > 0 { return invalidParams } return nil } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *CreateDeploymentConfigInput) SetComputePlatform(v ComputePlatform) *CreateDeploymentConfigInput { + s.ComputePlatform = v + return s +} + // SetDeploymentConfigName sets the DeploymentConfigName field's value. func (s *CreateDeploymentConfigInput) SetDeploymentConfigName(v string) *CreateDeploymentConfigInput { s.DeploymentConfigName = &v @@ -3307,6 +3480,12 @@ func (s *CreateDeploymentConfigInput) SetMinimumHealthyHosts(v *MinimumHealthyHo return s } +// SetTrafficRoutingConfig sets the TrafficRoutingConfig field's value. +func (s *CreateDeploymentConfigInput) SetTrafficRoutingConfig(v *TrafficRoutingConfig) *CreateDeploymentConfigInput { + s.TrafficRoutingConfig = v + return s +} + // Represents the output of a CreateDeploymentConfig operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeploymentConfigOutput type CreateDeploymentConfigOutput struct { @@ -4012,11 +4191,71 @@ func (s *DeleteDeploymentGroupOutput) SetHooksNotCleanedUp(v []AutoScalingGroup) return s } +// Represents the input of a DeleteGitHubAccount operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteGitHubAccountTokenInput +type DeleteGitHubAccountTokenInput struct { + _ struct{} `type:"structure"` + + // The name of the GitHub account connection to delete. + TokenName *string `locationName:"tokenName" type:"string"` +} + +// String returns the string representation +func (s DeleteGitHubAccountTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGitHubAccountTokenInput) GoString() string { + return s.String() +} + +// SetTokenName sets the TokenName field's value. +func (s *DeleteGitHubAccountTokenInput) SetTokenName(v string) *DeleteGitHubAccountTokenInput { + s.TokenName = &v + return s +} + +// Represents the output of a DeleteGitHubAccountToken operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteGitHubAccountTokenOutput +type DeleteGitHubAccountTokenOutput struct { + _ struct{} `type:"structure"` + + responseMetadata aws.Response + + // The name of the GitHub account connection that was deleted. 
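MinimumHealthyHosts is no longer required above because a Lambda deployment configuration describes traffic shifting rather than host counts. A hedged sketch of creating one with the new TrafficRoutingConfig; the config name is a placeholder and svc is a codedeploy client built as in the earlier sketch:

```go
import (
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

func createCanaryConfig(svc *codedeploy.CodeDeploy) {
	// Shift 10% of traffic first, then the remaining 90% five minutes later.
	req := svc.CreateDeploymentConfigRequest(&codedeploy.CreateDeploymentConfigInput{
		DeploymentConfigName: aws.String("Custom.LambdaCanary10Percent5Minutes"),
		ComputePlatform:      codedeploy.ComputePlatformLambda,
		TrafficRoutingConfig: &codedeploy.TrafficRoutingConfig{
			Type: codedeploy.TrafficRoutingTypeTimeBasedCanary,
			TimeBasedCanary: &codedeploy.TimeBasedCanary{
				CanaryPercentage: aws.Int64(10), // first increment
				CanaryInterval:   aws.Int64(5),  // minutes between the two shifts
			},
		},
	})
	if _, err := req.Send(); err != nil {
		log.Fatalf("failed to create deployment config, %v", err)
	}
}
```

The TrafficRoutingConfig, TimeBasedCanary, and enum types referenced here are all defined later in this diff.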
+	TokenName *string `locationName:"tokenName" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteGitHubAccountTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteGitHubAccountTokenOutput) GoString() string {
+	return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s DeleteGitHubAccountTokenOutput) SDKResponseMetadata() aws.Response {
+	return s.responseMetadata
+}
+
+// SetTokenName sets the TokenName field's value.
+func (s *DeleteGitHubAccountTokenOutput) SetTokenName(v string) *DeleteGitHubAccountTokenOutput {
+	s.TokenName = &v
+	return s
+}
+
 // Information about a deployment configuration.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeploymentConfigInfo
 type DeploymentConfigInfo struct {
 	_ struct{} `type:"structure"`
 
+	// The destination platform type for the deployment (Lambda or Server).
+	ComputePlatform ComputePlatform `locationName:"computePlatform" type:"string" enum:"true"`
+
 	// The time at which the deployment configuration was created.
 	CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"`
 
@@ -4028,6 +4267,10 @@ type DeploymentConfigInfo struct {
 
 	// Information about the number or percentage of minimum healthy instance.
 	MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"`
+
+	// The configuration specifying how the deployment traffic will be routed. Only
+	// deployments with a Lambda compute platform can specify this.
+	TrafficRoutingConfig *TrafficRoutingConfig `locationName:"trafficRoutingConfig" type:"structure"`
 }
 
 // String returns the string representation
@@ -4040,6 +4283,12 @@ func (s DeploymentConfigInfo) GoString() string {
 	return s.String()
 }
 
+// SetComputePlatform sets the ComputePlatform field's value.
+func (s *DeploymentConfigInfo) SetComputePlatform(v ComputePlatform) *DeploymentConfigInfo {
+	s.ComputePlatform = v
+	return s
+}
+
 // SetCreateTime sets the CreateTime field's value.
 func (s *DeploymentConfigInfo) SetCreateTime(v time.Time) *DeploymentConfigInfo {
 	s.CreateTime = &v
@@ -4064,6 +4313,12 @@ func (s *DeploymentConfigInfo) SetMinimumHealthyHosts(v *MinimumHealthyHosts) *D
 	return s
 }
 
+// SetTrafficRoutingConfig sets the TrafficRoutingConfig field's value.
+func (s *DeploymentConfigInfo) SetTrafficRoutingConfig(v *TrafficRoutingConfig) *DeploymentConfigInfo {
+	s.TrafficRoutingConfig = v
+	return s
+}
+
 // Information about a deployment group.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeploymentGroupInfo
 type DeploymentGroupInfo struct {
@@ -4085,6 +4340,9 @@ type DeploymentGroupInfo struct {
 
 	// Information about blue/green deployment options for a deployment group.
 	BlueGreenDeploymentConfiguration *BlueGreenDeploymentConfiguration `locationName:"blueGreenDeploymentConfiguration" type:"structure"`
 
+	// The destination platform type for the deployment group (Lambda or Server).
+	ComputePlatform ComputePlatform `locationName:"computePlatform" type:"string" enum:"true"`
+
 	// The deployment configuration name.
 	DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"`
 
@@ -4178,6 +4436,12 @@ func (s *DeploymentGroupInfo) SetBlueGreenDeploymentConfiguration(v *BlueGreenDe
 	return s
 }
 
+// SetComputePlatform sets the ComputePlatform field's value.
+func (s *DeploymentGroupInfo) SetComputePlatform(v ComputePlatform) *DeploymentGroupInfo { + s.ComputePlatform = v + return s +} + // SetDeploymentConfigName sets the DeploymentConfigName field's value. func (s *DeploymentGroupInfo) SetDeploymentConfigName(v string) *DeploymentGroupInfo { s.DeploymentConfigName = &v @@ -4269,7 +4533,7 @@ type DeploymentInfo struct { // Provides information about the results of a deployment, such as whether instances // in the original environment in a blue/green deployment were not terminated. - AdditionalDeploymentStatusInfo *string `locationName:"additionalDeploymentStatusInfo" type:"string"` + AdditionalDeploymentStatusInfo *string `locationName:"additionalDeploymentStatusInfo" deprecated:"true" type:"string"` // The application name. ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` @@ -4284,6 +4548,9 @@ type DeploymentInfo struct { // A timestamp indicating when the deployment was complete. CompleteTime *time.Time `locationName:"completeTime" type:"timestamp" timestampFormat:"unix"` + // The destination platform type for the deployment (Lambda or Server). + ComputePlatform ComputePlatform `locationName:"computePlatform" type:"string" enum:"true"` + // A timestamp indicating when the deployment was created. CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` @@ -4308,6 +4575,9 @@ type DeploymentInfo struct { // A summary of the deployment status of the instances in the deployment. DeploymentOverview *DeploymentOverview `locationName:"deploymentOverview" type:"structure"` + // Messages that contain information about the status of a deployment. + DeploymentStatusMessages []string `locationName:"deploymentStatusMessages" type:"list"` + // Information about the type of deployment, either in-place or blue/green, // you want to run and whether to route deployment traffic behind a load balancer. DeploymentStyle *DeploymentStyle `locationName:"deploymentStyle" type:"structure"` @@ -4423,6 +4693,12 @@ func (s *DeploymentInfo) SetCompleteTime(v time.Time) *DeploymentInfo { return s } +// SetComputePlatform sets the ComputePlatform field's value. +func (s *DeploymentInfo) SetComputePlatform(v ComputePlatform) *DeploymentInfo { + s.ComputePlatform = v + return s +} + // SetCreateTime sets the CreateTime field's value. func (s *DeploymentInfo) SetCreateTime(v time.Time) *DeploymentInfo { s.CreateTime = &v @@ -4459,6 +4735,12 @@ func (s *DeploymentInfo) SetDeploymentOverview(v *DeploymentOverview) *Deploymen return s } +// SetDeploymentStatusMessages sets the DeploymentStatusMessages field's value. +func (s *DeploymentInfo) SetDeploymentStatusMessages(v []string) *DeploymentInfo { + s.DeploymentStatusMessages = v + return s +} + // SetDeploymentStyle sets the DeploymentStyle field's value. func (s *DeploymentInfo) SetDeploymentStyle(v *DeploymentStyle) *DeploymentInfo { s.DeploymentStyle = v @@ -4915,7 +5197,7 @@ type ELBInfo struct { // For blue/green deployments, the name of the load balancer that will be used // to route traffic from original instances to replacement instances in a blue/green // deployment. For in-place deployments, the name of the load balancer that - // instances are deregistered from, so they are not serving traffic during a + // instances are deregistered from so they are not serving traffic during a // deployment, and then re-registered with after the deployment completes. 
 	Name *string `locationName:"name" type:"string"`
 }
 
@@ -6995,6 +7277,121 @@ func (s *OnPremisesTagSet) SetOnPremisesTagSetList(v [][]TagFilter) *OnPremisesT
 	return s
 }
 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/PutLifecycleEventHookExecutionStatusInput
+type PutLifecycleEventHookExecutionStatusInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the deployment. Pass this ID to a Lambda function that validates
+	// a deployment lifecycle event.
+	DeploymentId *string `locationName:"deploymentId" type:"string"`
+
+	// The execution ID of a deployment's lifecycle hook. A deployment lifecycle
+	// hook is specified in the hooks section of the AppSpec file.
+	LifecycleEventHookExecutionId *string `locationName:"lifecycleEventHookExecutionId" type:"string"`
+
+	// The result of a Lambda function that validates a deployment lifecycle event
+	// (Succeeded or Failed).
+	Status LifecycleEventStatus `locationName:"status" type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s PutLifecycleEventHookExecutionStatusInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecycleEventHookExecutionStatusInput) GoString() string {
+	return s.String()
+}
+
+// SetDeploymentId sets the DeploymentId field's value.
+func (s *PutLifecycleEventHookExecutionStatusInput) SetDeploymentId(v string) *PutLifecycleEventHookExecutionStatusInput {
+	s.DeploymentId = &v
+	return s
+}
+
+// SetLifecycleEventHookExecutionId sets the LifecycleEventHookExecutionId field's value.
+func (s *PutLifecycleEventHookExecutionStatusInput) SetLifecycleEventHookExecutionId(v string) *PutLifecycleEventHookExecutionStatusInput {
+	s.LifecycleEventHookExecutionId = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *PutLifecycleEventHookExecutionStatusInput) SetStatus(v LifecycleEventStatus) *PutLifecycleEventHookExecutionStatusInput {
+	s.Status = v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/PutLifecycleEventHookExecutionStatusOutput
+type PutLifecycleEventHookExecutionStatusOutput struct {
+	_ struct{} `type:"structure"`
+
+	responseMetadata aws.Response
+
+	// The execution ID of the lifecycle event hook. A hook is specified in the
+	// hooks section of the deployment's AppSpec file.
+	LifecycleEventHookExecutionId *string `locationName:"lifecycleEventHookExecutionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutLifecycleEventHookExecutionStatusOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecycleEventHookExecutionStatusOutput) GoString() string {
+	return s.String()
+}
+
+// SDKResponseMetadata returns the response metadata for the API.
+func (s PutLifecycleEventHookExecutionStatusOutput) SDKResponseMetadata() aws.Response {
+	return s.responseMetadata
+}
+
+// SetLifecycleEventHookExecutionId sets the LifecycleEventHookExecutionId field's value.
+func (s *PutLifecycleEventHookExecutionStatusOutput) SetLifecycleEventHookExecutionId(v string) *PutLifecycleEventHookExecutionStatusOutput {
+	s.LifecycleEventHookExecutionId = &v
+	return s
+}
+
+// A revision for an AWS Lambda deployment that is a YAML-formatted or JSON-formatted
+// string. For AWS Lambda deployments, the revision is the same as the AppSpec
+// file.
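A sketch of how a Lambda validation hook might use the input type above. In practice the deployment ID and hook execution ID arrive in the hook's event payload, so both are parameters here; the Succeeded/Failed enum constants are assumed to follow the SDK's generated naming for LifecycleEventStatus:

```go
import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

// reportHookResult tells CodeDeploy whether a BeforeAllowTraffic or
// AfterAllowTraffic validation passed.
func reportHookResult(svc *codedeploy.CodeDeploy, deploymentID, hookExecutionID string, passed bool) error {
	status := codedeploy.LifecycleEventStatusSucceeded
	if !passed {
		status = codedeploy.LifecycleEventStatusFailed
	}

	req := svc.PutLifecycleEventHookExecutionStatusRequest(&codedeploy.PutLifecycleEventHookExecutionStatusInput{
		DeploymentId:                  aws.String(deploymentID),
		LifecycleEventHookExecutionId: aws.String(hookExecutionID),
		Status:                        status,
	})
	_, err := req.Send()
	return err
}
```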
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/RawString +type RawString struct { + _ struct{} `type:"structure"` + + // The YAML-formatted or JSON-formatted revision string. It includes information + // about which Lambda function to update and optional Lambda functions that + // validate deployment lifecycle events. + Content *string `locationName:"content" type:"string"` + + // The SHA256 hash value of the revision that is specified as a RawString. + Sha256 *string `locationName:"sha256" type:"string"` +} + +// String returns the string representation +func (s RawString) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RawString) GoString() string { + return s.String() +} + +// SetContent sets the Content field's value. +func (s *RawString) SetContent(v string) *RawString { + s.Content = &v + return s +} + +// SetSha256 sets the Sha256 field's value. +func (s *RawString) SetSha256(v string) *RawString { + s.Sha256 = &v + return s +} + // Represents the input of a RegisterApplicationRevision operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/RegisterApplicationRevisionInput type RegisterApplicationRevisionInput struct { @@ -7293,12 +7690,19 @@ type RevisionLocation struct { // // * S3: An application revision stored in Amazon S3. // - // * GitHub: An application revision stored in GitHub. + // * GitHub: An application revision stored in GitHub (EC2/On-premises deployments + // only) + // + // * String: A YAML-formatted or JSON-formatted string (AWS Lambda deployments + // only) RevisionType RevisionLocationType `locationName:"revisionType" type:"string" enum:"true"` - // Information about the location of application artifacts stored in Amazon - // S3. + // Information about the location of a revision stored in Amazon S3. S3Location *S3Location `locationName:"s3Location" type:"structure"` + + // Information about the location of an AWS Lambda deployment revision stored + // as a RawString. + String_ *RawString `locationName:"string" type:"structure"` } // String returns the string representation @@ -7329,6 +7733,12 @@ func (s *RevisionLocation) SetS3Location(v *S3Location) *RevisionLocation { return s } +// SetString_ sets the String_ field's value. +func (s *RevisionLocation) SetString_(v *RawString) *RevisionLocation { + s.String_ = v + return s +} + // Information about a deployment rollback. // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/RollbackInfo type RollbackInfo struct { @@ -7758,6 +8168,83 @@ func (s *TargetInstances) SetTagFilters(v []EC2TagFilter) *TargetInstances { return s } +// A configuration that shifts traffic from one version of a Lambda function +// to another in two increments. The original and target Lambda function versions +// are specified in the deployment's AppSpec file. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TimeBasedCanary +type TimeBasedCanary struct { + _ struct{} `type:"structure"` + + // The number of minutes between the first and second traffic shifts of a TimeBasedCanary + // deployment. + CanaryInterval *int64 `locationName:"canaryInterval" type:"integer"` + + // The percentage of traffic to shift in the first increment of a TimeBasedCanary + // deployment. 
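The new RawString type plugs into RevisionLocation through the String_ field shown above. A sketch of building an inline Lambda revision; the AppSpec content is a trimmed illustration, not a complete file:

```go
import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

func inlineLambdaRevision() *codedeploy.RevisionLocation {
	// Illustrative AppSpec for shifting a Lambda alias between two versions.
	appSpec := `version: 0.0
Resources:
  - myFunction:
      Type: AWS::Lambda::Function
      Properties:
        Name: "myFunction"
        Alias: "live"
        CurrentVersion: "1"
        TargetVersion: "2"`

	return &codedeploy.RevisionLocation{
		RevisionType: codedeploy.RevisionLocationTypeString,
		String_: &codedeploy.RawString{
			Content: aws.String(appSpec),
		},
	}
}
```

The returned value can then be passed as the Revision of a CreateDeploymentInput or RegisterApplicationRevisionInput.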
+ CanaryPercentage *int64 `locationName:"canaryPercentage" type:"integer"` +} + +// String returns the string representation +func (s TimeBasedCanary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeBasedCanary) GoString() string { + return s.String() +} + +// SetCanaryInterval sets the CanaryInterval field's value. +func (s *TimeBasedCanary) SetCanaryInterval(v int64) *TimeBasedCanary { + s.CanaryInterval = &v + return s +} + +// SetCanaryPercentage sets the CanaryPercentage field's value. +func (s *TimeBasedCanary) SetCanaryPercentage(v int64) *TimeBasedCanary { + s.CanaryPercentage = &v + return s +} + +// A configuration that shifts traffic from one version of a Lambda function +// to another in equal increments, with an equal number of minutes between each +// increment. The original and target Lambda function versions are specified +// in the deployment's AppSpec file. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TimeBasedLinear +type TimeBasedLinear struct { + _ struct{} `type:"structure"` + + // The number of minutes between each incremental traffic shift of a TimeBasedLinear + // deployment. + LinearInterval *int64 `locationName:"linearInterval" type:"integer"` + + // The percentage of traffic that is shifted at the start of each increment + // of a TimeBasedLinear deployment. + LinearPercentage *int64 `locationName:"linearPercentage" type:"integer"` +} + +// String returns the string representation +func (s TimeBasedLinear) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeBasedLinear) GoString() string { + return s.String() +} + +// SetLinearInterval sets the LinearInterval field's value. +func (s *TimeBasedLinear) SetLinearInterval(v int64) *TimeBasedLinear { + s.LinearInterval = &v + return s +} + +// SetLinearPercentage sets the LinearPercentage field's value. +func (s *TimeBasedLinear) SetLinearPercentage(v int64) *TimeBasedLinear { + s.LinearPercentage = &v + return s +} + // Information about a time range. // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TimeRange type TimeRange struct { @@ -7796,6 +8283,56 @@ func (s *TimeRange) SetStart(v time.Time) *TimeRange { return s } +// The configuration that specifies how traffic is shifted from one version +// of a Lambda function to another version during an AWS Lambda deployment. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TrafficRoutingConfig +type TrafficRoutingConfig struct { + _ struct{} `type:"structure"` + + // A configuration that shifts traffic from one version of a Lambda function + // to another in two increments. The original and target Lambda function versions + // are specified in the deployment's AppSpec file. + TimeBasedCanary *TimeBasedCanary `locationName:"timeBasedCanary" type:"structure"` + + // A configuration that shifts traffic from one version of a Lambda function + // to another in equal increments, with an equal number of minutes between each + // increment. The original and target Lambda function versions are specified + // in the deployment's AppSpec file. + TimeBasedLinear *TimeBasedLinear `locationName:"timeBasedLinear" type:"structure"` + + // The type of traffic shifting (TimeBasedCanary or TimeBasedLinear) used by + // a deployment configuration . 
+ Type TrafficRoutingType `locationName:"type" type:"string" enum:"true"` +} + +// String returns the string representation +func (s TrafficRoutingConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficRoutingConfig) GoString() string { + return s.String() +} + +// SetTimeBasedCanary sets the TimeBasedCanary field's value. +func (s *TrafficRoutingConfig) SetTimeBasedCanary(v *TimeBasedCanary) *TrafficRoutingConfig { + s.TimeBasedCanary = v + return s +} + +// SetTimeBasedLinear sets the TimeBasedLinear field's value. +func (s *TrafficRoutingConfig) SetTimeBasedLinear(v *TimeBasedLinear) *TrafficRoutingConfig { + s.TimeBasedLinear = v + return s +} + +// SetType sets the Type field's value. +func (s *TrafficRoutingConfig) SetType(v TrafficRoutingType) *TrafficRoutingConfig { + s.Type = v + return s +} + // Information about notification triggers for the deployment group. // Please also see https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/TriggerConfig type TriggerConfig struct { @@ -8179,9 +8716,19 @@ type BundleType string // Enum values for BundleType const ( - BundleTypeTar BundleType = "tar" - BundleTypeTgz BundleType = "tgz" - BundleTypeZip BundleType = "zip" + BundleTypeTar BundleType = "tar" + BundleTypeTgz BundleType = "tgz" + BundleTypeZip BundleType = "zip" + BundleTypeYaml BundleType = "YAML" + BundleTypeJson BundleType = "JSON" +) + +type ComputePlatform string + +// Enum values for ComputePlatform +const ( + ComputePlatformServer ComputePlatform = "Server" + ComputePlatformLambda ComputePlatform = "Lambda" ) type DeploymentCreator string @@ -8243,24 +8790,32 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeDeploymentGroupMissing ErrorCode = "DEPLOYMENT_GROUP_MISSING" - ErrorCodeApplicationMissing ErrorCode = "APPLICATION_MISSING" - ErrorCodeRevisionMissing ErrorCode = "REVISION_MISSING" - ErrorCodeIamRoleMissing ErrorCode = "IAM_ROLE_MISSING" - ErrorCodeIamRolePermissions ErrorCode = "IAM_ROLE_PERMISSIONS" - ErrorCodeNoEc2Subscription ErrorCode = "NO_EC2_SUBSCRIPTION" - ErrorCodeOverMaxInstances ErrorCode = "OVER_MAX_INSTANCES" - ErrorCodeNoInstances ErrorCode = "NO_INSTANCES" - ErrorCodeTimeout ErrorCode = "TIMEOUT" - ErrorCodeHealthConstraintsInvalid ErrorCode = "HEALTH_CONSTRAINTS_INVALID" - ErrorCodeHealthConstraints ErrorCode = "HEALTH_CONSTRAINTS" - ErrorCodeInternalError ErrorCode = "INTERNAL_ERROR" - ErrorCodeThrottled ErrorCode = "THROTTLED" - ErrorCodeAlarmActive ErrorCode = "ALARM_ACTIVE" - ErrorCodeAgentIssue ErrorCode = "AGENT_ISSUE" - ErrorCodeAutoScalingIamRolePermissions ErrorCode = "AUTO_SCALING_IAM_ROLE_PERMISSIONS" - ErrorCodeAutoScalingConfiguration ErrorCode = "AUTO_SCALING_CONFIGURATION" - ErrorCodeManualStop ErrorCode = "MANUAL_STOP" + ErrorCodeDeploymentGroupMissing ErrorCode = "DEPLOYMENT_GROUP_MISSING" + ErrorCodeApplicationMissing ErrorCode = "APPLICATION_MISSING" + ErrorCodeRevisionMissing ErrorCode = "REVISION_MISSING" + ErrorCodeIamRoleMissing ErrorCode = "IAM_ROLE_MISSING" + ErrorCodeIamRolePermissions ErrorCode = "IAM_ROLE_PERMISSIONS" + ErrorCodeNoEc2Subscription ErrorCode = "NO_EC2_SUBSCRIPTION" + ErrorCodeOverMaxInstances ErrorCode = "OVER_MAX_INSTANCES" + ErrorCodeNoInstances ErrorCode = "NO_INSTANCES" + ErrorCodeTimeout ErrorCode = "TIMEOUT" + ErrorCodeHealthConstraintsInvalid ErrorCode = "HEALTH_CONSTRAINTS_INVALID" + ErrorCodeHealthConstraints ErrorCode = "HEALTH_CONSTRAINTS" + ErrorCodeInternalError ErrorCode = "INTERNAL_ERROR" + 
ErrorCodeThrottled ErrorCode = "THROTTLED" + ErrorCodeAlarmActive ErrorCode = "ALARM_ACTIVE" + ErrorCodeAgentIssue ErrorCode = "AGENT_ISSUE" + ErrorCodeAutoScalingIamRolePermissions ErrorCode = "AUTO_SCALING_IAM_ROLE_PERMISSIONS" + ErrorCodeAutoScalingConfiguration ErrorCode = "AUTO_SCALING_CONFIGURATION" + ErrorCodeManualStop ErrorCode = "MANUAL_STOP" + ErrorCodeMissingBlueGreenDeploymentConfiguration ErrorCode = "MISSING_BLUE_GREEN_DEPLOYMENT_CONFIGURATION" + ErrorCodeMissingElbInformation ErrorCode = "MISSING_ELB_INFORMATION" + ErrorCodeMissingGithubToken ErrorCode = "MISSING_GITHUB_TOKEN" + ErrorCodeElasticLoadBalancingInvalid ErrorCode = "ELASTIC_LOAD_BALANCING_INVALID" + ErrorCodeElbInvalidInstance ErrorCode = "ELB_INVALID_INSTANCE" + ErrorCodeInvalidLambdaConfiguration ErrorCode = "INVALID_LAMBDA_CONFIGURATION" + ErrorCodeInvalidLambdaFunction ErrorCode = "INVALID_LAMBDA_FUNCTION" + ErrorCodeHookExecutionFailure ErrorCode = "HOOK_EXECUTION_FAILURE" ) type FileExistsBehavior string @@ -8364,6 +8919,7 @@ type RevisionLocationType string const ( RevisionLocationTypeS3 RevisionLocationType = "S3" RevisionLocationTypeGitHub RevisionLocationType = "GitHub" + RevisionLocationTypeString RevisionLocationType = "String" ) type SortOrder string @@ -8391,6 +8947,15 @@ const ( TagFilterTypeKeyAndValue TagFilterType = "KEY_AND_VALUE" ) +type TrafficRoutingType string + +// Enum values for TrafficRoutingType +const ( + TrafficRoutingTypeTimeBasedCanary TrafficRoutingType = "TimeBasedCanary" + TrafficRoutingTypeTimeBasedLinear TrafficRoutingType = "TimeBasedLinear" + TrafficRoutingTypeAllAtOnce TrafficRoutingType = "AllAtOnce" +) + type TriggerEventType string // Enum values for TriggerEventType diff --git a/service/codedeploy/codedeployiface/interface.go b/service/codedeploy/codedeployiface/interface.go index 24904739ba2..f4e340b6eda 100644 --- a/service/codedeploy/codedeployiface/interface.go +++ b/service/codedeploy/codedeployiface/interface.go @@ -93,6 +93,8 @@ type CodeDeployAPI interface { DeleteDeploymentGroupRequest(*codedeploy.DeleteDeploymentGroupInput) codedeploy.DeleteDeploymentGroupRequest + DeleteGitHubAccountTokenRequest(*codedeploy.DeleteGitHubAccountTokenInput) codedeploy.DeleteGitHubAccountTokenRequest + DeregisterOnPremisesInstanceRequest(*codedeploy.DeregisterOnPremisesInstanceInput) codedeploy.DeregisterOnPremisesInstanceRequest GetApplicationRequest(*codedeploy.GetApplicationInput) codedeploy.GetApplicationRequest @@ -143,6 +145,8 @@ type CodeDeployAPI interface { ListOnPremisesInstancesRequest(*codedeploy.ListOnPremisesInstancesInput) codedeploy.ListOnPremisesInstancesRequest + PutLifecycleEventHookExecutionStatusRequest(*codedeploy.PutLifecycleEventHookExecutionStatusInput) codedeploy.PutLifecycleEventHookExecutionStatusRequest + RegisterApplicationRevisionRequest(*codedeploy.RegisterApplicationRevisionInput) codedeploy.RegisterApplicationRevisionRequest RegisterOnPremisesInstanceRequest(*codedeploy.RegisterOnPremisesInstanceInput) codedeploy.RegisterOnPremisesInstanceRequest diff --git a/service/codedeploy/doc.go b/service/codedeploy/doc.go index 53b211ea6f8..c0a79ccb912 100644 --- a/service/codedeploy/doc.go +++ b/service/codedeploy/doc.go @@ -4,13 +4,15 @@ // requests to AWS CodeDeploy. // // AWS CodeDeploy is a deployment service that automates application deployments -// to Amazon EC2 instances or on-premises instances running in your own facility. +// to Amazon EC2 instances, on-premises instances running in your own facility, +// or serverless AWS Lambda functions. 
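The ComputePlatform enum added above is what CreateApplication expects for the Lambda support this doc.go change describes; registering a Lambda application is a one-field change from the EC2/On-premises case. A short sketch, with a placeholder application name and svc a codedeploy client as before:

```go
import (
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

func createLambdaApp(svc *codedeploy.CodeDeploy) {
	req := svc.CreateApplicationRequest(&codedeploy.CreateApplicationInput{
		ApplicationName: aws.String("my-lambda-app"),
		// The other enum value is codedeploy.ComputePlatformServer.
		ComputePlatform: codedeploy.ComputePlatformLambda,
	})
	if _, err := req.Send(); err != nil {
		log.Fatalf("failed to create application, %v", err)
	}
}
```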
// // You can deploy a nearly unlimited variety of application content, such as -// code, web and configuration files, executables, packages, scripts, multimedia -// files, and so on. AWS CodeDeploy can deploy application content stored in -// Amazon S3 buckets, GitHub repositories, or Bitbucket repositories. You do -// not need to make changes to your existing code before you can use AWS CodeDeploy. +// an updated Lambda function, code, web and configuration files, executables, +// packages, scripts, multimedia files, and so on. AWS CodeDeploy can deploy +// application content stored in Amazon S3 buckets, GitHub repositories, or +// Bitbucket repositories. You do not need to make changes to your existing +// code before you can use AWS CodeDeploy. // // AWS CodeDeploy makes it easier for you to rapidly release new features, helps // you avoid downtime during application deployment, and handles the complexity @@ -27,26 +29,30 @@ // to ensure the correct combination of revision, deployment configuration, // and deployment group are referenced during a deployment. // -// * Deployment group: A set of individual instances. A deployment group -// contains individually tagged instances, Amazon EC2 instances in Auto Scaling -// groups, or both. +// * Deployment group: A set of individual instances or CodeDeploy Lambda +// applications. A Lambda deployment group contains a group of applications. +// An EC2/On-premises deployment group contains individually tagged instances, +// Amazon EC2 instances in Auto Scaling groups, or both. // // * Deployment configuration: A set of deployment rules and deployment success // and failure conditions used by AWS CodeDeploy during a deployment. // -// * Deployment: The process, and the components involved in the process, -// of installing content on one or more instances. +// * Deployment: The process and the components used in the process of updating +// a Lambda function or of installing content on one or more instances. // -// * Application revisions: An archive file containing source content—source -// code, web pages, executable files, and deployment scripts—along with an -// application specification file (AppSpec file). Revisions are stored in -// Amazon S3 buckets or GitHub repositories. For Amazon S3, a revision is -// uniquely identified by its Amazon S3 object key and its ETag, version, -// or both. For GitHub, a revision is uniquely identified by its commit ID. +// * Application revisions: For an AWS Lambda deployment, this is an AppSpec +// file that specifies the Lambda function to update and one or more functions +// to validate deployment lifecycle events. For an EC2/On-premises deployment, +// this is an archive file containing source content—source code, web pages, +// executable files, and deployment scripts—along with an AppSpec file. Revisions +// are stored in Amazon S3 buckets or GitHub repositories. For Amazon S3, +// a revision is uniquely identified by its Amazon S3 object key and its +// ETag, version, or both. For GitHub, a revision is uniquely identified +// by its commit ID. // // This guide also contains information to help you get details about the instances -// in your deployments and to make on-premises instances available for AWS CodeDeploy -// deployments. +// in your deployments, to make on-premises instances available for AWS CodeDeploy +// deployments, and to get details about a Lambda function deployment. 
// // AWS CodeDeploy Information Resources // diff --git a/service/codedeploy/errors.go b/service/codedeploy/errors.go index 1ff3ee3438c..963a57a533f 100644 --- a/service/codedeploy/errors.go +++ b/service/codedeploy/errors.go @@ -147,6 +147,18 @@ const ( // The description is too long. ErrCodeDescriptionTooLongException = "DescriptionTooLongException" + // ErrCodeGitHubAccountTokenDoesNotExistException for service response error code + // "GitHubAccountTokenDoesNotExistException". + // + // No GitHub account connection exists with the named specified in the call. + ErrCodeGitHubAccountTokenDoesNotExistException = "GitHubAccountTokenDoesNotExistException" + + // ErrCodeGitHubAccountTokenNameRequiredException for service response error code + // "GitHubAccountTokenNameRequiredException". + // + // The call is missing a required GitHub account connection name. + ErrCodeGitHubAccountTokenNameRequiredException = "GitHubAccountTokenNameRequiredException" + // ErrCodeIamArnRequiredException for service response error code // "IamArnRequiredException". // @@ -260,6 +272,12 @@ const ( // The bucket name either doesn't exist or was specified in an invalid format. ErrCodeInvalidBucketNameFilterException = "InvalidBucketNameFilterException" + // ErrCodeInvalidComputePlatformException for service response error code + // "InvalidComputePlatformException". + // + // The computePlatform is invalid. The computePlatform should be Lambda or Server. + ErrCodeInvalidComputePlatformException = "InvalidComputePlatformException" + // ErrCodeInvalidDeployedStateFilterException for service response error code // "InvalidDeployedStateFilterException". // @@ -327,6 +345,12 @@ const ( // "DISALLOW", "OVERWRITE", and "RETAIN". ErrCodeInvalidFileExistsBehaviorException = "InvalidFileExistsBehaviorException" + // ErrCodeInvalidGitHubAccountTokenNameException for service response error code + // "InvalidGitHubAccountTokenNameException". + // + // The format of the specified GitHub account connection name is invalid. + ErrCodeInvalidGitHubAccountTokenNameException = "InvalidGitHubAccountTokenNameException" + // ErrCodeInvalidIamSessionArnException for service response error code // "InvalidIamSessionArnException". // @@ -339,6 +363,19 @@ const ( // The IAM user ARN was specified in an invalid format. ErrCodeInvalidIamUserArnException = "InvalidIamUserArnException" + // ErrCodeInvalidIgnoreApplicationStopFailuresValueException for service response error code + // "InvalidIgnoreApplicationStopFailuresValueException". + // + // The IgnoreApplicationStopFailures value is invalid. For AWS Lambda deployments, + // false is expected. For EC2/On-premises deployments, true or false is expected. + ErrCodeInvalidIgnoreApplicationStopFailuresValueException = "InvalidIgnoreApplicationStopFailuresValueException" + + // ErrCodeInvalidInputException for service response error code + // "InvalidInputException". + // + // The specified input was specified in an invalid format. + ErrCodeInvalidInputException = "InvalidInputException" + // ErrCodeInvalidInstanceNameException for service response error code // "InvalidInstanceNameException". // @@ -365,6 +402,20 @@ const ( // The specified key prefix filter was specified in an invalid format. ErrCodeInvalidKeyPrefixFilterException = "InvalidKeyPrefixFilterException" + // ErrCodeInvalidLifecycleEventHookExecutionIdException for service response error code + // "InvalidLifecycleEventHookExecutionIdException". + // + // A lifecycle event hook is invalid. 
Review the hooks section in your AppSpec + // file to ensure the lifecycle events and hooks functions are valid. + ErrCodeInvalidLifecycleEventHookExecutionIdException = "InvalidLifecycleEventHookExecutionIdException" + + // ErrCodeInvalidLifecycleEventHookExecutionStatusException for service response error code + // "InvalidLifecycleEventHookExecutionStatusException". + // + // The result of a Lambda validation function that verifies a lifecycle event + // is invalid. It should return Succeeded or Failed. + ErrCodeInvalidLifecycleEventHookExecutionStatusException = "InvalidLifecycleEventHookExecutionStatusException" + // ErrCodeInvalidLoadBalancerInfoException for service response error code // "InvalidLoadBalancerInfoException". // @@ -462,12 +513,32 @@ const ( // The specified time range was specified in an invalid format. ErrCodeInvalidTimeRangeException = "InvalidTimeRangeException" + // ErrCodeInvalidTrafficRoutingConfigurationException for service response error code + // "InvalidTrafficRoutingConfigurationException". + // + // The configuration that specifies how traffic is routed during a deployment + // is invalid. + ErrCodeInvalidTrafficRoutingConfigurationException = "InvalidTrafficRoutingConfigurationException" + // ErrCodeInvalidTriggerConfigException for service response error code // "InvalidTriggerConfigException". // // The trigger was specified in an invalid format. ErrCodeInvalidTriggerConfigException = "InvalidTriggerConfigException" + // ErrCodeInvalidUpdateOutdatedInstancesOnlyValueException for service response error code + // "InvalidUpdateOutdatedInstancesOnlyValueException". + // + // The UpdateOutdatedInstancesOnly value is invalid. For AWS Lambda deployments, + // false is expected. For EC2/On-premises deployments, true or false is expected. + ErrCodeInvalidUpdateOutdatedInstancesOnlyValueException = "InvalidUpdateOutdatedInstancesOnlyValueException" + + // ErrCodeLifecycleEventAlreadyCompletedException for service response error code + // "LifecycleEventAlreadyCompletedException". + // + // An attempt to return the status of an already completed lifecycle event occurred. + ErrCodeLifecycleEventAlreadyCompletedException = "LifecycleEventAlreadyCompletedException" + // ErrCodeLifecycleHookLimitExceededException for service response error code // "LifecycleHookLimitExceededException". // @@ -481,6 +552,12 @@ const ( // Use only one ARN type. ErrCodeMultipleIamArnsProvidedException = "MultipleIamArnsProvidedException" + // ErrCodeOperationNotSupportedException for service response error code + // "OperationNotSupportedException". + // + // The API used does not support the deployment. + ErrCodeOperationNotSupportedException = "OperationNotSupportedException" + // ErrCodeResourceValidationException for service response error code // "ResourceValidationException". // @@ -524,6 +601,12 @@ const ( // allowed limit of 3. ErrCodeTagSetListLimitExceededException = "TagSetListLimitExceededException" + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // An API function was called too frequently. + ErrCodeThrottlingException = "ThrottlingException" + // ErrCodeTriggerTargetsLimitExceededException for service response error code // "TriggerTargetsLimitExceededException". 
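These new error codes surface through the SDK's awserr.Error interface like the existing ones. A hedged sketch of branching on the GitHub-token errors after a failed DeleteGitHubAccountToken call:

```go
import (
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
)

func handleDeleteError(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		log.Printf("non-service error: %v", err)
		return
	}
	switch aerr.Code() {
	case codedeploy.ErrCodeGitHubAccountTokenDoesNotExistException:
		log.Print("no GitHub connection with that name; nothing to delete")
	case codedeploy.ErrCodeGitHubAccountTokenNameRequiredException:
		log.Print("TokenName was not set on the input")
	default:
		log.Printf("delete failed: %v", aerr)
	}
}
```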
// diff --git a/service/directoryservice/api.go b/service/directoryservice/api.go index 4a0be5529b7..948ae4fce8d 100644 --- a/service/directoryservice/api.go +++ b/service/directoryservice/api.go @@ -3193,6 +3193,10 @@ type CreateMicrosoftADInput struct { // console Directory Details page after the directory is created. Description *string `type:"string"` + // AWS Microsoft AD is available in two editions: Standard and Enterprise. Enterprise + // is the default. + Edition DirectoryEdition `type:"string" enum:"true"` + // The fully qualified domain name for the directory, such as corp.example.com. // This name will resolve inside your VPC only. It does not need to be publicly // resolvable. @@ -3259,6 +3263,12 @@ func (s *CreateMicrosoftADInput) SetDescription(v string) *CreateMicrosoftADInpu return s } +// SetEdition sets the Edition field's value. +func (s *CreateMicrosoftADInput) SetEdition(v DirectoryEdition) *CreateMicrosoftADInput { + s.Edition = v + return s +} + // SetName sets the Name field's value. func (s *CreateMicrosoftADInput) SetName(v string) *CreateMicrosoftADInput { s.Name = &v @@ -4714,6 +4724,9 @@ type DirectoryDescription struct { // which the AD Connector is connected. DnsIpAddrs []string `type:"list"` + // The edition associated with this directory. + Edition DirectoryEdition `type:"string" enum:"true"` + // Specifies when the directory was created. LaunchTime *time.Time `type:"timestamp" timestampFormat:"unix"` @@ -4807,6 +4820,12 @@ func (s *DirectoryDescription) SetDnsIpAddrs(v []string) *DirectoryDescription { return s } +// SetEdition sets the Edition field's value. +func (s *DirectoryDescription) SetEdition(v DirectoryEdition) *DirectoryDescription { + s.Edition = v + return s +} + // SetLaunchTime sets the LaunchTime field's value. func (s *DirectoryDescription) SetLaunchTime(v time.Time) *DirectoryDescription { s.LaunchTime = &v @@ -5042,10 +5061,7 @@ type DirectoryVpcSettingsDescription struct { // The list of Availability Zones that the directory is in. AvailabilityZones []string `type:"list"` - // The security group identifier for the directory. If the directory was created - // before 8/1/2014, this is the identifier of the directory members security - // group that was created when the directory was created. If the directory was - // created after this date, this value is null. + // The domain controller security group identifier for the directory. SecurityGroupId *string `type:"string"` // The identifiers of the subnets for the directory servers. @@ -7372,6 +7388,14 @@ func (s *VerifyTrustOutput) SetTrustId(v string) *VerifyTrustOutput { return s } +type DirectoryEdition string + +// Enum values for DirectoryEdition +const ( + DirectoryEditionEnterprise DirectoryEdition = "Enterprise" + DirectoryEditionStandard DirectoryEdition = "Standard" +) + type DirectorySize string // Enum values for DirectorySize diff --git a/service/ec2/api.go b/service/ec2/api.go index 4d9b5f0cb11..334ea80cbf3 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -14141,6 +14141,8 @@ func (r ReplaceNetworkAclAssociationRequest) Send() (*ReplaceNetworkAclAssociati // For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) // in the Amazon Virtual Private Cloud User Guide. // +// This is an idempotent operation. +// // // Example sending a request using the ReplaceNetworkAclAssociationRequest method. 
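Switching services: the Directory Service change above adds an Edition field to CreateMicrosoftAD, and per its doc comment Enterprise is the default, so Standard has to be requested explicitly. A sketch with placeholder network and credential values; the VpcSettings field shapes are assumed from the surrounding generated code:

```go
import (
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/directoryservice"
)

func createStandardAD(svc *directoryservice.DirectoryService, password, vpcID, subnetA, subnetB string) {
	req := svc.CreateMicrosoftADRequest(&directoryservice.CreateMicrosoftADInput{
		Name:     aws.String("corp.example.com"), // resolves inside the VPC only
		Password: aws.String(password),           // admin password, sourced securely
		Edition:  directoryservice.DirectoryEditionStandard,
		VpcSettings: &directoryservice.DirectoryVpcSettings{
			VpcId:     aws.String(vpcID),
			SubnetIds: []string{subnetA, subnetB},
		},
	})
	if _, err := req.Send(); err != nil {
		log.Fatalf("failed to create directory, %v", err)
	}
}
```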
// req := client.ReplaceNetworkAclAssociationRequest(params) // resp, err := req.Send() @@ -16022,6 +16024,9 @@ type Address struct { // The Elastic IP address. PublicIp *string `locationName:"publicIp" type:"string"` + + // Any tags assigned to the Elastic IP address. + Tags []Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -16082,6 +16087,12 @@ func (s *Address) SetPublicIp(v string) *Address { return s } +// SetTags sets the Tags field's value. +func (s *Address) SetTags(v []Tag) *Address { + s.Tags = v + return s +} + // Contains the parameters for AllocateAddress. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateAddressRequest type AllocateAddressInput struct { @@ -36300,14 +36311,14 @@ type DescribeVpcEndpointConnectionsInput struct { // One or more filters. // - // * customer-account-id - The AWS account number of the owner of the endpoint. + // * service-id - The ID of the service. // - // * endpoint-connection-state - The state of the endpoint (PendingAcceptance - // | Pending | Available | Deleting | Deleted | Rejected | Failed). + // * vpc-endpoint-owner - The AWS account number of the owner of the endpoint. // - // * vpc-endpoint-id - The ID of the endpoint. + // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | + // pending | available | deleting | deleted | rejected | failed). // - // * vpc-endpoint-service-id - The ID of the service. + // * vpc-endpoint-id - The ID of the endpoint. Filters []Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -36408,12 +36419,12 @@ type DescribeVpcEndpointServiceConfigurationsInput struct { // One or more filters. // - // * service-name - The ARN of the service. + // * service-name - The name of the service. // - // * vpc-endpoint-service-id - The ID of the service. + // * service-id - The ID of the service. // - // * vpc-endpoint-service-state - The state of the service (Pending | Available - // | Deleting | Deleted | Failed). + // * service-state - The state of the service (Pending | Available | Deleting + // | Deleted | Failed). Filters []Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -44896,10 +44907,11 @@ type IpPermission struct { // [EC2-VPC only] One or more IPv6 ranges. Ipv6Ranges []Ipv6Range `locationName:"ipv6Ranges" locationNameList:"item" type:"list"` - // (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups - // only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress - // request, this is the AWS service that you want to access through a VPC endpoint - // from instances associated with the security group. + // (EC2-VPC only; valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress + // and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. + // In an AuthorizeSecurityGroupEgress request, this is the AWS service that + // you want to access through a VPC endpoint from instances associated with + // the security group. PrefixListIds []PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. 
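With the tagSet now exposed on Address above, listing Elastic IP tags is a straightforward iteration over a DescribeAddresses response. A minimal sketch:

```go
import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func printAddressTags(svc *ec2.EC2) {
	resp, err := svc.DescribeAddressesRequest(&ec2.DescribeAddressesInput{}).Send()
	if err != nil {
		log.Fatalf("failed to describe addresses, %v", err)
	}
	for _, addr := range resp.Addresses {
		for _, tag := range addr.Tags {
			fmt.Printf("%s %s=%s\n", aws.StringValue(addr.PublicIp),
				aws.StringValue(tag.Key), aws.StringValue(tag.Value))
		}
	}
}
```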
@@ -51132,7 +51144,7 @@ func (s *PrefixList) SetPrefixListName(v string) *PrefixList { return s } -// The ID of the prefix. +// [EC2-VPC only] The ID of the prefix. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PrefixListId type PrefixListId struct { _ struct{} `type:"structure"` diff --git a/service/ecs/api.go b/service/ecs/api.go index 113c5244c7c..e78fa33a2ab 100644 --- a/service/ecs/api.go +++ b/service/ecs/api.go @@ -1675,6 +1675,27 @@ func (r RunTaskRequest) Send() (*RunTaskOutput, error) { // Alternatively, you can use StartTask to use your own scheduler or place tasks // manually on specific container instances. // +// The Amazon ECS API follows an eventual consistency model, due to the distributed +// nature of the system supporting the API. This means that the result of an +// API command you run that affects your Amazon ECS resources might not be immediately +// visible to all subsequent commands you run. You should keep this in mind +// when you carry out an API command that immediately follows a previous API +// command. +// +// To manage eventual consistency, you can do the following: +// +// * Confirm the state of the resource before you run a command to modify +// it. Run the DescribeTasks command using an exponential backoff algorithm +// to ensure that you allow enough time for the previous command to propagate +// through the system. To do this, run the DescribeTasks command repeatedly, +// starting with a couple of seconds of wait time, and increasing gradually +// up to five minutes of wait time. +// +// * Add wait time between subsequent commands, even if the DescribeTasks +// command returns an accurate response. Apply an exponential backoff algorithm +// starting with a couple of seconds of wait time, and increase gradually +// up to about five minutes of wait time. +// // // Example sending a request using the RunTaskRequest method. // req := client.RunTaskRequest(params) // resp, err := req.Send() @@ -2938,6 +2959,9 @@ type ContainerDefinition struct { // allow the container to only reserve 128 MiB of memory from the remaining // resources on the container instance, but also allow the container to consume // more memory resources when needed. + // + // The Docker daemon reserves a minimum of 4 MiB of memory for a container, + // so you should not specify fewer than 4 MiB of memory for your containers. MemoryReservation *int64 `locationName:"memoryReservation" type:"integer"` // The mount points for data volumes in your container. @@ -3638,6 +3662,17 @@ type CreateServiceInput struct { // DesiredCount is a required field DesiredCount *int64 `locationName:"desiredCount" type:"integer" required:"true"` + // The period of time, in seconds, that the Amazon ECS service scheduler should + // ignore unhealthy Elastic Load Balancing target health checks after a task + // has first started. This is only valid if your service is configured to use + // a load balancer. If your service's tasks take a while to start and respond + // to ELB health checks, you can specify a health check grace period of up to + // 1,800 seconds during which the ECS service scheduler will ignore ELB health + // check status. This grace period can prevent the ECS service scheduler from + // marking tasks as unhealthy and stopping them before they have time to come + // up. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + // The launch type on which to run your service. 
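The eventual-consistency guidance added to the RunTask documentation above maps naturally onto a polling loop. A hedged sketch of the suggested exponential backoff, starting at a couple of seconds and capped around five minutes; cluster and task values are caller-supplied placeholders:

```go
import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

// waitForTask polls DescribeTasks until the task reaches the wanted status,
// backing off exponentially as the API guidance suggests.
func waitForTask(svc *ecs.ECS, cluster, taskArn, wantStatus string) error {
	wait := 2 * time.Second
	const maxWait = 5 * time.Minute

	for {
		resp, err := svc.DescribeTasksRequest(&ecs.DescribeTasksInput{
			Cluster: aws.String(cluster),
			Tasks:   []string{taskArn},
		}).Send()
		if err == nil {
			for _, t := range resp.Tasks {
				if aws.StringValue(t.LastStatus) == wantStatus {
					return nil // the earlier command has propagated
				}
			}
		}
		if wait > maxWait {
			return fmt.Errorf("task %s did not reach %s", taskArn, wantStatus)
		}
		time.Sleep(wait)
		wait *= 2 // exponential backoff
	}
}
```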
LaunchType LaunchType `locationName:"launchType" type:"string" enum:"true"` @@ -3779,6 +3814,12 @@ func (s *CreateServiceInput) SetDesiredCount(v int64) *CreateServiceInput { return s } +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. +func (s *CreateServiceInput) SetHealthCheckGracePeriodSeconds(v int64) *CreateServiceInput { + s.HealthCheckGracePeriodSeconds = &v + return s +} + // SetLaunchType sets the LaunchType field's value. func (s *CreateServiceInput) SetLaunchType(v LaunchType) *CreateServiceInput { s.LaunchType = v @@ -4462,7 +4503,7 @@ type DescribeClustersInput struct { // // * runningEC2TasksCount // - // * RunningFargateTasksCount + // * runningFargateTasksCount // // * pendingEC2TasksCount // @@ -6599,9 +6640,9 @@ func (s *PlacementStrategy) SetType(v PlacementStrategyType) *PlacementStrategy // to send or receive traffic. Port mappings are specified as part of the container // definition. // -// If using containers in a task with the Fargate launch type, exposed ports -// should be specified using containerPort. The hostPort can be left blank or -// it must be the same value as the containerPort. +// If using containers in a task with the awsvpc or host network mode, exposed +// ports should be specified using containerPort. The hostPort can be left blank +// or it must be the same value as the containerPort. // // After a task reaches the RUNNING status, manual and automatic host and container // port assignments are visible in the networkBindings section of DescribeTasks @@ -6613,11 +6654,11 @@ type PortMapping struct { // The port number on the container that is bound to the user-specified or automatically // assigned host port. // - // If using containers in a task with the Fargate launch type, exposed ports - // should be specified using containerPort. + // If using containers in a task with the awsvpc or host network mode, exposed + // ports should be specified using containerPort. // - // If using containers in a task with the EC2 launch type and you specify a - // container port and not a host port, your container automatically receives + // If using containers in a task with the bridge network mode and you specify + // a container port and not a host port, your container automatically receives // a host port in the ephemeral port range (for more information, see hostPort). // Port mappings that are automatically assigned in this way do not count toward // the 100 reserved ports limit of a container instance. @@ -6625,12 +6666,12 @@ type PortMapping struct { // The port number on the container instance to reserve for your container. // - // If using containers in a task with the Fargate launch type, the hostPort + // If using containers in a task with the awsvpc or host network mode, the hostPort // can either be left blank or needs to be the same value as the containerPort. // - // If using containers in a task with the EC2 launch type, you can specify a - // non-reserved host port for your container port mapping, or you can omit the - // hostPort (or set it to 0) while specifying a containerPort and your container + // If using containers in a task with the bridge network mode, you can specify + // a non-reserved host port for your container port mapping, or you can omit + // the hostPort (or set it to 0) while specifying a containerPort and your container // automatically receives a port in the ephemeral port range for your container // instance operating system and Docker version. 
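A sketch of the new health check grace period in use when creating a load-balanced service; all names and ARNs are placeholders, and the LoadBalancer field shapes are assumed from the generated types:

```go
import (
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func createWebService(svc *ecs.ECS, targetGroupARN, serviceRoleARN string) {
	req := svc.CreateServiceRequest(&ecs.CreateServiceInput{
		ServiceName:    aws.String("web"),
		TaskDefinition: aws.String("web-task:1"),
		DesiredCount:   aws.Int64(2),
		LoadBalancers: []ecs.LoadBalancer{{
			TargetGroupArn: aws.String(targetGroupARN),
			ContainerName:  aws.String("web"),
			ContainerPort:  aws.Int64(80),
		}},
		Role: aws.String(serviceRoleARN),
		// Give slow-starting tasks up to five minutes before ELB health
		// checks can mark them unhealthy (the documented maximum is 1,800s).
		HealthCheckGracePeriodSeconds: aws.Int64(300),
	})
	if _, err := req.Send(); err != nil {
		log.Fatalf("failed to create service, %v", err)
	}
}
```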
// @@ -6921,11 +6962,16 @@ type RegisterTaskDefinitionInput struct { ContainerDefinitions []ContainerDefinition `locationName:"containerDefinitions" type:"list" required:"true"` // The number of cpu units used by the task. If using the EC2 launch type, this - // field is optional and any value can be used. If you are using the Fargate - // launch type, this field is required and you must use one of the following - // values, which determines your range of valid values for the memory parameter: + // field is optional and any value can be used. + // + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If you are using the Fargate launch type, this field is required and you + // must use one of the following values, which determines your range of valid + // values for the memory parameter: // - // * 256 (.25 vCPU) - Available memory values: 512MB, 1GB, 2GB + // * 256 (.25 vCPU) - Available memory values: 0.5GB, 1GB, 2GB // // * 512 (.5 vCPU) - Available memory values: 1GB, 2GB, 3GB, 4GB // @@ -6952,11 +6998,16 @@ type RegisterTaskDefinitionInput struct { Family *string `locationName:"family" type:"string" required:"true"` // The amount (in MiB) of memory used by the task. If using the EC2 launch type, - // this field is optional and any value can be used. If you are using the Fargate - // launch type, this field is required and you must use one of the following - // values, which determines your range of valid values for the cpu parameter: + // this field is optional and any value can be used. // - // * 512MB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If you are using the Fargate launch type, this field is required and you + // must use one of the following values, which determines your range of valid + // values for the cpu parameter: + // + // * 0.5GB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) // // * 1GB, 2GB, 3GB, 4GB - Available cpu values: 512 (.5 vCPU) // @@ -7454,6 +7505,11 @@ type Service struct { // are displayed. Events []ServiceEvent `locationName:"events" type:"list"` + // The period of time, in seconds, that the Amazon ECS service scheduler ignores + // unhealthy Elastic Load Balancing target health checks after a task has first + // started. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + // The launch type on which your service is running. LaunchType LaunchType `locationName:"launchType" type:"string" enum:"true"` @@ -7554,6 +7610,12 @@ func (s *Service) SetEvents(v []ServiceEvent) *Service { return s } +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. +func (s *Service) SetHealthCheckGracePeriodSeconds(v int64) *Service { + s.HealthCheckGracePeriodSeconds = &v + return s +} + // SetLaunchType sets the LaunchType field's value. 
func (s *Service) SetLaunchType(v LaunchType) *Service { s.LaunchType = v @@ -8223,7 +8285,7 @@ type Task struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the memory parameter: // - // * 256 (.25 vCPU) - Available memory values: 512MB, 1GB, 2GB + // * 256 (.25 vCPU) - Available memory values: 0.5GB, 1GB, 2GB // // * 512 (.5 vCPU) - Available memory values: 1GB, 2GB, 3GB, 4GB // @@ -8261,7 +8323,7 @@ type Task struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the cpu parameter: // - // * 512MB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) + // * 0.5GB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) // // * 1GB, 2GB, 3GB, 4GB - Available cpu values: 512 (.5 vCPU) // @@ -8510,7 +8572,7 @@ type TaskDefinition struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the memory parameter: // - // * 256 (.25 vCPU) - Available memory values: 512MB, 1GB, 2GB + // * 256 (.25 vCPU) - Available memory values: 0.5GB, 1GB, 2GB // // * 512 (.5 vCPU) - Available memory values: 1GB, 2GB, 3GB, 4GB // @@ -8536,7 +8598,7 @@ type TaskDefinition struct { // type, this field is required and you must use one of the following values, // which determines your range of valid values for the cpu parameter: // - // * 512MB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) + // * 0.5GB, 1GB, 2GB - Available cpu values: 256 (.25 vCPU) // // * 1GB, 2GB, 3GB, 4GB - Available cpu values: 512 (.5 vCPU) // @@ -9099,6 +9161,17 @@ type UpdateServiceInput struct { // Whether or not to force a new deployment of the service. ForceNewDeployment *bool `locationName:"forceNewDeployment" type:"boolean"` + // The period of time, in seconds, that the Amazon ECS service scheduler should + // ignore unhealthy Elastic Load Balancing target health checks after a task + // has first started. This is only valid if your service is configured to use + // a load balancer. If your service's tasks take a while to start and respond + // to ELB health checks, you can specify a health check grace period of up to + // 1,800 seconds during which the ECS service scheduler will ignore ELB health + // check status. This grace period can prevent the ECS service scheduler from + // marking tasks as unhealthy and stopping them before they have time to come + // up. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + // The network configuration for the service. This parameter is required for // task definitions that use the awsvpc network mode to receive their own Elastic // Network Interface, and it is not supported for other network modes. For more @@ -9180,6 +9253,12 @@ func (s *UpdateServiceInput) SetForceNewDeployment(v bool) *UpdateServiceInput { return s } +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. +func (s *UpdateServiceInput) SetHealthCheckGracePeriodSeconds(v int64) *UpdateServiceInput { + s.HealthCheckGracePeriodSeconds = &v + return s +} + // SetNetworkConfiguration sets the NetworkConfiguration field's value. 
func (s *UpdateServiceInput) SetNetworkConfiguration(v *NetworkConfiguration) *UpdateServiceInput { s.NetworkConfiguration = v diff --git a/service/ecs/doc.go b/service/ecs/doc.go index 44957191e3f..d9820c9cc82 100644 --- a/service/ecs/doc.go +++ b/service/ecs/doc.go @@ -10,7 +10,7 @@ // tasks using the Fargate launch type. For more control, you can host your // tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances // that you manage by using the EC2 launch type. For more information about -// launch types, see Amazon ECS Launch Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguidelaunch_types.html). +// launch types, see Amazon ECS Launch Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html). // // Amazon ECS lets you launch and stop container-based applications with simple // API calls, allows you to get the state of your cluster from a centralized diff --git a/service/elb/doc.go b/service/elb/doc.go index 92f97d7292e..e1c40f8267c 100644 --- a/service/elb/doc.go +++ b/service/elb/doc.go @@ -3,27 +3,22 @@ // Package elb provides the client and types for making API // requests to Elastic Load Balancing. // -// A load balancer distributes incoming traffic across your EC2 instances. This -// enables you to increase the availability of your application. The load balancer -// also monitors the health of its registered instances and ensures that it -// routes traffic only to healthy instances. You configure your load balancer -// to accept incoming traffic by specifying one or more listeners, which are -// configured with a protocol and port number for connections from clients to -// the load balancer and a protocol and port number for connections from the -// load balancer to the instances. -// -// Elastic Load Balancing supports two types of load balancers: Classic Load -// Balancers and Application Load Balancers (new). A Classic Load Balancer makes -// routing and load balancing decisions either at the transport layer (TCP/SSL) -// or the application layer (HTTP/HTTPS), and supports either EC2-Classic or -// a VPC. An Application Load Balancer makes routing and load balancing decisions -// at the application layer (HTTP/HTTPS), supports path-based routing, and can -// route requests to one or more ports on each EC2 instance or container instance -// in your virtual private cloud (VPC). For more information, see the Elastic -// Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/what-is-load-balancing.html). +// A load balancer can distribute incoming traffic across your EC2 instances. +// This enables you to increase the availability of your application. The load +// balancer also monitors the health of its registered instances and ensures +// that it routes traffic only to healthy instances. You configure your load +// balancer to accept incoming traffic by specifying one or more listeners, +// which are configured with a protocol and port number for connections from +// clients to the load balancer and a protocol and port number for connections +// from the load balancer to the instances. +// +// Elastic Load Balancing supports three types of load balancers: Application +// Load Balancers, Network Load Balancers, and Classic Load Balancers. You can +// select a load balancer based on your application needs. For more information, +// see the Elastic Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). 
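The elb changes further below add an OperationNotPermitted error code and thread it through the regenerated CreateLoadBalancer examples. Here is a condensed, hypothetical sketch of creating a Classic Load Balancer with one HTTP listener (as the rewritten doc.go text above describes) and checking for the new code; the load balancer name, zone, and ports are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/elb"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := elb.New(cfg)

	// One listener: HTTP from clients on port 80, forwarded to port 80
	// on the registered instances. Name and zone are placeholders.
	req := client.CreateLoadBalancerRequest(&elb.CreateLoadBalancerInput{
		LoadBalancerName:  aws.String("my-classic-lb"),
		AvailabilityZones: []string{"us-west-2a"},
		Listeners: []elb.Listener{{
			Protocol:         aws.String("HTTP"),
			LoadBalancerPort: aws.Int64(80),
			InstancePort:     aws.Int64(80),
		}},
	})
	if _, err := req.Send(); err != nil {
		if aerr, ok := err.(awserr.Error); ok &&
			aerr.Code() == elb.ErrCodeOperationNotPermittedException {
			// Error code added to this service in this release.
			log.Fatal("operation not permitted: ", aerr.Message())
		}
		log.Fatal(err)
	}
	fmt.Println("load balancer created")
}
```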
// // This reference covers the 2012-06-01 API, which supports Classic Load Balancers. -// The 2015-12-01 API supports Application Load Balancers. +// The 2015-12-01 API supports Application Load Balancers and Network Load Balancers. // // To get started, create a load balancer with one or more listeners using CreateLoadBalancer. // Register your instances with the load balancer using RegisterInstancesWithLoadBalancer. diff --git a/service/elb/errors.go b/service/elb/errors.go index 77ffb20ecd6..fbf2140d87a 100644 --- a/service/elb/errors.go +++ b/service/elb/errors.go @@ -91,6 +91,12 @@ const ( // The specified load balancer attribute does not exist. ErrCodeLoadBalancerAttributeNotFoundException = "LoadBalancerAttributeNotFound" + // ErrCodeOperationNotPermittedException for service response error code + // "OperationNotPermitted". + // + // This operation is not allowed. + ErrCodeOperationNotPermittedException = "OperationNotPermitted" + // ErrCodePolicyNotFoundException for service response error code // "PolicyNotFound". // diff --git a/service/elb/examples_test.go b/service/elb/examples_test.go index a0709a79064..c85d4b827c9 100644 --- a/service/elb/examples_test.go +++ b/service/elb/examples_test.go @@ -349,6 +349,8 @@ func ExampleELB_CreateLoadBalancerRequest_shared00() { fmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error()) case elb.ErrCodeUnsupportedProtocolException: fmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error()) + case elb.ErrCodeOperationNotPermittedException: + fmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -415,6 +417,8 @@ func ExampleELB_CreateLoadBalancerRequest_shared01() { fmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error()) case elb.ErrCodeUnsupportedProtocolException: fmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error()) + case elb.ErrCodeOperationNotPermittedException: + fmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -491,6 +495,8 @@ func ExampleELB_CreateLoadBalancerRequest_shared02() { fmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error()) case elb.ErrCodeUnsupportedProtocolException: fmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error()) + case elb.ErrCodeOperationNotPermittedException: + fmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -564,6 +570,8 @@ func ExampleELB_CreateLoadBalancerRequest_shared03() { fmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error()) case elb.ErrCodeUnsupportedProtocolException: fmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error()) + case elb.ErrCodeOperationNotPermittedException: + fmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -634,6 +642,8 @@ func ExampleELB_CreateLoadBalancerRequest_shared04() { fmt.Println(elb.ErrCodeDuplicateTagKeysException, aerr.Error()) case elb.ErrCodeUnsupportedProtocolException: fmt.Println(elb.ErrCodeUnsupportedProtocolException, aerr.Error()) + case elb.ErrCodeOperationNotPermittedException: + fmt.Println(elb.ErrCodeOperationNotPermittedException, aerr.Error()) default: fmt.Println(aerr.Error()) } diff --git a/service/elbv2/api.go b/service/elbv2/api.go index 2d74b5183e6..cdf8c761dd4 100644 --- a/service/elbv2/api.go +++ b/service/elbv2/api.go @@ -146,14 +146,12 @@ func (r CreateListenerRequest) Send() (*CreateListenerOutput, error) { // Creates a listener for the 
specified Application Load Balancer or Network // Load Balancer. // +// You can create up to 10 listeners per load balancer. +// // To update a listener, use ModifyListener. When you are finished with a listener, // you can delete it using DeleteListener. If you are finished with both the // listener and the load balancer, you can delete them both using DeleteLoadBalancer. // -// This operation is idempotent, which means that it completes at most one time. -// If you attempt to create multiple listeners with the same settings, each -// call succeeds. -// // For more information, see Listeners for Your Application Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html) // in the Application Load Balancers Guide and Listeners for Your Network Load // Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-listeners.html) @@ -216,15 +214,13 @@ func (r CreateLoadBalancerRequest) Send() (*CreateLoadBalancerOutput, error) { // your current load balancers, see DescribeLoadBalancers. When you are finished // with a load balancer, you can delete it using DeleteLoadBalancer. // -// For limit information, see Limits for Your Application Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html) +// You can create up to 20 load balancers per region per account. You can request +// an increase for the number of load balancers for your account. For more information, +// see Limits for Your Application Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html) // in the Application Load Balancers Guide and Limits for Your Network Load // Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-limits.html) // in the Network Load Balancers Guide. // -// This operation is idempotent, which means that it completes at most one time. -// If you attempt to create multiple load balancers with the same settings, -// each call succeeds. -// // For more information, see Application Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html) // in the Application Load Balancers Guide and Network Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/network-load-balancers.html) // in the Network Load Balancers Guide. @@ -347,10 +343,6 @@ func (r CreateTargetGroupRequest) Send() (*CreateTargetGroupOutput, error) { // // To delete a target group, use DeleteTargetGroup. // -// This operation is idempotent, which means that it completes at most one time. -// If you attempt to create multiple target groups with the same settings, each -// call succeeds. -// // For more information, see Target Groups for Your Application Load Balancers // (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) // in the Application Load Balancers Guide or Target Groups for Your Network @@ -2546,11 +2538,10 @@ type CreateLoadBalancerInput struct { // one subnet per Availability Zone. You must specify either subnets or subnet // mappings. // - // [Application Load Balancers] You must specify subnets from at least two Availability - // Zones. You cannot specify Elastic IP addresses for your subnets. + // [Network Load Balancers] You can specify one Elastic IP address per subnet. // - // [Network Load Balancers] You can specify subnets from one or more Availability - // Zones. 
You can specify one Elastic IP address per subnet. + // [Application Load Balancers] You cannot specify Elastic IP addresses for + // your subnets. SubnetMappings []SubnetMapping `type:"list"` // The IDs of the subnets to attach to the load balancer. You can specify only @@ -2559,9 +2550,6 @@ type CreateLoadBalancerInput struct { // // [Application Load Balancers] You must specify subnets from at least two Availability // Zones. - // - // [Network Load Balancers] You can specify subnets from one or more Availability - // Zones. Subnets []string `type:"list"` // One or more tags to assign to the load balancer. @@ -4471,10 +4459,6 @@ type Limit struct { // * target-groups // // * targets-per-application-load-balancer - // - // * targets-per-availability-zone-per-network-load-balancer - // - // * targets-per-network-load-balancer Name *string `type:"string"` } @@ -6131,7 +6115,8 @@ type SetSubnetsInput struct { // Zones. You can specify only one subnet per Availability Zone. You must specify // either subnets or subnet mappings. // - // You cannot specify Elastic IP addresses for your subnets. + // The load balancer is allocated one static IP address per subnet. You cannot + // specify your own Elastic IP addresses. SubnetMappings []SubnetMapping `type:"list"` // The IDs of the subnets. You must specify subnets from at least two Availability @@ -6626,9 +6611,6 @@ type TargetGroupAttribute struct { // from draining to unused. The range is 0-3600 seconds. The default value // is 300 seconds. // - // * proxy_protocol_v2.enabled - [Network Load Balancers] Indicates whether - // Proxy Protocol version 2 is enabled. - // // * stickiness.enabled - [Application Load Balancers] Indicates whether // sticky sessions are enabled. The value is true or false. // diff --git a/service/elbv2/errors.go b/service/elbv2/errors.go index 88edc02e9cb..4571d645db2 100644 --- a/service/elbv2/errors.go +++ b/service/elbv2/errors.go @@ -86,8 +86,8 @@ const ( // ErrCodeInvalidTargetException for service response error code // "InvalidTarget". // - // The specified target does not exist, is not in the same VPC as the target - // group, or has an unsupported instance type. + // The specified target does not exist or is not in the same VPC as the target + // group. 
ErrCodeInvalidTargetException = "InvalidTarget" // ErrCodeListenerNotFoundException for service response error code diff --git a/service/elbv2/examples_test.go b/service/elbv2/examples_test.go index 35809f9efd0..51cd52df3b4 100644 --- a/service/elbv2/examples_test.go +++ b/service/elbv2/examples_test.go @@ -272,6 +272,8 @@ func ExampleELBV2_CreateLoadBalancerRequest_shared00() { fmt.Println(elbv2.ErrCodeAllocationIdNotFoundException, aerr.Error()) case elbv2.ErrCodeAvailabilityZoneNotSupportedException: fmt.Println(elbv2.ErrCodeAvailabilityZoneNotSupportedException, aerr.Error()) + case elbv2.ErrCodeOperationNotPermittedException: + fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -335,6 +337,8 @@ func ExampleELBV2_CreateLoadBalancerRequest_shared01() { fmt.Println(elbv2.ErrCodeAllocationIdNotFoundException, aerr.Error()) case elbv2.ErrCodeAvailabilityZoneNotSupportedException: fmt.Println(elbv2.ErrCodeAvailabilityZoneNotSupportedException, aerr.Error()) + case elbv2.ErrCodeOperationNotPermittedException: + fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error()) default: fmt.Println(aerr.Error()) } diff --git a/service/glue/api.go b/service/glue/api.go index a303a36e8b9..6b39078b389 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -276,7 +276,7 @@ func (r BatchStopJobRunRequest) Send() (*BatchStopJobRunOutput, error) { // BatchStopJobRunRequest returns a request value for making API operation for // AWS Glue. // -// Stops a batch of job runs for a given job. +// Stops one or more job runs for a specified Job. // // // Example sending a request using the BatchStopJobRunRequest method. // req := client.BatchStopJobRunRequest(params) @@ -671,7 +671,7 @@ func (r CreateScriptRequest) Send() (*CreateScriptOutput, error) { // CreateScriptRequest returns a request value for making API operation for // AWS Glue. // -// Transforms a directed acyclic graph (DAG) into a Python script. +// Transforms a directed acyclic graph (DAG) into code. // // // Example sending a request using the CreateScriptRequest method. // req := client.CreateScriptRequest(params) @@ -1113,7 +1113,7 @@ func (r DeleteJobRequest) Send() (*DeleteJobOutput, error) { // DeleteJobRequest returns a request value for making API operation for // AWS Glue. // -// Deletes a specified job. +// Deletes a specified job. If the job is not found, no exception is thrown. // // // Example sending a request using the DeleteJobRequest method. // req := client.DeleteJobRequest(params) @@ -1260,7 +1260,8 @@ func (r DeleteTriggerRequest) Send() (*DeleteTriggerOutput, error) { // DeleteTriggerRequest returns a request value for making API operation for // AWS Glue. // -// Deletes a specified trigger. +// Deletes a specified trigger. If the trigger is not found, no exception is +// thrown. // // // Example sending a request using the DeleteTriggerRequest method. // req := client.DeleteTriggerRequest(params) @@ -2842,7 +2843,7 @@ func (r GetPlanRequest) Send() (*GetPlanOutput, error) { // GetPlanRequest returns a request value for making API operation for // AWS Glue. // -// Gets a Python script to perform a specified mapping. +// Gets code to perform a specified mapping. // // // Example sending a request using the GetPlanRequest method. // req := client.GetPlanRequest(params) @@ -3706,7 +3707,8 @@ func (r StartTriggerRequest) Send() (*StartTriggerOutput, error) { // StartTriggerRequest returns a request value for making API operation for // AWS Glue. 
// -// Starts an existing trigger. +// Starts an existing trigger. See Triggering Jobs (http://docs.aws.amazon.com/glue/latest/dg/trigger-job.html) +// for information about how different types of trigger are started. // // // Example sending a request using the StartTriggerRequest method. // req := client.StartTriggerRequest(params) @@ -4428,6 +4430,17 @@ type Action struct { _ struct{} `type:"structure"` // Arguments to be passed to the job. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) + // topic in the developer guide. Arguments map[string]string `type:"map"` // The name of a job to be executed. @@ -5056,19 +5069,18 @@ func (s *BatchGetPartitionOutput) SetUnprocessedKeys(v []PartitionValueList) *Ba return s } -// Details about the job run and the error that occurred while trying to submit -// it for stopping. +// Records an error that occurred when attempting to stop a specified JobRun. // Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRunError type BatchStopJobRunError struct { _ struct{} `type:"structure"` - // The details of the error that occurred. + // Specifies details about the error that was encountered. ErrorDetail *ErrorDetail `type:"structure"` - // The name of the job. + // The name of the Job in question. JobName *string `min:"1" type:"string"` - // The job run Id. + // The JobRunId of the JobRun in question. JobRunId *string `min:"1" type:"string"` } @@ -5104,12 +5116,12 @@ func (s *BatchStopJobRunError) SetJobRunId(v string) *BatchStopJobRunError { type BatchStopJobRunInput struct { _ struct{} `type:"structure"` - // The name of the job whose job runs are to be stopped. + // The name of the Job in question. // // JobName is a required field JobName *string `min:"1" type:"string" required:"true"` - // A list of job run Ids of the given job to be stopped. + // A list of the JobRunIds that should be stopped for that Job. // // JobRunIds is a required field JobRunIds []string `min:"1" type:"list" required:"true"` @@ -5167,11 +5179,11 @@ type BatchStopJobRunOutput struct { responseMetadata aws.Response - // A list containing the job run Ids and details of the error that occurred - // for each job run while submitting to stop. + // A list of the errors that were encountered in trying to stop JobRuns, including + // the JobRunId for which each error was encountered and details about the error. Errors []BatchStopJobRunError `type:"list"` - // A list of job runs which are successfully submitted for stopping. + // A list of the JobRuns that were successfully submitted for stopping. SuccessfulSubmissions []BatchStopJobRunSuccessfulSubmission `type:"list"` } @@ -5202,15 +5214,15 @@ func (s *BatchStopJobRunOutput) SetSuccessfulSubmissions(v []BatchStopJobRunSucc return s } -// Details about the job run which is submitted successfully for stopping. +// Records a successful request to stop a specified JobRun.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchStopJobRunSuccessfulSubmission type BatchStopJobRunSuccessfulSubmission struct { _ struct{} `type:"structure"` - // The name of the job. + // The Name of the Job in question. JobName *string `min:"1" type:"string"` - // The job run Id. + // The JobRunId of the JobRun in question. JobRunId *string `min:"1" type:"string"` } @@ -5675,13 +5687,15 @@ func (s *Column) SetType(v string) *Column { type Condition struct { _ struct{} `type:"structure"` - // The name of the job in question. + // The name of the Job to whose JobRuns this condition applies and on which + // this trigger waits. JobName *string `min:"1" type:"string"` // A logical operator. LogicalOperator LogicalOperator `type:"string" enum:"true"` - // The condition state. + // The condition state. Currently, the values supported are SUCCEEDED, STOPPED + // and FAILED. State JobRunState `type:"string" enum:"true"` } @@ -5957,6 +5971,9 @@ type Crawler struct { // input format, output format, serde information, and schema from their parent // table, rather than detect this information separately for each partition. // Use the following JSON string to specify that behavior: + // + // Example: '{ "Version": 1.0, "CrawlerOutput": { "Partitions": { "AddOrUpdateBehavior": + // "InheritFromTable" } } }' Configuration *string `type:"string"` // If the crawler is running, contains the total time elapsed since the last @@ -6404,6 +6421,10 @@ type CreateCrawlerInput struct { // You can use this field to force partitions to inherit metadata such as classification, // input format, output format, serde information, and schema from their parent // table, rather than detect this information separately for each partition. + // Use the following JSON string to specify that behavior: + // + // Example: '{ "Version": 1.0, "CrawlerOutput": { "Partitions": { "AddOrUpdateBehavior": + // "InheritFromTable" } } }' Configuration *string `type:"string"` // The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/*. @@ -7007,7 +7028,11 @@ func (s *CreateGrokClassifierRequest) SetName(v string) *CreateGrokClassifierReq type CreateJobInput struct { _ struct{} `type:"structure"` - // The number of capacity units allocated to this job. + // The number of AWS Glue data processing units (DPUs) to allocate to this Job. + // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int64 `type:"integer"` // The JobCommand that executes this job. @@ -7018,7 +7043,18 @@ type CreateJobInput struct { // The connections used for this job. Connections *ConnectionsList `type:"structure"` - // The default parameters for this job. + // The default arguments for this job. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. 
+ // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) + // topic in the developer guide. DefaultArguments map[string]string `type:"map"` // Description of the job. @@ -7034,12 +7070,12 @@ type CreateJobInput struct { // The maximum number of times to retry this job if it fails. MaxRetries *int64 `type:"integer"` - // The name you assign to this job. + // The name you assign to this job. It must be unique in your account. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The role associated with this job. + // The name of the IAM role associated with this job. // // Role is a required field Role *string `type:"string" required:"true"` @@ -7146,7 +7182,7 @@ type CreateJobOutput struct { responseMetadata aws.Response - // The unique name of the new job that has been created. + // The unique name that was provided. Name *string `min:"1" type:"string"` } @@ -7296,6 +7332,9 @@ type CreateScriptInput struct { // A list of the nodes in the DAG. DagNodes []CodeGenNode `type:"list"` + + // The programming language of the resulting code from the DAG. + Language Language `type:"string" enum:"true"` } // String returns the string representation @@ -7344,6 +7383,12 @@ func (s *CreateScriptInput) SetDagNodes(v []CodeGenNode) *CreateScriptInput { return s } +// SetLanguage sets the Language field's value. +func (s *CreateScriptInput) SetLanguage(v Language) *CreateScriptInput { + s.Language = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateScriptResponse type CreateScriptOutput struct { _ struct{} `type:"structure"` @@ -7352,6 +7397,9 @@ type CreateScriptOutput struct { // The Python script generated from the DAG. PythonScript *string `type:"string"` + + // The Scala code generated from the DAG. + ScalaCode *string `type:"string"` } // String returns the string representation @@ -7375,6 +7423,12 @@ func (s *CreateScriptOutput) SetPythonScript(v string) *CreateScriptOutput { return s } +// SetScalaCode sets the ScalaCode field's value. +func (s *CreateScriptOutput) SetScalaCode(v string) *CreateScriptOutput { + s.ScalaCode = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateTableRequest type CreateTableInput struct { _ struct{} `type:"structure"` @@ -7485,18 +7539,22 @@ type CreateTriggerInput struct { // A description of the new trigger. Description *string `type:"string"` - // The name to assign to the new trigger. + // The name of the trigger. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // A predicate to specify when the new trigger should fire. + // + // This field is required when the trigger type is CONDITIONAL. Predicate *Predicate `type:"structure"` // A cron expression used to specify the schedule (see Time-Based Schedules // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). // For example, to run something every day at 12:15 UTC, you would specify: // cron(15 12 * * ? *). + // + // This field is required when the trigger type is SCHEDULED. Schedule *string `type:"string"` // The type of the new trigger. @@ -7593,7 +7651,7 @@ type CreateTriggerOutput struct { responseMetadata aws.Response - // The name assigned to the new trigger. + // The name of the trigger. 
Name *string `min:"1" type:"string"` } @@ -7732,8 +7790,10 @@ type CreateXMLClassifierRequest struct { Name *string `min:"1" type:"string" required:"true"` // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot be an empty element. It must contain - // child elements representing fields in the record. + // being parsed. Note that this cannot identify a self-closing element (closed + // by />). An empty row element that contains only attributes can be parsed + // as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> + // is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string `type:"string"` } @@ -8994,7 +9054,9 @@ func (s *ErrorDetail) SetErrorMessage(v string) *ErrorDetail { type ExecutionProperty struct { _ struct{} `type:"structure"` - // The maximum number of concurrent runs allowed for a job. + // The maximum number of concurrent runs allowed for a job. The default is 1. + // An error is returned when this threshold is reached. The maximum value you + // can specify is controlled by a service limit. MaxConcurrentRuns *int64 `type:"integer"` } @@ -10225,7 +10287,7 @@ type GetJobRunInput struct { // JobName is a required field JobName *string `min:"1" type:"string" required:"true"` - // A list of the predecessor runs to return as well. + // True if a list of predecessor runs should be returned. PredecessorsIncluded *bool `type:"boolean"` // The ID of the job run. @@ -10898,6 +10960,9 @@ func (s *GetPartitionsOutput) SetPartitions(v []Partition) *GetPartitionsOutput type GetPlanInput struct { _ struct{} `type:"structure"` + // The programming language of the code to perform the mapping. + Language Language `type:"string" enum:"true"` + // Parameters for the mapping. Location *Location `type:"structure"` @@ -10960,6 +11025,12 @@ func (s *GetPlanInput) Validate() error { return nil } +// SetLanguage sets the Language field's value. +func (s *GetPlanInput) SetLanguage(v Language) *GetPlanInput { + s.Language = v + return s +} + // SetLocation sets the Location field's value. func (s *GetPlanInput) SetLocation(v *Location) *GetPlanInput { s.Location = v @@ -10992,6 +11063,9 @@ type GetPlanOutput struct { // A Python script to perform the mapping. PythonScript *string `type:"string"` + + // Scala code to perform the mapping. + ScalaCode *string `type:"string"` } // String returns the string representation @@ -11015,6 +11089,12 @@ func (s *GetPlanOutput) SetPythonScript(v string) *GetPlanOutput { return s } +// SetScalaCode sets the ScalaCode field's value. +func (s *GetPlanOutput) SetScalaCode(v string) *GetPlanOutput { + s.ScalaCode = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTableRequest type GetTableInput struct { _ struct{} `type:"structure"` @@ -11461,7 +11541,9 @@ func (s *GetTriggerOutput) SetTrigger(v *Trigger) *GetTriggerOutput { type GetTriggersInput struct { _ struct{} `type:"structure"` - // The name of the job for which to retrieve triggers. + // The name of the job for which to retrieve triggers. The trigger that can + // start this job will be returned, and if there is no such trigger, all triggers + // will be returned. DependentJobName *string `min:"1" type:"string"` // The maximum size of the response. @@ -11989,12 +12071,16 @@ func (s *JdbcTarget) SetPath(v string) *JdbcTarget { return s } -// Specifies a job in the Data Catalog. +// Specifies a job.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Job type Job struct { _ struct{} `type:"structure"` - // The number of capacity units allocated to this job. + // The number of AWS Glue data processing units (DPUs) allocated to this Job. + // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int64 `type:"integer"` // The JobCommand that executes this job. @@ -12006,7 +12092,18 @@ type Job struct { // The time and date that this job specification was created. CreatedOn *time.Time `type:"timestamp" timestampFormat:"unix"` - // The default parameters for this job. + // The default arguments for this job, specified as name-value pairs. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) + // topic in the developer guide. DefaultArguments map[string]string `type:"map"` // Description of this job. @@ -12028,7 +12125,7 @@ type Job struct { // The name you assign to this job. Name *string `min:"1" type:"string"` - // The role associated with this job. + // The name of the IAM role associated with this job. Role *string `type:"string"` } @@ -12180,10 +12277,10 @@ func (s *JobBookmarkEntry) SetVersion(v int64) *JobBookmarkEntry { type JobCommand struct { _ struct{} `type:"structure"` - // The name of this job command. + // The name of the job command: this must be glueetl. Name *string `type:"string"` - // Specifies the location of a script that executes a job. + // Specifies the S3 path to a script that executes a job (required). ScriptLocation *string `type:"string"` } @@ -12214,13 +12311,29 @@ func (s *JobCommand) SetScriptLocation(v string) *JobCommand { type JobRun struct { _ struct{} `type:"structure"` - // The amount of infrastructure capacity allocated to this job run. + // The number of AWS Glue data processing units (DPUs) allocated to this JobRun. + // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int64 `type:"integer"` - // The job arguments associated with this run. + // The job arguments associated with this run. These override equivalent default + // arguments set for the job. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own job arguments, + // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. 
+ // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) + // topic in the developer guide. Arguments map[string]string `type:"map"` - // The number or the attempt to run this job. + // The number of the attempt to run this job. Attempt *int64 `type:"integer"` // The date and time this job run completed. @@ -12244,13 +12357,14 @@ type JobRun struct { // A list of predecessors to this job run. PredecessorRuns []Predecessor `type:"list"` - // The ID of the previous run of this job. + // The ID of the previous run of this job. For example, the JobRunId specified + // in the StartJobRun action. PreviousRunId *string `min:"1" type:"string"` // The date and time at which this job run was started. StartedOn *time.Time `type:"timestamp" timestampFormat:"unix"` - // The name of the trigger for this job run. + // The name of the trigger that started this job run. TriggerName *string `min:"1" type:"string"` } @@ -12342,21 +12456,37 @@ func (s *JobRun) SetTriggerName(v string) *JobRun { return s } -// Specifies information used to update an existing job. +// Specifies information used to update an existing job. Note that the previous +// job definition will be completely overwritten by this information. // Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobUpdate type JobUpdate struct { _ struct{} `type:"structure"` - // The number of capacity units allocated to this job. + // The number of AWS Glue data processing units (DPUs) to allocate to this Job. + // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int64 `type:"integer"` - // The JobCommand that executes this job. + // The JobCommand that executes this job (required). Command *JobCommand `type:"structure"` // The connections used for this job. Connections *ConnectionsList `type:"structure"` - // The default parameters for this job. + // The default arguments for this job. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) + // topic in the developer guide. DefaultArguments map[string]string `type:"map"` // Description of the job. @@ -12372,7 +12502,7 @@ type JobUpdate struct { // The maximum number of times to retry this job if it fails. MaxRetries *int64 `type:"integer"` - // The role associated with this job. + // The name of the IAM role associated with this job (required). Role *string `type:"string"` } @@ -12996,7 +13126,8 @@ func (s *PhysicalConnectionRequirements) SetSubnetId(v string) *PhysicalConnecti return s } -// A job run that preceded this one. 
+// A job run that was used in the predicate of a conditional trigger that triggered +// this job run. // Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Predecessor type Predecessor struct { _ struct{} `type:"structure"` @@ -13004,7 +13135,7 @@ type Predecessor struct { // The name of the predecessor job. JobName *string `min:"1" type:"string"` - // The job-run ID of the precessor job run. + // The job-run ID of the predecessor job run. RunId *string `min:"1" type:"string"` } @@ -13605,10 +13736,26 @@ func (s StartCrawlerScheduleOutput) SDKResponseMetadata() aws.Response { type StartJobRunInput struct { _ struct{} `type:"structure"` - // The infrastructure capacity to allocate to this job. + // The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. + // From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative + // measure of processing power that consists of 4 vCPUs of compute capacity + // and 16 GB of memory. For more information, see the AWS Glue pricing page + // (https://aws.amazon.com/glue/pricing/). AllocatedCapacity *int64 `type:"integer"` - // Specific arguments for this job run. + // The job arguments specifically for this run. They override the equivalent + // default arguments set for the job itself. + // + // You can specify arguments here that your own job-execution script consumes, + // as well as arguments that AWS Glue itself consumes. + // + // For information about how to specify and consume your own Job arguments, + // see the Calling AWS Glue APIs in Python (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) + // topic in the developer guide. + // + // For information about the key-value pairs that AWS Glue consumes to set up + // your job, see the Special Parameters Used by AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) + // topic in the developer guide. Arguments map[string]string `type:"map"` // The name of the job to start. @@ -13616,7 +13763,7 @@ type StartJobRunInput struct { // JobName is a required field JobName *string `min:"1" type:"string" required:"true"` - // The ID of the job run to start. + // The ID of a previous JobRun to retry. JobRunId *string `min:"1" type:"string"` } @@ -14546,13 +14693,13 @@ type Trigger struct { // A description of this trigger. Description *string `type:"string"` - // The trigger ID. + // Reserved for future use. Id *string `min:"1" type:"string"` // Name of the trigger. Name *string `min:"1" type:"string"` - // The predicate of this trigger. + // The predicate of this trigger, which defines when it will fire. Predicate *Predicate `type:"structure"` // A cron expression used to specify the schedule (see Time-Based Schedules @@ -14626,7 +14773,8 @@ func (s *Trigger) SetType(v TriggerType) *Trigger { return s } -// A structure used to provide information used to updata a trigger. +// A structure used to provide information used to update a trigger. This object +// will update the previous trigger definition by overwriting it completely. // Please also see https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/TriggerUpdate type TriggerUpdate struct { _ struct{} `type:"structure"` @@ -14637,13 +14785,13 @@ type TriggerUpdate struct { // A description of this trigger. Description *string `type:"string"` - // The name of the trigger. + // Reserved for future use. Name *string `min:"1" type:"string"` // The predicate of this trigger, which defines when it will fire.
Predicate *Predicate `type:"structure"` - // An updated cron expression used to specify the schedule (see Time-Based Schedules + // A cron expression used to specify the schedule (see Time-Based Schedules // for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). // For example, to run something every day at 12:15 UTC, you would specify: // cron(15 12 * * ? *). @@ -14904,6 +15052,9 @@ type UpdateCrawlerInput struct { // input format, output format, serde information, and schema from their parent // table, rather than detect this information separately for each partition. // Use the following JSON string to specify that behavior: + // + // Example: '{ "Version": 1.0, "CrawlerOutput": { "Partitions": { "AddOrUpdateBehavior": + // "InheritFromTable" } } }' Configuration *string `type:"string"` // The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/*. @@ -15929,8 +16080,10 @@ type UpdateXMLClassifierRequest struct { Name *string `min:"1" type:"string" required:"true"` // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot be an empty element. It must contain - // child elements representing fields in the record. + // being parsed. Note that this cannot identify a self-closing element (closed + // by />). An empty row element that contains only attributes can be parsed + // as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> + // is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string `type:"string"` } @@ -16158,8 +16311,10 @@ type XMLClassifier struct { Name *string `min:"1" type:"string" required:"true"` // The XML tag designating the element that contains each record in an XML document - // being parsed. Note that this cannot be an empty element. It must contain - // child elements representing fields in the record. + // being parsed. Note that this cannot identify a self-closing element (closed + // by />). An empty row element that contains only attributes can be parsed + // as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> + // is okay, but <row item_a="A" item_b="B" /> is not). RowTag *string `type:"string"` // The version of this classifier. @@ -16267,6 +16422,14 @@ const ( JobRunStateFailed JobRunState = "FAILED" ) +type Language string + +// Enum values for Language +const ( + LanguagePython Language = "PYTHON" + LanguageScala Language = "SCALA" +) + type LastCrawlStatus string // Enum values for LastCrawlStatus @@ -16281,6 +16444,7 @@ type Logical string // Enum values for Logical const ( LogicalAnd Logical = "AND" + LogicalAny Logical = "ANY" ) type LogicalOperator string diff --git a/service/inspector/api.go b/service/inspector/api.go index 29685041009..d556485a72b 100644 --- a/service/inspector/api.go +++ b/service/inspector/api.go @@ -84,9 +84,12 @@ func (r CreateAssessmentTargetRequest) Send() (*CreateAssessmentTargetOutput, er // Amazon Inspector. // // Creates a new assessment target using the ARN of the resource group that -// is generated by CreateResourceGroup. You can create up to 50 assessment targets -// per AWS account. You can run up to 500 concurrent agents per AWS account. -// For more information, see Amazon Inspector Assessment Targets (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). +// is generated by CreateResourceGroup.
If the service-linked role (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_slr.html) +// isn’t already registered, also creates and registers a service-linked role +// to grant Amazon Inspector access to AWS Services needed to perform security +// assessments. You can create up to 50 assessment targets per AWS account. +// You can run up to 500 concurrent agents per AWS account. For more information, +// see Amazon Inspector Assessment Targets (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_applications.html). // // // Example sending a request using the CreateAssessmentTargetRequest method. // req := client.CreateAssessmentTargetRequest(params) @@ -136,7 +139,10 @@ func (r CreateAssessmentTemplateRequest) Send() (*CreateAssessmentTemplateOutput // Amazon Inspector. // // Creates an assessment template for the assessment target that is specified -// by the ARN of the assessment target. +// by the ARN of the assessment target. If the service-linked role (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_slr.html) +// isn’t already registered, also creates and registers a service-linked role +// to grant Amazon Inspector access to AWS Services needed to perform security +// assessments. // // // Example sending a request using the CreateAssessmentTemplateRequest method. // req := client.CreateAssessmentTemplateRequest(params) @@ -1739,8 +1745,8 @@ func (r RegisterCrossAccountAccessRoleRequest) Send() (*RegisterCrossAccountAcce // RegisterCrossAccountAccessRoleRequest returns a request value for making API operation for // Amazon Inspector. // -// Registers the IAM role that Amazon Inspector uses to list your EC2 instances -// at the start of the assessment run or when you call the PreviewAgents action. +// Registers the IAM role that grants Amazon Inspector access to AWS Services +// needed to perform security assessments. // // // Example sending a request using the RegisterCrossAccountAccessRoleRequest method. // req := client.RegisterCrossAccountAccessRoleRequest(params) @@ -2334,13 +2340,34 @@ func (s *AgentFilter) SetAgentHealths(v []AgentHealth) *AgentFilter { type AgentPreview struct { _ struct{} `type:"structure"` + // The health status of the Amazon Inspector Agent. + AgentHealth AgentHealth `locationName:"agentHealth" type:"string" enum:"true"` + // The ID of the EC2 instance where the agent is installed. // // AgentId is a required field AgentId *string `locationName:"agentId" min:"1" type:"string" required:"true"` + // The version of the Amazon Inspector Agent. + AgentVersion *string `locationName:"agentVersion" min:"1" type:"string"` + // The Auto Scaling group for the EC2 instance where the agent is installed. AutoScalingGroup *string `locationName:"autoScalingGroup" min:"1" type:"string"` + + // The hostname of the EC2 instance on which the Amazon Inspector Agent is installed. + Hostname *string `locationName:"hostname" type:"string"` + + // The IP address of the EC2 instance on which the Amazon Inspector Agent is + // installed. + Ipv4Address *string `locationName:"ipv4Address" min:"7" type:"string"` + + // The kernel version of the operating system running on the EC2 instance on + // which the Amazon Inspector Agent is installed. + KernelVersion *string `locationName:"kernelVersion" min:"1" type:"string"` + + // The operating system running on the EC2 instance on which the Amazon Inspector + // Agent is installed. 
+ OperatingSystem *string `locationName:"operatingSystem" min:"1" type:"string"` } // String returns the string representation @@ -2353,18 +2380,54 @@ func (s AgentPreview) GoString() string { return s.String() } +// SetAgentHealth sets the AgentHealth field's value. +func (s *AgentPreview) SetAgentHealth(v AgentHealth) *AgentPreview { + s.AgentHealth = v + return s +} + // SetAgentId sets the AgentId field's value. func (s *AgentPreview) SetAgentId(v string) *AgentPreview { s.AgentId = &v return s } +// SetAgentVersion sets the AgentVersion field's value. +func (s *AgentPreview) SetAgentVersion(v string) *AgentPreview { + s.AgentVersion = &v + return s +} + // SetAutoScalingGroup sets the AutoScalingGroup field's value. func (s *AgentPreview) SetAutoScalingGroup(v string) *AgentPreview { s.AutoScalingGroup = &v return s } +// SetHostname sets the Hostname field's value. +func (s *AgentPreview) SetHostname(v string) *AgentPreview { + s.Hostname = &v + return s +} + +// SetIpv4Address sets the Ipv4Address field's value. +func (s *AgentPreview) SetIpv4Address(v string) *AgentPreview { + s.Ipv4Address = &v + return s +} + +// SetKernelVersion sets the KernelVersion field's value. +func (s *AgentPreview) SetKernelVersion(v string) *AgentPreview { + s.KernelVersion = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *AgentPreview) SetOperatingSystem(v string) *AgentPreview { + s.OperatingSystem = &v + return s +} + // A snapshot of an Amazon Inspector assessment run that contains the findings // of the assessment run . // @@ -2992,6 +3055,12 @@ type AssessmentTemplate struct { // Arn is a required field Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + // The number of existing assessment runs associated with this assessment template. + // This value can be zero or a positive integer. + // + // AssessmentRunCount is a required field + AssessmentRunCount *int64 `locationName:"assessmentRunCount" type:"integer" required:"true"` + // The ARN of the assessment target that corresponds to this assessment template. // // AssessmentTargetArn is a required field @@ -3009,6 +3078,11 @@ type AssessmentTemplate struct { // DurationInSeconds is a required field DurationInSeconds *int64 `locationName:"durationInSeconds" min:"180" type:"integer" required:"true"` + // The Amazon Resource Name (ARN) of the most recent assessment run associated + // with this assessment template. This value exists only when the value of assessmentRunCount + // is greater than zero. + LastAssessmentRunArn *string `locationName:"lastAssessmentRunArn" min:"1" type:"string"` + // The name of the assessment template. // // Name is a required field @@ -3042,6 +3116,12 @@ func (s *AssessmentTemplate) SetArn(v string) *AssessmentTemplate { return s } +// SetAssessmentRunCount sets the AssessmentRunCount field's value. +func (s *AssessmentTemplate) SetAssessmentRunCount(v int64) *AssessmentTemplate { + s.AssessmentRunCount = &v + return s +} + // SetAssessmentTargetArn sets the AssessmentTargetArn field's value. func (s *AssessmentTemplate) SetAssessmentTargetArn(v string) *AssessmentTemplate { s.AssessmentTargetArn = &v @@ -3060,6 +3140,12 @@ func (s *AssessmentTemplate) SetDurationInSeconds(v int64) *AssessmentTemplate { return s } +// SetLastAssessmentRunArn sets the LastAssessmentRunArn field's value. +func (s *AssessmentTemplate) SetLastAssessmentRunArn(v string) *AssessmentTemplate { + s.LastAssessmentRunArn = &v + return s +} + // SetName sets the Name field's value. 
func (s *AssessmentTemplate) SetName(v string) *AssessmentTemplate { s.Name = &v @@ -6076,8 +6162,8 @@ func (s *PreviewAgentsOutput) SetNextToken(v string) *PreviewAgentsOutput { type RegisterCrossAccountAccessRoleInput struct { _ struct{} `type:"structure"` - // The ARN of the IAM role that Amazon Inspector uses to list your EC2 instances - // during the assessment run or when you call the PreviewAgents action. + // The ARN of the IAM role that grants Amazon Inspector access to AWS Services + // needed to perform security assessments. // // RoleArn is a required field RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` @@ -7215,6 +7301,7 @@ type AgentHealth string const ( AgentHealthHealthy AgentHealth = "HEALTHY" AgentHealthUnhealthy AgentHealth = "UNHEALTHY" + AgentHealthUnknown AgentHealth = "UNKNOWN" ) type AgentHealthCode string diff --git a/service/kinesisanalytics/api.go b/service/kinesisanalytics/api.go index df31055d608..1e67ae0a5d7 100644 --- a/service/kinesisanalytics/api.go +++ b/service/kinesisanalytics/api.go @@ -198,10 +198,10 @@ func (r AddApplicationOutputRequest) Send() (*AddApplicationOutputOutput, error) // // If you want Amazon Kinesis Analytics to deliver data from an in-application // stream within your application to an external destination (such as an Amazon -// Kinesis stream or a Firehose delivery stream), you add the relevant configuration -// to your application using this operation. You can configure one or more outputs -// for your application. Each output configuration maps an in-application stream -// and an external destination. +// Kinesis stream, an Amazon Kinesis Firehose delivery stream, or an Amazon +// Lambda function), you add the relevant configuration to your application +// using this operation. You can configure one or more outputs for your application. +// Each output configuration maps an in-application stream and an external destination. // // You can use one of the output configurations to deliver data from your in-application // error stream to an external destination so that you can analyze the errors. @@ -330,9 +330,9 @@ func (r CreateApplicationRequest) Send() (*CreateApplicationOutput, error) { // // Creates an Amazon Kinesis Analytics application. You can configure each application // with one streaming source as input, application code to process the input, -// and up to five streaming destinations where you want Amazon Kinesis Analytics -// to write the output data from your application. For an overview, see How -// it Works (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html). +// and up to three destinations where you want Amazon Kinesis Analytics to write +// the output data from your application. For an overview, see How it Works +// (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html). // // In the input configuration, you map the streaming source to an in-application // stream, which you can think of as a constantly updating table. In the mapping, @@ -344,8 +344,7 @@ func (r CreateApplicationRequest) Send() (*CreateApplicationOutput, error) { // more SQL artifacts like SQL streams or pumps. // // In the output configuration, you can configure the application to write data -// from in-application streams created in your applications to up to five streaming -// destinations. +// from in-application streams created in your applications to up to three destinations. 
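The kinesisanalytics changes here extend application outputs to AWS Lambda destinations. Below is a speculative sketch of adding one with AddApplicationOutput. Only AddApplicationOutputInput's fields are confirmed by this diff; the Output, DestinationSchema, and LambdaOutput shapes and the RecordFormatTypeJson constant follow the v2 codegen conventions seen elsewhere in this release and should be checked against the generated package, and every ARN and name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/kinesisanalytics"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := kinesisanalytics.New(cfg)

	// Add a Lambda function as one of the application's (up to three)
	// destinations. ARNs, names, and the version ID are placeholders.
	req := client.AddApplicationOutputRequest(&kinesisanalytics.AddApplicationOutputInput{
		ApplicationName:             aws.String("demo-app"),
		CurrentApplicationVersionId: aws.Int64(1),
		Output: &kinesisanalytics.Output{
			Name: aws.String("DESTINATION_SQL_STREAM"),
			DestinationSchema: &kinesisanalytics.DestinationSchema{
				RecordFormatType: kinesisanalytics.RecordFormatTypeJson,
			},
			LambdaOutput: &kinesisanalytics.LambdaOutput{
				ResourceARN: aws.String("arn:aws:lambda:us-west-2:123456789012:function:process-output"),
				RoleARN:     aws.String("arn:aws:iam::123456789012:role/ka-output-role"),
			},
		},
	})
	if _, err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("output added")
}
```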
//
// To read data from your source stream or write data to destination streams,
// Amazon Kinesis Analytics needs your permissions. You grant these permissions
@@ -724,9 +723,9 @@ func (r DiscoverInputSchemaRequest) Send() (*DiscoverInputSchemaOutput, error) {
// Amazon Kinesis Analytics.
//
// Infers a schema by evaluating sample records on the specified streaming source
-// (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream). In the
-// response, the operation returns the inferred schema and also the sample records
-// that the operation used to infer the schema.
+// (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream) or S3
+// object. In the response, the operation returns the inferred schema and also
+// the sample records that the operation used to infer the schema.
//
// You can use the inferred schema when configuring a streaming source for your
// application. For conceptual information, see Configuring Application Input
@@ -1225,8 +1224,8 @@ type AddApplicationInputProcessingConfigurationInput struct {
// CurrentApplicationVersionId is a required field
CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"`
- // The ID of the input configuration to which to add the input configuration.
- // You can get a list of the input IDs for an application using the DescribeApplication
+ // The ID of the input configuration to add the input processing configuration
+ // to. You can get a list of the input IDs for an application using the DescribeApplication
// operation.
//
// InputId is a required field
@@ -1343,7 +1342,7 @@ type AddApplicationOutputInput struct {
// ApplicationName is a required field
ApplicationName *string `min:"1" type:"string" required:"true"`
- // Version of the application to which you want add the output configuration.
+ // Version of the application to which you want to add the output configuration.
// You can use the DescribeApplication operation to get the current application
// version. If the version specified is not the current version, the ConcurrentModificationException
// is returned.
@@ -1353,8 +1352,9 @@ type AddApplicationOutputInput struct {
// An array of objects, each describing one output configuration. In the output
// configuration, you specify the name of an in-application stream, a destination
- // (that is, an Amazon Kinesis stream or an Amazon Kinesis Firehose delivery
- // stream), and record the formation to use when writing to the destination.
+ // (that is, an Amazon Kinesis stream, an Amazon Kinesis Firehose delivery stream,
+ // or an Amazon Lambda function), and the record format to use when writing
+ // to the destination.
//
// Output is a required field
Output *Output `type:"structure" required:"true"`
@@ -1592,14 +1592,14 @@ type ApplicationDetail struct {
// Kinesis Analytics applications, see Working with Amazon CloudWatch Logs (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html).
CloudWatchLoggingOptionDescriptions []CloudWatchLoggingOptionDescription `type:"list"`
- // Timestamp when the application version was created.
+ // Time stamp when the application version was created.
CreateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"`
// Describes the application input configuration. For more information, see
// Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
InputDescriptions []InputDescription `type:"list"`
- // Timestamp when the application was last updated.
+ // Time stamp when the application was last updated. LastUpdateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` // Describes the application output configuration. For more information, see @@ -2105,7 +2105,7 @@ type CreateApplicationInput struct { // output. For example, you can write a SQL statement that reads data from one // in-application stream, generates a running average of the number of advertisement // clicks by vendor, and insert resulting rows in another in-application stream - // using pumps. For more inforamtion about the typical pattern, see Application + // using pumps. For more information about the typical pattern, see Application // Code (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-app-code.html). // // You can provide such series of SQL statements, where output of one statement @@ -2139,7 +2139,7 @@ type CreateApplicationInput struct { // stream like a table (you can think of it as a constantly updating table). // // For the streaming source, you provide its Amazon Resource Name (ARN) and - // format of data on the stream (for example, JSON, CSV, etc). You also must + // format of data on the stream (for example, JSON, CSV, etc.). You also must // provide an IAM role that Amazon Kinesis Analytics can assume to read this // stream on your behalf. // @@ -2150,20 +2150,22 @@ type CreateApplicationInput struct { Inputs []Input `type:"list"` // You can configure application output to write data from any of the in-application - // streams to up to five destinations. + // streams to up to three destinations. // // These destinations can be Amazon Kinesis streams, Amazon Kinesis Firehose - // delivery streams, or both. + // delivery streams, Amazon Lambda destinations, or any combination of the three. // // In the configuration, you specify the in-application stream name, the destination - // stream Amazon Resource Name (ARN), and the format to use when writing data. - // You must also provide an IAM role that Amazon Kinesis Analytics can assume - // to write to the destination stream on your behalf. + // stream or Lambda function Amazon Resource Name (ARN), and the format to use + // when writing data. You must also provide an IAM role that Amazon Kinesis + // Analytics can assume to write to the destination stream or Lambda function + // on your behalf. // - // In the output configuration, you also provide the output stream Amazon Resource - // Name (ARN) and the format of data in the stream (for example, JSON, CSV). - // You also must provide an IAM role that Amazon Kinesis Analytics can assume - // to write to this stream on your behalf. + // In the output configuration, you also provide the output stream or Lambda + // function ARN. For stream destinations, you provide the format of data in + // the stream (for example, JSON, CSV). You also must provide an IAM role that + // Amazon Kinesis Analytics can assume to write to the stream or Lambda function + // on your behalf. Outputs []Output `type:"list"` } @@ -2297,7 +2299,8 @@ type DeleteApplicationCloudWatchLoggingOptionInput struct { ApplicationName *string `min:"1" type:"string" required:"true"` // The CloudWatchLoggingOptionId of the CloudWatch logging option to delete. - // You can use the DescribeApplication operation to get the CloudWatchLoggingOptionId. + // You can get the CloudWatchLoggingOptionId by using the DescribeApplication + // operation. 
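//
// Example (sketch; the application name is a placeholder): looking up the
// CloudWatchLoggingOptionId with DescribeApplication before deleting it.
//
//    desc, err := client.DescribeApplicationRequest(&kinesisanalytics.DescribeApplicationInput{
//        ApplicationName: aws.String("my-application"),
//    }).Send()
//    if err == nil && len(desc.ApplicationDetail.CloudWatchLoggingOptionDescriptions) > 0 {
//        // Pass this ID as CloudWatchLoggingOptionId in the delete request.
//        optionID := desc.ApplicationDetail.CloudWatchLoggingOptionDescriptions[0].CloudWatchLoggingOptionId
//        _ = optionID
//    }
//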
// // CloudWatchLoggingOptionId is a required field CloudWatchLoggingOptionId *string `min:"1" type:"string" required:"true"` @@ -2461,9 +2464,9 @@ type DeleteApplicationInputProcessingConfigurationInput struct { // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` - // The ID of the input configuration from which to delete the input configuration. - // You can get a list of the input IDs for an application using the DescribeApplication - // operation. + // The ID of the input configuration from which to delete the input processing + // configuration. You can get a list of the input IDs for an application by + // using the DescribeApplication operation. // // InputId is a required field InputId *string `min:"1" type:"string" required:"true"` @@ -2909,6 +2912,7 @@ type DiscoverInputSchemaInput struct { // stream on your behalf. RoleARN *string `min:"1" type:"string"` + // Specify this parameter to discover a schema from data in an S3 object. S3Configuration *S3Configuration `type:"structure"` } @@ -3049,12 +3053,12 @@ type Input struct { // Describes the number of in-application streams to create. // - // Data from your source will be routed to these in-application input streams. + // Data from your source is routed to these in-application input streams. // // (see Configuring Application Input (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). InputParallelism *InputParallelism `type:"structure"` - // The InputProcessingConfiguration for the Input. An input processor transforms + // The InputProcessingConfiguration for the input. An input processor transforms // records as they are received from the stream, before the application's SQL // code executes. Currently, the only input processing configuration available // is InputLambdaProcessor. @@ -3070,8 +3074,8 @@ type Input struct { InputSchema *SourceSchema `type:"structure" required:"true"` // If the streaming source is an Amazon Kinesis Firehose delivery stream, identifies - // the Firehose delivery stream's ARN and an IAM role that enables Amazon Kinesis - // Analytics to access the stream on your behalf. + // the delivery stream's ARN and an IAM role that enables Amazon Kinesis Analytics + // to access the stream on your behalf. // // Note: Either KinesisStreamsInput or KinesisFirehoseInput is required. KinesisFirehoseInput *KinesisFirehoseInput `type:"structure"` @@ -3083,10 +3087,10 @@ type Input struct { // Note: Either KinesisStreamsInput or KinesisFirehoseInput is required. KinesisStreamsInput *KinesisStreamsInput `type:"structure"` - // Name prefix to use when creating in-application stream. Suppose you specify - // a prefix "MyInApplicationStream". Amazon Kinesis Analytics will then create + // Name prefix to use when creating an in-application stream. Suppose that you + // specify a prefix "MyInApplicationStream." Amazon Kinesis Analytics then creates // one or more (as per the InputParallelism count you specified) in-application - // streams with names "MyInApplicationStream_001", "MyInApplicationStream_002" + // streams with names "MyInApplicationStream_001," "MyInApplicationStream_002," // and so on. 
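//
// Example (sketch): with the configuration below, Amazon Kinesis Analytics
// creates "MyInApplicationStream_001" and "MyInApplicationStream_002". The
// required InputSchema and Kinesis source are omitted here for brevity.
//
//    in := kinesisanalytics.Input{
//        NamePrefix:       aws.String("MyInApplicationStream"),
//        InputParallelism: &kinesisanalytics.InputParallelism{Count: aws.Int64(2)},
//    }
//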
// // NamePrefix is a required field @@ -3277,14 +3281,13 @@ type InputDescription struct { InputStartingPositionConfiguration *InputStartingPositionConfiguration `type:"structure"` // If an Amazon Kinesis Firehose delivery stream is configured as a streaming - // source, provides the Firehose delivery stream's Amazon Resource Name (ARN) - // and an IAM role that enables Amazon Kinesis Analytics to access the stream - // on your behalf. + // source, provides the delivery stream's ARN and an IAM role that enables Amazon + // Kinesis Analytics to access the stream on your behalf. KinesisFirehoseInputDescription *KinesisFirehoseInputDescription `type:"structure"` // If an Amazon Kinesis stream is configured as streaming source, provides Amazon - // Kinesis stream's ARN and an IAM role that enables Amazon Kinesis Analytics - // to access the stream on your behalf. + // Kinesis stream's Amazon Resource Name (ARN) and an IAM role that enables + // Amazon Kinesis Analytics to access the stream on your behalf. KinesisStreamsInputDescription *KinesisStreamsInputDescription `type:"structure"` // In-application name prefix. @@ -3355,9 +3358,10 @@ func (s *InputDescription) SetNamePrefix(v string) *InputDescription { return s } -// An object that contains the ARN of the AWS Lambda (https://aws.amazon.com/documentation/lambda/) -// function that is used to preprocess records in the stream, and the ARN of -// the IAM role used to access the AWS Lambda function. +// An object that contains the Amazon Resource Name (ARN) of the AWS Lambda +// (https://aws.amazon.com/documentation/lambda/) function that is used to preprocess +// records in the stream, and the ARN of the IAM role that is used to access +// the AWS Lambda function. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/InputLambdaProcessor type InputLambdaProcessor struct { _ struct{} `type:"structure"` @@ -3368,7 +3372,7 @@ type InputLambdaProcessor struct { // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // The ARN of the IAM role used to access the AWS Lambda function. + // The ARN of the IAM role that is used to access the AWS Lambda function. // // RoleARN is a required field RoleARN *string `min:"1" type:"string" required:"true"` @@ -3420,9 +3424,10 @@ func (s *InputLambdaProcessor) SetRoleARN(v string) *InputLambdaProcessor { return s } -// An object that contains the ARN of the AWS Lambda (https://aws.amazon.com/documentation/lambda/) -// function that is used to preprocess records in the stream, and the ARN of -// the IAM role used to access the AWS Lambda expression. +// An object that contains the Amazon Resource Name (ARN) of the AWS Lambda +// (https://aws.amazon.com/documentation/lambda/) function that is used to preprocess +// records in the stream, and the ARN of the IAM role that is used to access +// the AWS Lambda expression. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/InputLambdaProcessorDescription type InputLambdaProcessorDescription struct { _ struct{} `type:"structure"` @@ -3431,7 +3436,7 @@ type InputLambdaProcessorDescription struct { // function that is used to preprocess the records in the stream. ResourceARN *string `min:"1" type:"string"` - // The ARN of the IAM role used to access the AWS Lambda function. + // The ARN of the IAM role that is used to access the AWS Lambda function. 
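//
// Example (sketch; ARNs are placeholders): wiring a preprocessing Lambda
// function into an input through InputProcessingConfiguration.
//
//    processing := &kinesisanalytics.InputProcessingConfiguration{
//        InputLambdaProcessor: &kinesisanalytics.InputLambdaProcessor{
//            ResourceARN: aws.String("arn:aws:lambda:us-west-2:123456789012:function:PreprocessRecords"),
//            RoleARN:     aws.String("arn:aws:iam::123456789012:role/KinesisAnalyticsLambdaRole"),
//        },
//    }
//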
RoleARN *string `min:"1" type:"string"` } @@ -3463,11 +3468,11 @@ func (s *InputLambdaProcessorDescription) SetRoleARN(v string) *InputLambdaProce type InputLambdaProcessorUpdate struct { _ struct{} `type:"structure"` - // The ARN of the new AWS Lambda (https://aws.amazon.com/documentation/lambda/) + // The Amazon Resource Name (ARN) of the new AWS Lambda (https://aws.amazon.com/documentation/lambda/) // function that is used to preprocess the records in the stream. ResourceARNUpdate *string `min:"1" type:"string"` - // The ARN of the new IAM role used to access the AWS Lambda function. + // The ARN of the new IAM role that is used to access the AWS Lambda function. RoleARNUpdate *string `min:"1" type:"string"` } @@ -3589,14 +3594,14 @@ func (s *InputParallelismUpdate) SetCountUpdate(v int64) *InputParallelismUpdate } // Provides a description of a processor that is used to preprocess the records -// in the stream prior to being processed by your application code. Currently, +// in the stream before being processed by your application code. Currently, // the only input processor available is AWS Lambda (https://aws.amazon.com/documentation/lambda/). // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/InputProcessingConfiguration type InputProcessingConfiguration struct { _ struct{} `type:"structure"` // The InputLambdaProcessor that is used to preprocess the records in the stream - // prior to being processed by your application code. + // before being processed by your application code. // // InputLambdaProcessor is a required field InputLambdaProcessor *InputLambdaProcessor `type:"structure" required:"true"` @@ -3788,7 +3793,7 @@ type InputStartingPositionConfiguration struct { // The starting position on the stream. // // * NOW - Start reading just after the most recent record in the stream, - // start at the request timestamp that the customer issued. + // start at the request time stamp that the customer issued. // // * TRIM_HORIZON - Start reading at the last untrimmed record in the stream, // which is the oldest record available in the stream. This option is not @@ -3839,12 +3844,11 @@ type InputUpdate struct { InputSchemaUpdate *InputSchemaUpdate `type:"structure"` // If an Amazon Kinesis Firehose delivery stream is the streaming source to - // be updated, provides an updated stream Amazon Resource Name (ARN) and IAM - // role ARN. + // be updated, provides an updated stream ARN and IAM role ARN. KinesisFirehoseInputUpdate *KinesisFirehoseInputUpdate `type:"structure"` - // If a Amazon Kinesis stream is the streaming source to be updated, provides - // an updated stream ARN and IAM role ARN. + // If an Amazon Kinesis stream is the streaming source to be updated, provides + // an updated stream Amazon Resource Name (ARN) and IAM role ARN. KinesisStreamsInputUpdate *KinesisStreamsInputUpdate `type:"structure"` // Name prefix for in-application streams that Amazon Kinesis Analytics creates @@ -3995,14 +3999,13 @@ func (s *JSONMappingParameters) SetRecordRowPath(v string) *JSONMappingParameter } // Identifies an Amazon Kinesis Firehose delivery stream as the streaming source. -// You provide the Firehose delivery stream's Amazon Resource Name (ARN) and -// an IAM role ARN that enables Amazon Kinesis Analytics to access the stream -// on your behalf. +// You provide the delivery stream's Amazon Resource Name (ARN) and an IAM role +// ARN that enables Amazon Kinesis Analytics to access the stream on your behalf. 
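//
// Example (sketch; ARNs are placeholders): identifying a Firehose delivery
// stream as the streaming source for an application input.
//
//    fhInput := &kinesisanalytics.KinesisFirehoseInput{
//        ResourceARN: aws.String("arn:aws:firehose:us-west-2:123456789012:deliverystream/my-delivery-stream"),
//        RoleARN:     aws.String("arn:aws:iam::123456789012:role/KinesisAnalyticsFirehoseRole"),
//    }
//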
// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/KinesisFirehoseInput type KinesisFirehoseInput struct { _ struct{} `type:"structure"` - // ARN of the input Firehose delivery stream. + // ARN of the input delivery stream. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -4102,12 +4105,12 @@ func (s *KinesisFirehoseInputDescription) SetRoleARN(v string) *KinesisFirehoseI type KinesisFirehoseInputUpdate struct { _ struct{} `type:"structure"` - // ARN of the input Amazon Kinesis Firehose delivery stream to read. + // Amazon Resource Name (ARN) of the input Amazon Kinesis Firehose delivery + // stream to read. ResourceARNUpdate *string `min:"1" type:"string"` - // Amazon Resource Name (ARN) of the IAM role that Amazon Kinesis Analytics - // can assume to access the stream on your behalf. You need to grant necessary - // permissions to this role. + // ARN of the IAM role that Amazon Kinesis Analytics can assume to access the + // stream on your behalf. You need to grant necessary permissions to this role. RoleARNUpdate *string `min:"1" type:"string"` } @@ -4307,8 +4310,8 @@ func (s *KinesisFirehoseOutputUpdate) SetRoleARNUpdate(v string) *KinesisFirehos } // Identifies an Amazon Kinesis stream as the streaming source. You provide -// the stream's ARN and an IAM role ARN that enables Amazon Kinesis Analytics -// to access the stream on your behalf. +// the stream's Amazon Resource Name (ARN) and an IAM role ARN that enables +// Amazon Kinesis Analytics to access the stream on your behalf. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/KinesisStreamsInput type KinesisStreamsInput struct { _ struct{} `type:"structure"` @@ -4461,10 +4464,10 @@ func (s *KinesisStreamsInputUpdate) SetRoleARNUpdate(v string) *KinesisStreamsIn return s } -// When configuring application output, identifies a Amazon Kinesis stream as -// the destination. You provide the stream Amazon Resource Name (ARN) and also -// an IAM role ARN that Amazon Kinesis Analytics can use to write to the stream -// on your behalf. +// When configuring application output, identifies an Amazon Kinesis stream +// as the destination. You provide the stream Amazon Resource Name (ARN) and +// also an IAM role ARN that Amazon Kinesis Analytics can use to write to the +// stream on your behalf. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/KinesisStreamsOutput type KinesisStreamsOutput struct { _ struct{} `type:"structure"` @@ -4618,6 +4621,162 @@ func (s *KinesisStreamsOutputUpdate) SetRoleARNUpdate(v string) *KinesisStreamsO return s } +// When configuring application output, identifies an AWS Lambda function as +// the destination. You provide the function Amazon Resource Name (ARN) and +// also an IAM role ARN that Amazon Kinesis Analytics can use to write to the +// function on your behalf. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/LambdaOutput +type LambdaOutput struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the destination Lambda function to write to. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // ARN of the IAM role that Amazon Kinesis Analytics can assume to write to + // the destination function on your behalf. You need to grant the necessary + // permissions to this role. 
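//
// Example (sketch; ARNs are placeholders): both ResourceARN and RoleARN are
// required, which Validate enforces before the request is sent.
//
//    out := &kinesisanalytics.LambdaOutput{
//        ResourceARN: aws.String("arn:aws:lambda:us-west-2:123456789012:function:ProcessOutput"),
//        RoleARN:     aws.String("arn:aws:iam::123456789012:role/KinesisAnalyticsLambdaRole"),
//    }
//    if err := out.Validate(); err != nil {
//        // Handle the missing or too-short field error.
//    }
//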
+ // + // RoleARN is a required field + RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaOutput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaOutput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "LambdaOutput"} + + if s.ResourceARN == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceARN", 1)) + } + + if s.RoleARN == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *LambdaOutput) SetResourceARN(v string) *LambdaOutput { + s.ResourceARN = &v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *LambdaOutput) SetRoleARN(v string) *LambdaOutput { + s.RoleARN = &v + return s +} + +// For an application output, describes the AWS Lambda function configured as +// its destination. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/LambdaOutputDescription +type LambdaOutputDescription struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the destination Lambda function. + ResourceARN *string `min:"1" type:"string"` + + // ARN of the IAM role that Amazon Kinesis Analytics can assume to write to + // the destination function. + RoleARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LambdaOutputDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaOutputDescription) GoString() string { + return s.String() +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *LambdaOutputDescription) SetResourceARN(v string) *LambdaOutputDescription { + s.ResourceARN = &v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *LambdaOutputDescription) SetRoleARN(v string) *LambdaOutputDescription { + s.RoleARN = &v + return s +} + +// When updating an output configuration using the UpdateApplication operation, +// provides information about an AWS Lambda function configured as the destination. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/LambdaOutputUpdate +type LambdaOutputUpdate struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the destination Lambda function. + ResourceARNUpdate *string `min:"1" type:"string"` + + // ARN of the IAM role that Amazon Kinesis Analytics can assume to write to + // the destination function on your behalf. You need to grant the necessary + // permissions to this role. + RoleARNUpdate *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LambdaOutputUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaOutputUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LambdaOutputUpdate) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "LambdaOutputUpdate"} + if s.ResourceARNUpdate != nil && len(*s.ResourceARNUpdate) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceARNUpdate", 1)) + } + if s.RoleARNUpdate != nil && len(*s.RoleARNUpdate) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleARNUpdate", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARNUpdate sets the ResourceARNUpdate field's value. +func (s *LambdaOutputUpdate) SetResourceARNUpdate(v string) *LambdaOutputUpdate { + s.ResourceARNUpdate = &v + return s +} + +// SetRoleARNUpdate sets the RoleARNUpdate field's value. +func (s *LambdaOutputUpdate) SetRoleARNUpdate(v string) *LambdaOutputUpdate { + s.RoleARNUpdate = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/ListApplicationsRequest type ListApplicationsInput struct { _ struct{} `type:"structure"` @@ -4796,6 +4955,9 @@ type Output struct { // Identifies an Amazon Kinesis stream as the destination. KinesisStreamsOutput *KinesisStreamsOutput `type:"structure"` + // Identifies an AWS Lambda function as the destination. + LambdaOutput *LambdaOutput `type:"structure"` + // Name of the in-application stream. // // Name is a required field @@ -4836,6 +4998,11 @@ func (s *Output) Validate() error { invalidParams.AddNested("KinesisStreamsOutput", err.(aws.ErrInvalidParams)) } } + if s.LambdaOutput != nil { + if err := s.LambdaOutput.Validate(); err != nil { + invalidParams.AddNested("LambdaOutput", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4861,6 +5028,12 @@ func (s *Output) SetKinesisStreamsOutput(v *KinesisStreamsOutput) *Output { return s } +// SetLambdaOutput sets the LambdaOutput field's value. +func (s *Output) SetLambdaOutput(v *LambdaOutput) *Output { + s.LambdaOutput = v + return s +} + // SetName sets the Name field's value. func (s *Output) SetName(v string) *Output { s.Name = &v @@ -4885,6 +5058,10 @@ type OutputDescription struct { // is written. KinesisStreamsOutputDescription *KinesisStreamsOutputDescription `type:"structure"` + // Describes the AWS Lambda function configured as the destination where output + // is written. + LambdaOutputDescription *LambdaOutputDescription `type:"structure"` + // Name of the in-application stream configured as output. Name *string `min:"1" type:"string"` @@ -4920,6 +5097,12 @@ func (s *OutputDescription) SetKinesisStreamsOutputDescription(v *KinesisStreams return s } +// SetLambdaOutputDescription sets the LambdaOutputDescription field's value. +func (s *OutputDescription) SetLambdaOutputDescription(v *LambdaOutputDescription) *OutputDescription { + s.LambdaOutputDescription = v + return s +} + // SetName sets the Name field's value. func (s *OutputDescription) SetName(v string) *OutputDescription { s.Name = &v @@ -4941,13 +5124,16 @@ type OutputUpdate struct { // more information, see Configuring Application Output (http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). DestinationSchemaUpdate *DestinationSchema `type:"structure"` - // Describes a Amazon Kinesis Firehose delivery stream as the destination for + // Describes an Amazon Kinesis Firehose delivery stream as the destination for // the output. KinesisFirehoseOutputUpdate *KinesisFirehoseOutputUpdate `type:"structure"` // Describes an Amazon Kinesis stream as the destination for the output. 
KinesisStreamsOutputUpdate *KinesisStreamsOutputUpdate `type:"structure"` + // Describes an AWS Lambda function as the destination for the output. + LambdaOutputUpdate *LambdaOutputUpdate `type:"structure"` + // If you want to specify a different in-application stream for this output // configuration, use this field to specify the new in-application stream name. NameUpdate *string `min:"1" type:"string"` @@ -4991,6 +5177,11 @@ func (s *OutputUpdate) Validate() error { invalidParams.AddNested("KinesisStreamsOutputUpdate", err.(aws.ErrInvalidParams)) } } + if s.LambdaOutputUpdate != nil { + if err := s.LambdaOutputUpdate.Validate(); err != nil { + invalidParams.AddNested("LambdaOutputUpdate", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5016,6 +5207,12 @@ func (s *OutputUpdate) SetKinesisStreamsOutputUpdate(v *KinesisStreamsOutputUpda return s } +// SetLambdaOutputUpdate sets the LambdaOutputUpdate field's value. +func (s *OutputUpdate) SetLambdaOutputUpdate(v *LambdaOutputUpdate) *OutputUpdate { + s.LambdaOutputUpdate = v + return s +} + // SetNameUpdate sets the NameUpdate field's value. func (s *OutputUpdate) SetNameUpdate(v string) *OutputUpdate { s.NameUpdate = &v @@ -5175,11 +5372,9 @@ type ReferenceDataSource struct { // Identifies the S3 bucket and object that contains the reference data. Also // identifies the IAM role Amazon Kinesis Analytics can assume to read this - // object on your behalf. - // - // An Amazon Kinesis Analytics application loads reference data only once. If - // the data changes, you call the UpdateApplication operation to trigger reloading - // of data into your application. + // object on your behalf. An Amazon Kinesis Analytics application loads reference + // data only once. If the data changes, you call the UpdateApplication operation + // to trigger reloading of data into your application. S3ReferenceDataSource *S3ReferenceDataSource `type:"structure"` // Name of the in-application table to create. @@ -5404,16 +5599,25 @@ func (s *ReferenceDataSourceUpdate) SetTableNameUpdate(v string) *ReferenceDataS return s } +// Provides a description of an Amazon S3 data source, including the Amazon +// Resource Name (ARN) of the S3 bucket, the ARN of the IAM role that is used +// to access the bucket, and the name of the S3 object that contains the data. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kinesisanalytics-2015-08-14/S3Configuration type S3Configuration struct { _ struct{} `type:"structure"` + // ARN of the S3 bucket that contains the data. + // // BucketARN is a required field BucketARN *string `min:"1" type:"string" required:"true"` + // The name of the object that contains the data. + // // FileKey is a required field FileKey *string `min:"1" type:"string" required:"true"` + // IAM ARN of the role used to access the data. + // // RoleARN is a required field RoleARN *string `min:"1" type:"string" required:"true"` } diff --git a/service/kms/api.go b/service/kms/api.go index 3080bcaecd5..ec4ca1e72f3 100644 --- a/service/kms/api.go +++ b/service/kms/api.go @@ -2774,8 +2774,8 @@ type CreateKeyInput struct { // A flag to indicate whether to bypass the key policy lockout safety check. // - // Setting this value to true increases the likelihood that the CMK becomes - // unmanageable. Do not set this value to true indiscriminately. + // Setting this value to true increases the risk that the CMK becomes unmanageable. + // Do not set this value to true indiscriminately. 
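//
// Example (sketch; the description is a placeholder): creating a CMK without
// bypassing the lockout safety check; leaving the flag unset (false) is the
// safe default.
//
//    req := client.CreateKeyRequest(&kms.CreateKeyInput{
//        Description: aws.String("example key"),
//        // BypassPolicyLockoutSafetyCheck is intentionally not set.
//    })
//    resp, err := req.Send()
//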
// // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section in the AWS Key Management Service Developer Guide. @@ -2812,28 +2812,29 @@ type CreateKeyInput struct { // The key policy to attach to the CMK. // - // If you specify a policy and do not set BypassPolicyLockoutSafetyCheck to - // true, the policy must meet the following criteria: - // - // * It must allow the principal that is making the CreateKey request to - // make a subsequent PutKeyPolicy request on the CMK. This reduces the likelihood - // that the CMK becomes unmanageable. For more information, refer to the - // scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the AWS Key Management Service Developer Guide. - // - // * The principals that are specified in the key policy must exist and be - // visible to AWS KMS. When you create a new AWS principal (for example, - // an IAM user or role), you might need to enforce a delay before specifying - // the new principal in a key policy because the new principal might not - // immediately be visible to AWS KMS. For more information, see Changes that - // I make are not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the IAM User Guide. - // - // If you do not specify a policy, AWS KMS attaches a default key policy to - // the CMK. For more information, see Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // If you provide a key policy, it must meet the following criteria: + // + // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy + // must allow the principal that is making the CreateKey request to make + // a subsequent PutKeyPolicy request on the CMK. This reduces the risk that + // the CMK becomes unmanageable. For more information, refer to the scenario + // in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the AWS Key Management Service Developer Guide. + // + // * Each statement in the key policy must contain one or more principals. + // The principals in the key policy must exist and be visible to AWS KMS. + // When you create a new AWS principal (for example, an IAM user or role), + // you might need to enforce a delay before including the new principal in + // a key policy because the new principal might not be immediately visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the AWS Identity and Access Management User Guide. + // + // If you do not provide a key policy, AWS KMS attaches a default key policy + // to the CMK. For more information, see Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) // in the AWS Key Management Service Developer Guide. // - // The policy size limit is 32 kilobytes (32768 bytes). + // The key policy size limit is 32 kilobytes (32768 bytes). Policy *string `min:"1" type:"string"` // One or more tags. Each tag consists of a tag key and a tag value. 
Tag keys @@ -4156,8 +4157,8 @@ type GetKeyPolicyInput struct { // KeyId is a required field KeyId *string `min:"1" type:"string" required:"true"` - // Specifies the name of the policy. The only valid name is default. To get - // the names of key policies, use ListKeyPolicies. + // Specifies the name of the key policy. The only valid name is default. To + // get the names of key policies, use ListKeyPolicies. // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -4215,7 +4216,7 @@ type GetKeyPolicyOutput struct { responseMetadata aws.Response - // A policy document in JSON format. + // A key policy document in JSON format. Policy *string `min:"1" type:"string"` } @@ -5254,8 +5255,8 @@ type ListKeyPoliciesOutput struct { // use for the Marker parameter in a subsequent request. NextMarker *string `min:"1" type:"string"` - // A list of policy names. Currently, there is only one policy and it is named - // "Default". + // A list of key policy names. Currently, there is only one key policy per CMK + // and it is always named default. PolicyNames []string `type:"list"` // A flag that indicates whether there are more items in the list. When this @@ -5689,8 +5690,8 @@ type PutKeyPolicyInput struct { // A flag to indicate whether to bypass the key policy lockout safety check. // - // Setting this value to true increases the likelihood that the CMK becomes - // unmanageable. Do not set this value to true indiscriminately. + // Setting this value to true increases the risk that the CMK becomes unmanageable. + // Do not set this value to true indiscriminately. // // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section in the AWS Key Management Service Developer Guide. @@ -5718,24 +5719,25 @@ type PutKeyPolicyInput struct { // The key policy to attach to the CMK. // - // If you do not set BypassPolicyLockoutSafetyCheck to true, the policy must - // meet the following criteria: - // - // * It must allow the principal that is making the PutKeyPolicy request - // to make a subsequent PutKeyPolicy request on the CMK. This reduces the - // likelihood that the CMK becomes unmanageable. For more information, refer - // to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the AWS Key Management Service Developer Guide. - // - // * The principals that are specified in the key policy must exist and be - // visible to AWS KMS. When you create a new AWS principal (for example, - // an IAM user or role), you might need to enforce a delay before specifying - // the new principal in a key policy because the new principal might not - // immediately be visible to AWS KMS. For more information, see Changes that - // I make are not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the IAM User Guide. - // - // The policy size limit is 32 kilobytes (32768 bytes). + // The key policy must meet the following criteria: + // + // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy + // must allow the principal that is making the PutKeyPolicy request to make + // a subsequent PutKeyPolicy request on the CMK. This reduces the risk that + // the CMK becomes unmanageable. 
For more information, refer to the scenario + // in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the AWS Key Management Service Developer Guide. + // + // * Each statement in the key policy must contain one or more principals. + // The principals in the key policy must exist and be visible to AWS KMS. + // When you create a new AWS principal (for example, an IAM user or role), + // you might need to enforce a delay before including the new principal in + // a key policy because the new principal might not be immediately visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the AWS Identity and Access Management User Guide. + // + // The key policy size limit is 32 kilobytes (32768 bytes). // // Policy is a required field Policy *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api.go b/service/migrationhub/api.go index 8329cff72de..2a9b089841c 100644 --- a/service/migrationhub/api.go +++ b/service/migrationhub/api.go @@ -1727,7 +1727,7 @@ func (s DisassociateDiscoveredResourceOutput) SDKResponseMetadata() aws.Response type DiscoveredResource struct { _ struct{} `type:"structure"` - // The configurationId in ADS that uniquely identifies the on-premise resource. + // The configurationId in ADS that uniquely identifies the on-premises resource. // // ConfigurationId is a required field ConfigurationId *string `min:"1" type:"string" required:"true"` diff --git a/service/rds/api.go b/service/rds/api.go index c20a605ca75..c0f2719a6da 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -854,20 +854,17 @@ func (r CreateDBInstanceReadReplicaRequest) Send() (*CreateDBInstanceReadReplica // // Creates a new DB instance that acts as a Read Replica for an existing source // DB instance. You can create a Read Replica for a DB instance running MySQL, -// MariaDB, or PostgreSQL. +// MariaDB, or PostgreSQL. For more information, see Working with PostgreSQL, +// MySQL, and MariaDB Read Replicas (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html). // // Amazon Aurora does not support this action. You must call the CreateDBInstance // action to create a DB instance for an Aurora DB cluster. // -// All Read Replica DB instances are created as Single-AZ deployments with backups -// disabled. All other DB instance attributes (including DB security groups -// and DB parameter groups) are inherited from the source DB instance, except -// as specified below. +// All Read Replica DB instances are created with backups disabled. All other +// DB instance attributes (including DB security groups and DB parameter groups) +// are inherited from the source DB instance, except as specified below. // -// The source DB instance must have backup retention enabled. -// -// For more information, see Working with PostgreSQL, MySQL, and MariaDB Read -// Replicas (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html). +// Your source DB instance must have backup retention enabled. // // // Example sending a request using the CreateDBInstanceReadReplicaRequest method. 
// req := client.CreateDBInstanceReadReplicaRequest(params)
@@ -5135,23 +5132,16 @@ func (r RebootDBInstanceRequest) Send() (*RebootDBInstanceOutput, error) {
// RebootDBInstanceRequest returns a request value for making API operation for
// Amazon Relational Database Service.
//
-// Rebooting a DB instance restarts the database engine service. A reboot also
-// applies to the DB instance any modifications to the associated DB parameter
-// group that were pending. Rebooting a DB instance results in a momentary outage
-// of the instance, during which the DB instance status is set to rebooting.
-// If the RDS instance is configured for MultiAZ, it is possible that the reboot
-// is conducted through a failover. An Amazon RDS event is created when the
-// reboot is completed.
+// You might need to reboot your DB instance, usually for maintenance reasons.
+// For example, if you make certain modifications, or if you change the DB parameter
+// group associated with the DB instance, you must reboot the instance for the
+// changes to take effect.
//
-// If your DB instance is deployed in multiple Availability Zones, you can force
-// a failover from one AZ to the other during the reboot. You might force a
-// failover to test the availability of your DB instance deployment or to restore
-// operations to the original AZ after a failover occurs.
+// Rebooting a DB instance restarts the database engine service. Rebooting a
+// DB instance results in a momentary outage, during which the DB instance status
+// is set to rebooting.
//
-// The time required to reboot is a function of the specific database engine's
-// crash recovery process. To improve the reboot time, we recommend that you
-// reduce database activities as much as possible during the reboot process
-// to reduce rollback activity for in-transit transactions.
+// For more information about rebooting, see Rebooting a DB Instance (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_RebootInstance.html).
//
//
// Example sending a request using the RebootDBInstanceRequest method.
// req := client.RebootDBInstanceRequest(params)
@@ -5884,6 +5874,8 @@ func (r StartDBInstanceRequest) Send() (*StartDBInstanceOutput, error) {
// AWS CLI command, or the StopDBInstance action. For more information, see
// Stopping and Starting a DB instance in the AWS RDS user guide.
//
+// This command does not apply to Aurora MySQL and Aurora PostgreSQL.
+//
//
// Example sending a request using the StartDBInstanceRequest method.
// req := client.StartDBInstanceRequest(params)
// resp, err := req.Send()
@@ -5937,6 +5929,8 @@ func (r StopDBInstanceRequest) Send() (*StopDBInstanceOutput, error) {
// do a point-in-time restore if necessary. For more information, see Stopping
// and Starting a DB instance in the AWS RDS user guide.
//
+// This command does not apply to Aurora MySQL and Aurora PostgreSQL.
+//
//
// Example sending a request using the StopDBInstanceRequest method.
// req := client.StopDBInstanceRequest(params)
// resp, err := req.Send()
@@ -7642,7 +7636,7 @@ type CreateDBClusterInput struct {
// The port number on which the instances in the DB cluster accept connections.
//
-// Default: 3306
+// Default: 3306 if the engine is set to aurora, or 5432 if it is set to aurora-postgresql.
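//
// Example (sketch; identifiers and credentials are placeholders): for an
// aurora-postgresql cluster, omitting Port uses the 5432 default.
//
//    params := &rds.CreateDBClusterInput{
//        DBClusterIdentifier: aws.String("example-cluster"),
//        Engine:              aws.String("aurora-postgresql"),
//        MasterUsername:      aws.String("admin"),
//        MasterUserPassword:  aws.String("replace-with-a-real-password"),
//        // Port omitted: defaults to 5432 for aurora-postgresql.
//    }
//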
Port *int64 `type:"integer"` // A URL that contains a Signature Version 4 signed request for the CreateDBCluster @@ -8177,8 +8171,7 @@ func (s *CreateDBClusterSnapshotOutput) SetDBClusterSnapshot(v *DBClusterSnapsho type CreateDBInstanceInput struct { _ struct{} `type:"structure"` - // The amount of storage (in gigabytes) to be initially allocated for the DB - // instance. + // The amount of storage (in gibibytes) to allocate for the DB instance. // // Type: Integer // @@ -8192,9 +8185,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -8202,9 +8195,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -8212,9 +8205,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -8222,9 +8215,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 10 to 6144. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 16384. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 6144. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 16384. // // * Magnetic storage (standard): Must be an integer from 10 to 3072. 
// @@ -8504,13 +8497,19 @@ type CreateDBInstanceInput struct { // // MariaDB // + // * 10.2.11 (supported in all AWS Regions) + // + // 10.1.26 (supported in all AWS Regions) + // // * 10.1.23 (supported in all AWS Regions) // // * 10.1.19 (supported in all AWS Regions) // // * 10.1.14 (supported in all AWS Regions except us-east-2) // - // 10.0.31 (supported in all AWS Regions) + // * 10.0.32 (supported in all AWS Regions) + // + // * 10.0.31 (supported in all AWS Regions) // // * 10.0.28 (supported in all AWS Regions) // @@ -8519,15 +8518,21 @@ type CreateDBInstanceInput struct { // * 10.0.17 (supported in all AWS Regions except us-east-2, ca-central-1, // eu-west-2) // + // Microsoft SQL Server 2017 + // + // * 14.00.1000.169.v1 (supported for all editions, and all AWS Regions) + // // Microsoft SQL Server 2016 // - // 13.00.4422.0.v1 (supported for all editions, and all AWS Regions) + // * 13.00.4451.0.v1 (supported for all editions, and all AWS Regions) + // + // * 13.00.4422.0.v1 (supported for all editions, and all AWS Regions) // // * 13.00.2164.0.v1 (supported for all editions, and all AWS Regions) // // Microsoft SQL Server 2014 // - // 12.00.5546.0.v1 (supported for all editions, and all AWS Regions) + // * 12.00.5546.0.v1 (supported for all editions, and all AWS Regions) // // * 12.00.5000.0.v1 (supported for all editions, and all AWS Regions) // @@ -8536,7 +8541,7 @@ type CreateDBInstanceInput struct { // // Microsoft SQL Server 2012 // - // 11.00.6594.0.v1 (supported for all editions, and all AWS Regions) + // * 11.00.6594.0.v1 (supported for all editions, and all AWS Regions) // // * 11.00.6020.0.v1 (supported for all editions, and all AWS Regions) // @@ -8548,8 +8553,8 @@ type CreateDBInstanceInput struct { // // Microsoft SQL Server 2008 R2 // - // 10.50.6529.0.v1 (supported for all editions, and all AWS Regions except us-east-2, - // ca-central-1, and eu-west-2) + // * 10.50.6529.0.v1 (supported for all editions, and all AWS Regions except + // us-east-2, ca-central-1, and eu-west-2) // // * 10.50.6000.34.v1 (supported for all editions, and all AWS Regions except // us-east-2, ca-central-1, and eu-west-2) @@ -8559,86 +8564,21 @@ type CreateDBInstanceInput struct { // // MySQL // - // 5.7.19 (supported in all AWS regions) + // * 5.7.19 (supported in all AWS regions) // // * 5.7.17 (supported in all AWS regions) // // * 5.7.16 (supported in all AWS regions) // - // * 5.6.37 (supported in all AWS Regions) - // - // * 5.6.35 (supported in all AWS Regions) - // - // * 5.6.34 (supported in all AWS Regions) - // - // * 5.6.29 (supported in all AWS Regions) - // - // * 5.6.27 (supported in all AWS Regions except us-east-2, ca-central-1, - // eu-west-2) - // - // 5.5.57(supported in all AWS Regions) - // - // 5.5.54(supported in all AWS Regions) - // - // 5.5.53(supported in all AWS Regions) - // - // 5.5.46(supported in all AWS Regions) - // - // Oracle 12c - // - // 12.1.0.2.v9(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // 12.1.0.2.v8(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // 12.1.0.2.v7(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // 12.1.0.2.v6(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // 12.1.0.2.v5(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // 12.1.0.2.v4(supported for EE in all AWS regions, and SE2 in all AWS regions except 
us-gov-west-1) - // - // 12.1.0.2.v3(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // 12.1.0.2.v2(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // 12.1.0.2.v1(supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1) - // - // Oracle 11g - // - // 11.2.0.4.v13(supported for EE, SE1, and SE, in all AWS regions) + // 5.6.37(supported in all AWS Regions) // - // 11.2.0.4.v12(supported for EE, SE1, and SE, in all AWS regions) + // 5.6.35(supported in all AWS Regions) // - // 11.2.0.4.v11(supported for EE, SE1, and SE, in all AWS regions) + // 5.6.34(supported in all AWS Regions) // - // 11.2.0.4.v10(supported for EE, SE1, and SE, in all AWS regions) + // 5.6.29(supported in all AWS Regions) // - // 11.2.0.4.v9(supported for EE, SE1, and SE, in all AWS regions) - // - // 11.2.0.4.v8(supported for EE, SE1, and SE, in all AWS regions) - // - // 11.2.0.4.v7(supported for EE, SE1, and SE, in all AWS regions) - // - // 11.2.0.4.v6(supported for EE, SE1, and SE, in all AWS regions) - // - // 11.2.0.4.v5(supported for EE, SE1, and SE, in all AWS regions) - // - // 11.2.0.4.v4(supported for EE, SE1, and SE, in all AWS regions) - // - // 11.2.0.4.v3(supported for EE, SE1, and SE, in all AWS regions) - // - // 11.2.0.4.v1(supported for EE, SE1, and SE, in all AWS regions) - // - // PostgreSQL - // - // Version 9.6.x: 9.6.5 | 9.6.3 | 9.6.2 | 9.6.1 - // - // Version 9.5.x: 9.5.9 | 9.5.7 | 9.5.6 | 9.5.4 | 9.5.2 - // - // Version 9.4.x: 9.4.14 | 9.4.12 | 9.4.11 | 9.4.9 | 9.4.7 - // - // Version 9.3.x: 9.3.19 | 9.3.17 | 9.3.16 | 9.3.14 | 9.3.12 + // 5.6.27 EngineVersion *string `type:"string"` // The amount of Provisioned IOPS (input/output operations per second) to be @@ -8646,9 +8586,9 @@ type CreateDBInstanceInput struct { // values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS). // - // Constraints: Must be a multiple between 3 and 10 of the storage amount for + // Constraints: Must be a multiple between 1 and 50 of the storage amount for // the DB instance. Must also be an integer multiple of 1000. For example, if - // the size of your DB instance is 500 GB, then your Iops value can be 2000, + // the size of your DB instance is 500 GiB, then your Iops value can be 2000, // 3000, 4000, or 5000. Iops *int64 `type:"integer"` @@ -9398,6 +9338,9 @@ type CreateDBInstanceReadReplicaInput struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` + // Specifies whether the read replica is in a Multi-AZ deployment. + MultiAZ *bool `type:"boolean"` + // The option group the DB instance is associated with. If omitted, the default // option group for the engine specified is used. OptionGroupName *string `type:"string"` @@ -9627,6 +9570,12 @@ func (s *CreateDBInstanceReadReplicaInput) SetMonitoringRoleArn(v string) *Creat return s } +// SetMultiAZ sets the MultiAZ field's value. +func (s *CreateDBInstanceReadReplicaInput) SetMultiAZ(v bool) *CreateDBInstanceReadReplicaInput { + s.MultiAZ = &v + return s +} + // SetOptionGroupName sets the OptionGroupName field's value. 
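//
// Example (sketch; identifiers are placeholders): creating a Read Replica in
// a Multi-AZ deployment using the new MultiAZ field.
//
//    req := client.CreateDBInstanceReadReplicaRequest(&rds.CreateDBInstanceReadReplicaInput{
//        DBInstanceIdentifier:       aws.String("example-replica"),
//        SourceDBInstanceIdentifier: aws.String("example-source"),
//        MultiAZ:                    aws.Bool(true),
//    })
//    resp, err := req.Send()
//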
func (s *CreateDBInstanceReadReplicaInput) SetOptionGroupName(v string) *CreateDBInstanceReadReplicaInput { s.OptionGroupName = &v @@ -10490,7 +10439,7 @@ type DBCluster struct { _ struct{} `type:"structure"` // For all database engines except Amazon Aurora, AllocatedStorage specifies - // the allocated storage size in gigabytes (GB). For Aurora, AllocatedStorage + // the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage // always returns 1, because Aurora DB cluster storage size is not fixed, but // instead automatically adjusts as needed. AllocatedStorage *int64 `type:"integer"` @@ -11049,7 +10998,7 @@ func (s *DBClusterRole) SetStatus(v string) *DBClusterRole { type DBClusterSnapshot struct { _ struct{} `type:"structure"` - // Specifies the allocated storage size in gigabytes (GB). + // Specifies the allocated storage size in gibibytes (GiB). AllocatedStorage *int64 `type:"integer"` // Provides the list of EC2 Availability Zones that instances in the DB cluster @@ -11445,7 +11394,7 @@ func (s *DBEngineVersion) SetValidUpgradeTarget(v []UpgradeTarget) *DBEngineVers type DBInstance struct { _ struct{} `type:"structure"` - // Specifies the allocated storage size specified in gigabytes. + // Specifies the allocated storage size specified in gibibytes. AllocatedStorage *int64 `type:"integer"` // Indicates that minor version patches are applied automatically. @@ -12277,7 +12226,7 @@ func (s *DBSecurityGroupMembership) SetStatus(v string) *DBSecurityGroupMembersh type DBSnapshot struct { _ struct{} `type:"structure"` - // Specifies the allocated storage size in gigabytes (GB). + // Specifies the allocated storage size in gibibytes (GiB). AllocatedStorage *int64 `type:"integer"` // Specifies the name of the Availability Zone the DB instance was located in @@ -18714,72 +18663,14 @@ func (s *ModifyDBClusterSnapshotAttributeOutput) SetDBClusterSnapshotAttributesR type ModifyDBInstanceInput struct { _ struct{} `type:"structure"` - // The new storage capacity of the RDS instance. Changing this setting does - // not result in an outage and the change is applied during the next maintenance - // window unless ApplyImmediately is set to true for this request. - // - // MySQL - // - // Default: Uses existing setting - // - // Valid Values: 5-6144 - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. - // - // Type: Integer - // - // MariaDB - // - // Default: Uses existing setting - // - // Valid Values: 5-6144 - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. - // - // Type: Integer - // - // PostgreSQL - // - // Default: Uses existing setting - // - // Valid Values: 5-6144 - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. - // - // Type: Integer - // - // Oracle - // - // Default: Uses existing setting - // - // Valid Values: 10-6144 - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. 
Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. - // - // SQL Server + // The new amount of storage (in gibibytes) to allocate for the DB instance. // - // Cannot be modified. + // For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at + // least 10% greater than the current value. Values that are not at least 10% + // greater than the existing value are rounded up so that they are 10% greater + // than the current value. // - // If you choose to migrate your DB instance from using standard storage to - // using Provisioned IOPS, or from using Provisioned IOPS to using standard - // storage, the process can take time. The duration of the migration depends - // on several factors such as database load, storage size, storage type (standard - // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number - // of prior scale storage operations. Typical migration times are under 24 hours, - // but the process can take up to several days in some cases. During the migration, - // the DB instance is available for use, but might experience performance degradation. - // While the migration takes place, nightly backups for the instance are suspended. - // No other Amazon RDS operations can take place for the instance, including - // modifying the instance, rebooting the instance, deleting the instance, creating - // a Read Replica for the instance, and creating a DB snapshot of the instance. + // For the valid values for allocated storage for each engine, see CreateDBInstance. AllocatedStorage *int64 `type:"integer"` // Indicates that major version upgrades are allowed. Changing this parameter @@ -19002,24 +18893,12 @@ type ModifyDBInstanceInput struct { EngineVersion *string `type:"string"` // The new Provisioned IOPS (I/O operations per second) value for the RDS instance. + // // Changing this setting does not result in an outage and the change is applied // during the next maintenance window unless the ApplyImmediately parameter - // is set to true for this request. - // - // Default: Uses existing setting - // - // Constraints: Value supplied must be at least 10% greater than the current - // value. Values that are not at least 10% greater than the existing value are - // rounded up so that they are 10% greater than the current value. If you are - // migrating from Provisioned IOPS to standard storage, set this value to 0. - // The DB instance will require a reboot for the change in storage type to take - // effect. - // - // SQL Server - // - // Setting the IOPS value for the SQL Server database engine is not supported. - // - // Type: Integer + // is set to true for this request. If you are migrating from Provisioned IOPS + // to standard storage, set this value to 0. The DB instance will require a + // reboot for the change in storage type to take effect. // // If you choose to migrate your DB instance from using standard storage to // using Provisioned IOPS, or from using Provisioned IOPS to using standard @@ -19033,6 +18912,13 @@ type ModifyDBInstanceInput struct { // No other Amazon RDS operations can take place for the instance, including // modifying the instance, rebooting the instance, deleting the instance, creating // a Read Replica for the instance, and creating a DB snapshot of the instance. + // + // Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied + // must be at least 10% greater than the current value. 
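// Example (an illustrative sketch of the 10% constraint described above, not
// part of the SDK): a requested value below 110% of the current setting is
// rounded up so that it is 10% greater than the current value.
//
//    // effectiveStorage returns the allocated storage RDS would apply for a
//    // requested value, using integer math to round 110% of current upward.
//    func effectiveStorage(current, requested int64) int64 {
//        minimum := current + (current+9)/10 // ceil(1.10 * current)
//        if requested < minimum {
//            return minimum
//        }
//        return requested
//    }
//
//    effectiveStorage(100, 105) // 110: rounded up to 10% above current
//    effectiveStorage(100, 200) // 200: already more than 10% greater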
Values that are not + // at least 10% greater than the existing value are rounded up so that they + // are 10% greater than the current value. + // + // Default: Uses existing setting Iops *int64 `type:"integer"` // The license model for the DB instance. @@ -19102,8 +18988,6 @@ type ModifyDBInstanceInput struct { // Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter // does not result in an outage and the change is applied during the next maintenance // window unless the ApplyImmediately parameter is set to true for this request. - // - // Constraints: Cannot be specified if the DB instance is a Read Replica. MultiAZ *bool `type:"boolean"` // The new DB instance identifier for the DB instance when renaming a DB instance. @@ -19207,9 +19091,23 @@ type ModifyDBInstanceInput struct { // Specifies the storage type to be associated with the DB instance. // - // Valid values: standard | gp2 | io1 + // If you specify Provisioned IOPS (io1), you must also include a value for + // the Iops parameter. // - // If you specify io1, you must also include a value for the Iops parameter. + // If you choose to migrate your DB instance from using standard storage to + // using Provisioned IOPS, or from using Provisioned IOPS to using standard + // storage, the process can take time. The duration of the migration depends + // on several factors such as database load, storage size, storage type (standard + // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number + // of prior scale storage operations. Typical migration times are under 24 hours, + // but the process can take up to several days in some cases. During the migration, + // the DB instance is available for use, but might experience performance degradation. + // While the migration takes place, nightly backups for the instance are suspended. + // No other Amazon RDS operations can take place for the instance, including + // modifying the instance, rebooting the instance, deleting the instance, creating + // a Read Replica for the instance, and creating a DB snapshot of the instance. + // + // Valid values: standard | gp2 | io1 // // Default: io1 if the Iops parameter is specified, otherwise standard StorageType *string `type:"string"` @@ -23637,8 +23535,8 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // // Default: The same as source // - // Constraint: Must be compatible with the engine of the source. You can restore - // a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. + // Constraint: Must be compatible with the engine of the source. For example, + // you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. // // Valid Values: // @@ -25475,7 +25373,7 @@ func (s *ValidDBInstanceModificationsMessage) SetStorage(v []ValidStorageOptions type ValidStorageOptions struct { _ struct{} `type:"structure"` - // The valid range of Provisioned IOPS to gigabytes of storage multiplier. For + // The valid range of Provisioned IOPS to gibibytes of storage multiplier. For // example, 3-10, which means that provisioned IOPS can be between 3 and 10 // times storage. IopsToStorageRatio []DoubleRange `locationNameList:"DoubleRange" type:"list"` @@ -25483,7 +25381,7 @@ type ValidStorageOptions struct { // The valid range of provisioned IOPS. For example, 1000-20000. ProvisionedIops []Range `locationNameList:"Range" type:"list"` - // The valid range of storage in gigabytes. For example, 100 to 6144. + // The valid range of storage in gibibytes. For example, 100 to 16384. 
StorageSize []Range `locationNameList:"Range" type:"list"` // The valid storage types for your DB instance. For example, gp2, io1. diff --git a/service/route53/errors.go b/service/route53/errors.go index 856039e95d7..d37e10cdebd 100644 --- a/service/route53/errors.go +++ b/service/route53/errors.go @@ -379,6 +379,19 @@ const ( // with the AWS Support Center. ErrCodeTooManyTrafficPolicyInstances = "TooManyTrafficPolicyInstances" + // ErrCodeTooManyTrafficPolicyVersionsForCurrentPolicy for service response error code + // "TooManyTrafficPolicyVersionsForCurrentPolicy". + // + // This traffic policy version can't be created because you've reached the limit + // of 1000 on the number of versions that you can create for the current traffic + // policy. + // + // To create more traffic policy versions, you can use GetTrafficPolicy to get + // the traffic policy document for a specified traffic policy version, and then + // use CreateTrafficPolicy to create a new traffic policy using the traffic + // policy document. + ErrCodeTooManyTrafficPolicyVersionsForCurrentPolicy = "TooManyTrafficPolicyVersionsForCurrentPolicy" + // ErrCodeTooManyVPCAssociationAuthorizations for service response error code // "TooManyVPCAssociationAuthorizations". // diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index bad31a9d975..0d545a64346 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -238,9 +238,8 @@ func (r CreateModelRequest) Send() (*CreateModelOutput, error) { // Amazon SageMaker then deploys all of the containers that you defined for // the model in the hosting environment. // -// In the CreateModel request, you must define at least one container with the -// PrimaryContainer parameter. You can optionally specify additional containers -// with the SupplementalContainers parameter. +// In the CreateModel request, you must define a container with the PrimaryContainer +// parameter. // // In the request, you also provide an IAM role that Amazon SageMaker can assume // to access model artifacts and docker image for deployment on ML compute hosting @@ -1664,8 +1663,8 @@ func (r StartNotebookInstanceRequest) Send() (*StartNotebookInstanceOutput, erro // Launches an ML compute instance with the latest version of the libraries // and attaches your ML storage volume. After configuring the notebook instance, // Amazon SageMaker sets the notebook instance status to InService. A notebook -// instance's status must be InService (is this same as "Running" in the console?) -// before you can connect to your Jupyter notebook. +// instance's status must be InService before you can connect to your Jupyter +// notebook. // // // Example sending a request using the StartNotebookInstanceRequest method. // req := client.StartNotebookInstanceRequest(params) @@ -2578,9 +2577,6 @@ type CreateModelInput struct { // PrimaryContainer is a required field PrimaryContainer *ContainerDefinition `type:"structure" required:"true"` - // The additional optional containers to deploy. - SupplementalContainers []ContainerDefinition `type:"list"` - // An array of key-value pairs. For more information, see Using Cost Allocation // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) // in the AWS Billing and Cost Management User Guide. 
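// Example (an illustrative sketch, assuming a SageMaker client created
// elsewhere and the usual aws, sagemaker, and fmt imports): defining a model
// with the required PrimaryContainer parameter. The image, artifact, and role
// values are placeholders.
//
//    input := &sagemaker.CreateModelInput{
//        ModelName:        aws.String("example-model"),
//        ExecutionRoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleSageMakerRole"),
//        PrimaryContainer: &sagemaker.ContainerDefinition{
//            Image:        aws.String("123456789012.dkr.ecr.us-east-1.amazonaws.com/example-image:latest"),
//            ModelDataUrl: aws.String("s3://example-bucket/model.tar.gz"),
//        },
//    }
//    req := client.CreateModelRequest(input)
//    resp, err := req.Send()
//    if err != nil {
//        // handle the error
//    }
//    fmt.Println(resp)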
@@ -2620,13 +2616,6 @@ func (s *CreateModelInput) Validate() error { invalidParams.AddNested("PrimaryContainer", err.(aws.ErrInvalidParams)) } } - if s.SupplementalContainers != nil { - for i, v := range s.SupplementalContainers { - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SupplementalContainers", i), err.(aws.ErrInvalidParams)) - } - } - } if s.Tags != nil { for i, v := range s.Tags { if err := v.Validate(); err != nil { @@ -2659,12 +2648,6 @@ func (s *CreateModelInput) SetPrimaryContainer(v *ContainerDefinition) *CreateMo return s } -// SetSupplementalContainers sets the SupplementalContainers field's value. -func (s *CreateModelInput) SetSupplementalContainers(v []ContainerDefinition) *CreateModelInput { - s.SupplementalContainers = v - return s -} - // SetTags sets the Tags field's value. func (s *CreateModelInput) SetTags(v []Tag) *CreateModelInput { s.Tags = v @@ -3915,12 +3898,6 @@ type DescribeModelOutput struct { // // PrimaryContainer is a required field PrimaryContainer *ContainerDefinition `type:"structure" required:"true"` - - // The description of additional optional containers that you defined when creating - // the model. - // - // SupplementalContainers is a required field - SupplementalContainers []ContainerDefinition `type:"list" required:"true"` } // String returns the string representation @@ -3968,12 +3945,6 @@ func (s *DescribeModelOutput) SetPrimaryContainer(v *ContainerDefinition) *Descr return s } -// SetSupplementalContainers sets the SupplementalContainers field's value. -func (s *DescribeModelOutput) SetSupplementalContainers(v []ContainerDefinition) *DescribeModelOutput { - s.SupplementalContainers = v - return s -} - // Please also see https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeNotebookInstanceInput type DescribeNotebookInstanceInput struct { _ struct{} `type:"structure"` diff --git a/service/sms/api.go b/service/sms/api.go index 4329a1e2815..e8b66450f5b 100644 --- a/service/sms/api.go +++ b/service/sms/api.go @@ -816,7 +816,7 @@ type Connector struct { // Hardware (MAC) address MacAddress *string `locationName:"macAddress" type:"string"` - // Status of on-premise Connector + // Status of on-premises Connector Status ConnectorStatus `locationName:"status" type:"string" enum:"true"` // Connector version string @@ -2172,7 +2172,7 @@ const ( ConnectorCapabilityVsphere ConnectorCapability = "VSPHERE" ) -// Status of on-premise Connector +// Status of on-premises Connector type ConnectorStatus string // Enum values for ConnectorStatus diff --git a/service/snowball/api.go b/service/snowball/api.go index 231080b1267..a2f35158a35 100644 --- a/service/snowball/api.go +++ b/service/snowball/api.go @@ -1642,7 +1642,7 @@ func (s *CreateAddressOutput) SetAddressId(v string) *CreateAddressOutput { type CreateClusterInput struct { _ struct{} `type:"structure"` - // The ID for the address that you want the cluster shipped to.> + // The ID for the address that you want the cluster shipped to. // // AddressId is a required field AddressId *string `min:"40" type:"string" required:"true"` @@ -3612,12 +3612,12 @@ func (s *Shipment) SetTrackingNumber(v string) *Shipment { type ShippingDetails struct { _ struct{} `type:"structure"` - // The Status and TrackingNumber values for a Snowball being delivered to the - // address that you specified for a particular job. - InboundShipment *Shipment `type:"structure"` - // The Status and TrackingNumber values for a Snowball being returned to AWS // for a particular job. 
+ InboundShipment *Shipment `type:"structure"` + + // The Status and TrackingNumber values for a Snowball being delivered to the + // address that you specified for a particular job. OutboundShipment *Shipment `type:"structure"` // The shipping speed for a particular job. This speed doesn't dictate how soon diff --git a/service/ssm/api.go b/service/ssm/api.go index 3df4f2426ac..4a420a54418 100644 --- a/service/ssm/api.go +++ b/service/ssm/api.go @@ -41,7 +41,7 @@ func (r AddTagsToResourceRequest) Send() (*AddTagsToResourceOutput, error) { // and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. // Or Key=Stack and Value=Production, Pre-Production, or Test. // -// Each resource can have a maximum of 10 tags. +// Each resource can have a maximum of 50 tags. // // We recommend that you devise a set of tag keys that meets your needs for // each resource type. Using a consistent set of tag keys makes it easier for @@ -419,6 +419,9 @@ func (r CreatePatchBaselineRequest) Send() (*CreatePatchBaselineOutput, error) { // // Creates a patch baseline. // +// For information about valid key and value pairs in PatchFilters for each +// supported operating system type, see PatchFilter (http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html). +// // // Example sending a request using the CreatePatchBaselineRequest method. // req := client.CreatePatchBaselineRequest(params) // resp, err := req.Send() @@ -3314,6 +3317,8 @@ func (r GetParametersByPathRequest) Send() (*GetParametersByPathOutput, error) { // that point and a NextToken. You can specify the NextToken in a subsequent // call to get the next set of results. // +// This API action doesn't support filtering by tags. +// // // Example sending a request using the GetParametersByPathRequest method. // req := client.GetParametersByPathRequest(params) // resp, err := req.Send() @@ -4406,6 +4411,43 @@ func (r PutComplianceItemsRequest) Send() (*PutComplianceItemsOutput, error) { // so you must provide a full list of compliance items each time that you send // the request. // +// ComplianceType can be one of the following: +// +// * ExecutionId: The execution ID when the patch, association, or custom +// compliance item was applied. +// +// * ExecutionType: Specify patch, association, or Custom:string. +// +// * ExecutionTime: The time the patch, association, or custom compliance +// item was applied to the instance. +// +// * Id: The patch, association, or custom compliance ID. +// +// * Title: A title. +// +// * Status: The status of the compliance item. For example, approved for +// patches, or Failed for associations. +// +// * Severity: A patch severity. For example, critical. +// +// * DocumentName: An SSM document name. For example, AWS-RunPatchBaseline. +// +// * DocumentVersion: An SSM document version number. For example, 4. +// +// * Classification: A patch classification. For example, security updates. +// +// * PatchBaselineId: A patch baseline ID. +// +// * PatchSeverity: A patch severity. For example, Critical. +// +// * PatchState: A patch state. For example, InstancesWithFailedPatches. +// +// * PatchGroup: The name of a patch group. +// +// * InstalledTime: The time the association, patch, or custom compliance +// item was applied to the resource. Specify the time by using the following +// format: yyyy-MM-dd'T'HH:mm:ss'Z' +// // // Example sending a request using the PutComplianceItemsRequest method.
// req := client.PutComplianceItemsRequest(params) // resp, err := req.Send() @@ -5428,6 +5470,9 @@ func (r UpdatePatchBaselineRequest) Send() (*UpdatePatchBaselineOutput, error) { // Modifies an existing patch baseline. Fields not specified in the request // are left unchanged. // +// For information about valid key and value pairs in PatchFilters for each +// supported operating system type, see PatchFilter (http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html). +// // // Example sending a request using the UpdatePatchBaselineRequest method. // req := client.UpdatePatchBaselineRequest(params) // resp, err := req.Send() @@ -15373,8 +15418,8 @@ type GetParametersByPathInput struct { ParameterFilters []ParameterStringFilter `type:"list"` // The hierarchy for the parameter. Hierarchies start with a forward slash (/) - // and end with the parameter name. A hierarchy can have a maximum of five levels. - // For example: /Finance/Prod/IAD/WinServ2016/license15 + // and end with the parameter name. A hierarchy can have a maximum of 15 levels. + // Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33 // // Path is a required field Path *string `min:"1" type:"string" required:"true"` @@ -20136,7 +20181,7 @@ func (s *ParameterStringFilter) SetValues(v []string) *ParameterStringFilter { return s } -// One or more filters. Use a filter to return a more specific list of results. +// This data type is deprecated. Instead, use ParameterStringFilter. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ParametersFilter type ParametersFilter struct { _ struct{} `type:"structure"` @@ -20478,17 +20523,249 @@ func (s *PatchComplianceData) SetTitle(v string) *PatchComplianceData { } // Defines a patch filter. +// +// A patch filter consists of key/value pairs, but not all keys are valid for +// all operating system types. For example, the key PRODUCT is valid for all +// supported operating system types. The key MSRC_SEVERITY, however, is valid +// only for Windows operating systems, and the key SECTION is valid only for +// Ubuntu operating systems. +// +// Refer to the following sections for information about which keys may be used +// with each major operating system, and which values are valid for each key. +// +// Windows Operating Systems +// +// The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, +// and MSRC_SEVERITY. See the following lists for valid values for each of these +// keys. +// +// Supported key:PRODUCT +// +// Supported values: +// +// * Windows7 +// +// * Windows8 +// +// * Windows8.1 +// +// * Windows8Embedded +// +// * Windows10 +// +// * Windows10LTSB +// +// * WindowsServer2008 +// +// * WindowsServer2008R2 +// +// * WindowsServer2012 +// +// * WindowsServer2012R2 +// +// * WindowsServer2016 +// +// Supported key:CLASSIFICATION +// +// Supported values: +// +// * CriticalUpdates +// +// * DefinitionUpdates +// +// * Drivers +// +// * FeaturePacks +// +// * SecurityUpdates +// +// * ServicePacks +// +// * Tools +// +// * UpdateRollups +// +// * Updates +// +// * Upgrades +// +// Supported key:MSRC_SEVERITY +// +// Supported values: +// +// * Critical +// +// * Important +// +// * Moderate +// +// * Low +// +// * Unspecified +// +// Ubuntu Operating Systems +// +// The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and +// SECTION. See the following lists for valid values for each of these keys. 
+// +// Supported key:PRODUCT +// +// Supported values: +// +// * Ubuntu14.04 +// +// * Ubuntu16.04 +// +// Supported key:PRIORITY +// +// Supported values: +// +// * Required +// +// * Important +// +// * Standard +// +// * Optional +// +// * Extra +// +// Supported key:SECTION +// +// Only the length of the key value is validated. Minimum length is 1. Maximum +// length is 64. +// +// Amazon Linux Operating Systems +// +// The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, +// and SEVERITY. See the following lists for valid values for each of these +// keys. +// +// Supported key:PRODUCT +// +// Supported values: +// +// * AmazonLinux2012.03 +// +// * AmazonLinux2012.09 +// +// * AmazonLinux2013.03 +// +// * AmazonLinux2013.09 +// +// * AmazonLinux2014.03 +// +// * AmazonLinux2014.09 +// +// * AmazonLinux2015.03 +// +// * AmazonLinux2015.09 +// +// * AmazonLinux2016.03 +// +// * AmazonLinux2016.09 +// +// * AmazonLinux2017.03 +// +// * AmazonLinux2017.09 +// +// Supported key:CLASSIFICATION +// +// Supported values: +// +// * Security +// +// * Bugfix +// +// * Enhancement +// +// * Recommended +// +// * Newpackage +// +// Supported key:SEVERITY +// +// Supported values: +// +// * Critical +// +// * Important +// +// * Medium +// +// * Low +// +// RedHat Enterprise Linux (RHEL) Operating Systems +// +// The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, +// CLASSIFICATION, and SEVERITY. See the following lists for valid values for +// each of these keys. +// +// Supported key:PRODUCT +// +// Supported values: +// +// * RedhatEnterpriseLinux6.5 +// +// * RedhatEnterpriseLinux6.6 +// +// * RedhatEnterpriseLinux6.7 +// +// * RedhatEnterpriseLinux6.8 +// +// * RedhatEnterpriseLinux6.9 +// +// * RedhatEnterpriseLinux7.0 +// +// * RedhatEnterpriseLinux7.1 +// +// * RedhatEnterpriseLinux7.2 +// +// * RedhatEnterpriseLinux7.3 +// +// * RedhatEnterpriseLinux7.4 +// +// Supported key:CLASSIFICATION +// +// Supported values: +// +// * Security +// +// * Bugfix +// +// * Enhancement +// +// * Recommended +// +// * Newpackage +// +// Supported key:SEVERITY +// +// Supported values: +// +// * Critical +// +// * Important +// +// * Medium +// +// * Low // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PatchFilter type PatchFilter struct { _ struct{} `type:"structure"` - // The key for the filter (PRODUCT, CLASSIFICATION, MSRC_SEVERITY, PATCH_ID) + // The key for the filter. + // + // See PatchFilter for lists of valid keys for each operating system type. // // Key is a required field Key PatchFilterKey `type:"string" required:"true" enum:"true"` // The value for the filter key. // + // See PatchFilter for lists of valid values for each key based on operating + // system type. + // // Values is a required field Values []string `min:"1" type:"list" required:"true"` } diff --git a/service/ssm/errors.go b/service/ssm/errors.go index c0030a51957..8c932a093c3 100644 --- a/service/ssm/errors.go +++ b/service/ssm/errors.go @@ -120,8 +120,11 @@ const ( // ErrCodeDoesNotExistException for service response error code // "DoesNotExistException". // - // Error returned when the ID specified for a resource (e.g. a Maintenance Window) - // doesn't exist. + // Error returned when the ID specified for a resource, such as a Maintenance + // Window or Patch baseline, doesn't exist. 
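// Example (an illustrative sketch): building a patch filter with one of the
// PatchFilter keys and values listed above for Windows. The enum constant
// name follows the SDK's generated pattern, and the surrounding patch
// baseline setup is omitted.
//
//    filters := ssm.PatchFilterGroup{
//        PatchFilters: []ssm.PatchFilter{{
//            Key:    ssm.PatchFilterKeyProduct,
//            Values: []string{"WindowsServer2016"},
//        }},
//    }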
+ // + // For information about resource limits in Systems Manager, see AWS Systems + // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). ErrCodeDoesNotExistException = "DoesNotExistException" // ErrCodeDuplicateDocumentContent for service response error code @@ -147,11 +150,8 @@ const ( // ErrCodeHierarchyLevelLimitExceededException for service response error code // "HierarchyLevelLimitExceededException". // - // A hierarchy can have a maximum of five levels. For example: - // - // /Finance/Prod/IAD/OS/WinServ2016/license15 - // - // For more information, see Working with Systems Manager Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-working.html). + // A hierarchy can have a maximum of 15 levels. For more information, see Working + // with Systems Manager Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-working.html). ErrCodeHierarchyLevelLimitExceededException = "HierarchyLevelLimitExceededException" // ErrCodeHierarchyTypeMismatchException for service response error code @@ -525,8 +525,11 @@ const ( // ErrCodeResourceLimitExceededException for service response error code // "ResourceLimitExceededException". // - // Error returned when the caller has exceeded the default resource limits (e.g. - // too many Maintenance Windows have been created). + // Error returned when the caller has exceeded the default resource limits. + // For example, too many Maintenance Windows or Patch baselines have been created. + // + // For information about resource limits in Systems Manager, see AWS Systems + // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). ErrCodeResourceLimitExceededException = "ResourceLimitExceededException" // ErrCodeStatusUnchanged for service response error code diff --git a/service/workspaces/api.go b/service/workspaces/api.go index 8c4878b38c5..52cc6cce356 100644 --- a/service/workspaces/api.go +++ b/service/workspaces/api.go @@ -31,7 +31,7 @@ func (r CreateTagsRequest) Send() (*CreateTagsOutput, error) { // CreateTagsRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Creates tags for a WorkSpace. +// Creates tags for the specified WorkSpace. // // // Example sending a request using the CreateTagsRequest method. // req := client.CreateTagsRequest(params) @@ -131,7 +131,7 @@ func (r DeleteTagsRequest) Send() (*DeleteTagsOutput, error) { // DeleteTagsRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Deletes tags from a WorkSpace. +// Deletes the specified tags from a WorkSpace. // // // Example sending a request using the DeleteTagsRequest method. // req := client.DeleteTagsRequest(params) @@ -180,7 +180,7 @@ func (r DescribeTagsRequest) Send() (*DescribeTagsOutput, error) { // DescribeTagsRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Describes tags for a WorkSpace. +// Describes the tags for the specified WorkSpace. // // // Example sending a request using the DescribeTagsRequest method. // req := client.DescribeTagsRequest(params) @@ -229,16 +229,9 @@ func (r DescribeWorkspaceBundlesRequest) Send() (*DescribeWorkspaceBundlesOutput // DescribeWorkspaceBundlesRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Obtains information about the WorkSpace bundles that are available to your -// account in the specified region. 
+// Describes the available WorkSpace bundles. // -// You can filter the results with either the BundleIds parameter, or the Owner -// parameter, but not both. -// -// This operation supports pagination with the use of the NextToken request -// and response parameters. If more results are available, the NextToken response -// member contains a token that you pass in the next call to this operation -// to retrieve the next set of items. +// You can filter the results using either bundle ID or owner, but not both. // // // Example sending a request using the DescribeWorkspaceBundlesRequest method. // req := client.DescribeWorkspaceBundlesRequest(params) @@ -343,14 +336,8 @@ func (r DescribeWorkspaceDirectoriesRequest) Send() (*DescribeWorkspaceDirectori // DescribeWorkspaceDirectoriesRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Retrieves information about the AWS Directory Service directories in the -// region that are registered with Amazon WorkSpaces and are available to your -// account. -// -// This operation supports pagination with the use of the NextToken request -// and response parameters. If more results are available, the NextToken response -// member contains a token that you pass in the next call to this operation -// to retrieve the next set of items. +// Describes the available AWS Directory Service directories that are registered +// with Amazon WorkSpaces. // // // Example sending a request using the DescribeWorkspaceDirectoriesRequest method. // req := client.DescribeWorkspaceDirectoriesRequest(params) @@ -455,15 +442,10 @@ func (r DescribeWorkspacesRequest) Send() (*DescribeWorkspacesOutput, error) { // DescribeWorkspacesRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Obtains information about the specified WorkSpaces. -// -// Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, -// can be specified at a time. +// Describes the specified WorkSpaces. // -// This operation supports pagination with the use of the NextToken request -// and response parameters. If more results are available, the NextToken response -// member contains a token that you pass in the next call to this operation -// to retrieve the next set of items. +// You can filter the results using bundle ID, directory ID, or owner, but you +// can specify only one filter at a time. // // // Example sending a request using the DescribeWorkspacesRequest method. // req := client.DescribeWorkspacesRequest(params) @@ -568,7 +550,7 @@ func (r DescribeWorkspacesConnectionStatusRequest) Send() (*DescribeWorkspacesCo // DescribeWorkspacesConnectionStatusRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Describes the connection status of a specified WorkSpace. +// Describes the connection status of the specified WorkSpaces. // // // Example sending a request using the DescribeWorkspacesConnectionStatusRequest method. // req := client.DescribeWorkspacesConnectionStatusRequest(params) @@ -617,8 +599,7 @@ func (r ModifyWorkspacePropertiesRequest) Send() (*ModifyWorkspacePropertiesOutp // ModifyWorkspacePropertiesRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Modifies the WorkSpace properties, including the running mode and AutoStop -// time. +// Modifies the specified WorkSpace properties. // // // Example sending a request using the ModifyWorkspacePropertiesRequest method. 
// req := client.ModifyWorkspacePropertiesRequest(params) @@ -669,8 +650,8 @@ func (r RebootWorkspacesRequest) Send() (*RebootWorkspacesOutput, error) { // // Reboots the specified WorkSpaces. // -// To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE, -// IMPAIRED, or INOPERABLE. +// You cannot reboot a WorkSpace unless its state is AVAILABLE, IMPAIRED, or +// INOPERABLE. // // This operation is asynchronous and returns before the WorkSpaces have rebooted. // @@ -723,20 +704,10 @@ func (r RebuildWorkspacesRequest) Send() (*RebuildWorkspacesOutput, error) { // // Rebuilds the specified WorkSpaces. // -// Rebuilding a WorkSpace is a potentially destructive action that can result -// in the loss of data. Rebuilding a WorkSpace causes the following to occur: -// -// * The system is restored to the image of the bundle that the WorkSpace -// is created from. Any applications that have been installed, or system -// settings that have been made since the WorkSpace was created will be lost. -// -// * The data drive (D drive) is re-created from the last automatic snapshot -// taken of the data drive. The current contents of the data drive are overwritten. -// Automatic snapshots of the data drive are taken every 12 hours, so the -// snapshot can be as much as 12 hours old. +// You cannot rebuild a WorkSpace unless its state is AVAILABLE or ERROR. // -// To be able to rebuild a WorkSpace, the WorkSpace must have a State of AVAILABLE -// or ERROR. +// Rebuilding a WorkSpace is a potentially destructive action that can result +// in the loss of data. For more information, see Rebuild a WorkSpace (http://docs.aws.amazon.com/workspaces/latest/adminguide/reset-workspace.html). // // This operation is asynchronous and returns before the WorkSpaces have been // completely rebuilt. @@ -788,8 +759,10 @@ func (r StartWorkspacesRequest) Send() (*StartWorkspacesOutput, error) { // StartWorkspacesRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Starts the specified WorkSpaces. The WorkSpaces must have a running mode -// of AutoStop and a state of STOPPED. +// Starts the specified WorkSpaces. +// +// You cannot start a WorkSpace unless it has a running mode of AutoStop and +// a state of STOPPED. // // // Example sending a request using the StartWorkspacesRequest method. // req := client.StartWorkspacesRequest(params) @@ -838,8 +811,10 @@ func (r StopWorkspacesRequest) Send() (*StopWorkspacesOutput, error) { // StopWorkspacesRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Stops the specified WorkSpaces. The WorkSpaces must have a running mode of -// AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. +// Stops the specified WorkSpaces. +// +// You cannot stop a WorkSpace unless it has a running mode of AutoStop and +// a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. // // // Example sending a request using the StopWorkspacesRequest method. // req := client.StopWorkspacesRequest(params) @@ -891,8 +866,8 @@ func (r TerminateWorkspacesRequest) Send() (*TerminateWorkspacesOutput, error) { // Terminates the specified WorkSpaces. // // Terminating a WorkSpace is a permanent action and cannot be undone. The user's -// data is not maintained and will be destroyed. If you need to archive any -// user data, contact Amazon Web Services before terminating the WorkSpace. +// data is destroyed. If you need to archive any user data, contact Amazon Web +// Services before terminating the WorkSpace. 
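// Example (an illustrative sketch, assuming a WorkSpaces client created
// elsewhere and the usual aws, workspaces, and fmt imports): stopping an
// AutoStop WorkSpace, subject to the running-mode and state requirements
// described above for StopWorkspaces. The WorkSpace ID is a placeholder.
//
//    input := &workspaces.StopWorkspacesInput{
//        StopWorkspaceRequests: []workspaces.StopRequest{
//            {WorkspaceId: aws.String("ws-0123456789")},
//        },
//    }
//    req := client.StopWorkspacesRequest(input)
//    resp, err := req.Send()
//    if err != nil {
//        // handle the error
//    }
//    fmt.Println(resp.FailedRequests)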
// // You can terminate a WorkSpace that is in any state except SUSPENDED. // @@ -925,12 +900,12 @@ func (c *WorkSpaces) TerminateWorkspacesRequest(input *TerminateWorkspacesInput) return TerminateWorkspacesRequest{Request: req, Input: input} } -// Contains information about the compute type of a WorkSpace bundle. +// Information about the compute type. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ComputeType type ComputeType struct { _ struct{} `type:"structure"` - // The name of the compute type for the bundle. + // The compute type. Name Compute `type:"string" enum:"true"` } @@ -950,17 +925,16 @@ func (s *ComputeType) SetName(v Compute) *ComputeType { return s } -// The request of the CreateTags operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsRequest type CreateTagsInput struct { _ struct{} `type:"structure"` - // The resource ID of the request. + // The ID of the resource. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` - // The tags of the request. + // The tags. Each resource can have a maximum of 50 tags. // // Tags is a required field Tags []Tag `type:"list" required:"true"` @@ -1016,7 +990,6 @@ func (s *CreateTagsInput) SetTags(v []Tag) *CreateTagsInput { return s } -// The result of the CreateTags operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsResult type CreateTagsOutput struct { _ struct{} `type:"structure"` @@ -1039,12 +1012,11 @@ func (s CreateTagsOutput) SDKResponseMetadata() aws.Response { return s.responseMetadata } -// Contains the inputs for the CreateWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesRequest type CreateWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to create. + // Information about the WorkSpaces to create. // // Workspaces is a required field Workspaces []WorkspaceRequest `min:"1" type:"list" required:"true"` @@ -1090,21 +1062,20 @@ func (s *CreateWorkspacesInput) SetWorkspaces(v []WorkspaceRequest) *CreateWorks return s } -// Contains the result of the CreateWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesResult type CreateWorkspacesOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // An array of structures that represent the WorkSpaces that could not be created. + // Information about the WorkSpaces that could not be created. FailedRequests []FailedCreateWorkspaceRequest `type:"list"` - // An array of structures that represent the WorkSpaces that were created. + // Information about the WorkSpaces that were created. // - // Because this operation is asynchronous, the identifier in WorkspaceId is - // not immediately available. If you immediately call DescribeWorkspaces with - // this identifier, no information will be returned. + // Because this operation is asynchronous, the identifier returned is not immediately + // available for use with other operations. For example, if you call DescribeWorkspaces + // before the WorkSpace is created, the information returned can be incomplete. PendingRequests []Workspace `type:"list"` } @@ -1135,27 +1106,25 @@ func (s *CreateWorkspacesOutput) SetPendingRequests(v []Workspace) *CreateWorksp return s } -// Contains default WorkSpace creation information. 
+// Information about defaults used to create a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DefaultWorkspaceCreationProperties type DefaultWorkspaceCreationProperties struct { _ struct{} `type:"structure"` - // The identifier of any custom security groups that are applied to the WorkSpaces - // when they are created. + // The identifier of any security groups to apply to WorkSpaces when they are + // created. CustomSecurityGroupId *string `type:"string"` - // The organizational unit (OU) in the directory that the WorkSpace machine - // accounts are placed in. + // The organizational unit (OU) in the directory for the WorkSpace machine accounts. DefaultOu *string `type:"string"` - // A public IP address will be attached to all WorkSpaces that are created or - // rebuilt. + // The public IP address to attach to all WorkSpaces that are created or rebuilt. EnableInternetAccess *bool `type:"boolean"` - // Specifies if the directory is enabled for Amazon WorkDocs. + // Indicates whether the directory is enabled for Amazon WorkDocs. EnableWorkDocs *bool `type:"boolean"` - // The WorkSpace user is an administrator on the WorkSpace. + // Indicates whether the WorkSpace user is an administrator on the WorkSpace. UserEnabledAsLocalAdministrator *bool `type:"boolean"` } @@ -1199,17 +1168,16 @@ func (s *DefaultWorkspaceCreationProperties) SetUserEnabledAsLocalAdministrator( return s } -// The request of the DeleteTags operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsRequest type DeleteTagsInput struct { _ struct{} `type:"structure"` - // The resource ID of the request. + // The ID of the resource. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` - // The tag keys of the request. + // The tag keys. // // TagKeys is a required field TagKeys []string `type:"list" required:"true"` @@ -1258,7 +1226,6 @@ func (s *DeleteTagsInput) SetTagKeys(v []string) *DeleteTagsInput { return s } -// The result of the DeleteTags operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsResult type DeleteTagsOutput struct { _ struct{} `type:"structure"` @@ -1281,12 +1248,11 @@ func (s DeleteTagsOutput) SDKResponseMetadata() aws.Response { return s.responseMetadata } -// The request of the DescribeTags operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsRequest type DescribeTagsInput struct { _ struct{} `type:"structure"` - // The resource ID of the request. + // The ID of the resource. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -1325,14 +1291,13 @@ func (s *DescribeTagsInput) SetResourceId(v string) *DescribeTagsInput { return s } -// The result of the DescribeTags operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsResult type DescribeTagsOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // The list of tags. + // The tags. TagList []Tag `type:"list"` } @@ -1357,27 +1322,23 @@ func (s *DescribeTagsOutput) SetTagList(v []Tag) *DescribeTagsOutput { return s } -// Contains the inputs for the DescribeWorkspaceBundles operation. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesRequest type DescribeWorkspaceBundlesInput struct { _ struct{} `type:"structure"` - // An array of strings that contains the identifiers of the bundles to retrieve. - // This parameter cannot be combined with any other filter parameter. + // The IDs of the bundles. This parameter cannot be combined with any other + // filter. BundleIds []string `min:"1" type:"list"` - // The NextToken value from a previous call to this operation. Pass null if - // this is the first call. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` - // The owner of the bundles to retrieve. This parameter cannot be combined with - // any other filter parameter. - // - // This contains one of the following values: - // - // * null- Retrieves the bundles that belong to the account making the call. + // The owner of the bundles. This parameter cannot be combined with any other + // filter. // - // * AMAZON- Retrieves the bundles that are provided by AWS. + // Specify AMAZON to describe the bundles provided by AWS or null to describe + // the bundles that belong to your account. Owner *string `type:"string"` } @@ -1425,20 +1386,18 @@ func (s *DescribeWorkspaceBundlesInput) SetOwner(v string) *DescribeWorkspaceBun return s } -// Contains the results of the DescribeWorkspaceBundles operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesResult type DescribeWorkspaceBundlesOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // An array of structures that contain information about the bundles. + // Information about the bundles. Bundles []WorkspaceBundle `type:"list"` - // If not null, more results are available. Pass this value for the NextToken - // parameter in a subsequent call to this operation to retrieve the next set - // of items. This token is valid for one day and must be used within that time - // frame. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. This token is valid for one day and must be used + // within that time frame. NextToken *string `min:"1" type:"string"` } @@ -1469,17 +1428,16 @@ func (s *DescribeWorkspaceBundlesOutput) SetNextToken(v string) *DescribeWorkspa return s } -// Contains the inputs for the DescribeWorkspaceDirectories operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesRequest type DescribeWorkspaceDirectoriesInput struct { _ struct{} `type:"structure"` - // An array of strings that contains the directory identifiers to retrieve information - // for. If this member is null, all directories are retrieved. + // The identifiers of the directories. If the value is null, all directories + // are retrieved. DirectoryIds []string `min:"1" type:"list"` - // The NextToken value from a previous call to this operation. Pass null if - // this is the first call. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` } @@ -1521,20 +1479,18 @@ func (s *DescribeWorkspaceDirectoriesInput) SetNextToken(v string) *DescribeWork return s } -// Contains the results of the DescribeWorkspaceDirectories operation. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesResult type DescribeWorkspaceDirectoriesOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // An array of structures that contain information about the directories. + // Information about the directories. Directories []WorkspaceDirectory `type:"list"` - // If not null, more results are available. Pass this value for the NextToken - // parameter in a subsequent call to this operation to retrieve the next set - // of items. This token is valid for one day and must be used within that time - // frame. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. This token is valid for one day and must be used + // within that time frame. NextToken *string `min:"1" type:"string"` } @@ -1569,10 +1525,11 @@ func (s *DescribeWorkspaceDirectoriesOutput) SetNextToken(v string) *DescribeWor type DescribeWorkspacesConnectionStatusInput struct { _ struct{} `type:"structure"` - // The next token of the request. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` - // An array of strings that contain the identifiers of the WorkSpaces. + // The identifiers of the WorkSpaces. WorkspaceIds []string `min:"1" type:"list"` } @@ -1620,10 +1577,11 @@ type DescribeWorkspacesConnectionStatusOutput struct { responseMetadata aws.Response - // The next token of the result. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. NextToken *string `min:"1" type:"string"` - // The connection status of the WorkSpace. + // Information about the connection status of the WorkSpace. WorkspacesConnectionStatus []WorkspaceConnectionStatus `type:"list"` } @@ -1654,35 +1612,31 @@ func (s *DescribeWorkspacesConnectionStatusOutput) SetWorkspacesConnectionStatus return s } -// Contains the inputs for the DescribeWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesRequest type DescribeWorkspacesInput struct { _ struct{} `type:"structure"` - // The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that - // are created from this bundle will be retrieved. This parameter cannot be - // combined with any other filter parameter. + // The ID of the bundle. All WorkSpaces that are created from this bundle are + // retrieved. This parameter cannot be combined with any other filter. BundleId *string `type:"string"` - // Specifies the directory identifier to which to limit the WorkSpaces. Optionally, - // you can specify a specific directory user with the UserName parameter. This - // parameter cannot be combined with any other filter parameter. + // The ID of the directory. In addition, you can optionally specify a specific + // directory user (see UserName). This parameter cannot be combined with any + // other filter. DirectoryId *string `type:"string"` // The maximum number of items to return. Limit *int64 `min:"1" type:"integer"` - // The NextToken value from a previous call to this operation. Pass null if - // this is the first call. + // The token for the next set of results. (You received this token from a previous + // call.) NextToken *string `min:"1" type:"string"` - // Used with the DirectoryId parameter to specify the directory user for whom - // to obtain the WorkSpace. + // The name of the directory user. 
You must specify this parameter with DirectoryId. UserName *string `min:"1" type:"string"` - // An array of strings that contain the identifiers of the WorkSpaces for which - // to retrieve information. This parameter cannot be combined with any other - // filter parameter. + // The IDs of the WorkSpaces. This parameter cannot be combined with any other + // filter. // // Because the CreateWorkspaces operation is asynchronous, the identifier it // returns is not immediately available. If you immediately call DescribeWorkspaces @@ -1758,23 +1712,21 @@ func (s *DescribeWorkspacesInput) SetWorkspaceIds(v []string) *DescribeWorkspace return s } -// Contains the results for the DescribeWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesResult type DescribeWorkspacesOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // If not null, more results are available. Pass this value for the NextToken - // parameter in a subsequent call to this operation to retrieve the next set - // of items. This token is valid for one day and must be used within that time - // frame. + // The token to use to retrieve the next set of results, or null if there are + // no more results available. This token is valid for one day and must be used + // within that time frame. NextToken *string `min:"1" type:"string"` - // An array of structures that contain the information about the WorkSpaces. + // Information about the WorkSpaces. // - // Because the CreateWorkspaces operation is asynchronous, some of this information - // may be incomplete for a newly-created WorkSpace. + // Because CreateWorkspaces is an asynchronous operation, some of the returned + // information could be incomplete. Workspaces []Workspace `type:"list"` } @@ -1805,7 +1757,7 @@ func (s *DescribeWorkspacesOutput) SetWorkspaces(v []Workspace) *DescribeWorkspa return s } -// Contains information about a WorkSpace that could not be created. +// Information about a WorkSpace that could not be created. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedCreateWorkspaceRequest type FailedCreateWorkspaceRequest struct { _ struct{} `type:"structure"` @@ -1816,8 +1768,7 @@ type FailedCreateWorkspaceRequest struct { // The textual error message. ErrorMessage *string `type:"string"` - // A FailedCreateWorkspaceRequest$WorkspaceRequest object that contains the - // information about the WorkSpace that could not be created. + // Information about the WorkSpace. WorkspaceRequest *WorkspaceRequest `type:"structure"` } @@ -1849,7 +1800,7 @@ func (s *FailedCreateWorkspaceRequest) SetWorkspaceRequest(v *WorkspaceRequest) return s } -// Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), +// Information about a WorkSpace that could not be rebooted (RebootWorkspaces), // rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), // or stopped (StopWorkspaces). // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedWorkspaceChangeRequest @@ -1894,6 +1845,40 @@ func (s *FailedWorkspaceChangeRequest) SetWorkspaceId(v string) *FailedWorkspace return s } +// Information about a WorkSpace modification. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModificationState +type ModificationState struct { + _ struct{} `type:"structure"` + + // The resource. 
+ Resource ModificationResourceEnum `type:"string" enum:"true"` + + // The modification state. + State ModificationStateEnum `type:"string" enum:"true"` +} + +// String returns the string representation +func (s ModificationState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModificationState) GoString() string { + return s.String() +} + +// SetResource sets the Resource field's value. +func (s *ModificationState) SetResource(v ModificationResourceEnum) *ModificationState { + s.Resource = v + return s +} + +// SetState sets the State field's value. +func (s *ModificationState) SetState(v ModificationStateEnum) *ModificationState { + s.State = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspacePropertiesRequest type ModifyWorkspacePropertiesInput struct { _ struct{} `type:"structure"` @@ -1903,7 +1888,7 @@ type ModifyWorkspacePropertiesInput struct { // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` - // The WorkSpace properties of the request. + // The properties of the WorkSpace. // // WorkspaceProperties is a required field WorkspaceProperties *WorkspaceProperties `type:"structure" required:"true"` @@ -1971,13 +1956,12 @@ func (s ModifyWorkspacePropertiesOutput) SDKResponseMetadata() aws.Response { return s.responseMetadata } -// Contains information used with the RebootWorkspaces operation to reboot a -// WorkSpace. +// Information used to reboot a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootRequest type RebootRequest struct { _ struct{} `type:"structure"` - // The identifier of the WorkSpace to reboot. + // The identifier of the WorkSpace. // // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` @@ -2013,12 +1997,11 @@ func (s *RebootRequest) SetWorkspaceId(v string) *RebootRequest { return s } -// Contains the inputs for the RebootWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesRequest type RebootWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to reboot. + // The WorkSpaces to reboot. // // RebootWorkspaceRequests is a required field RebootWorkspaceRequests []RebootRequest `min:"1" type:"list" required:"true"` @@ -2064,14 +2047,13 @@ func (s *RebootWorkspacesInput) SetRebootWorkspaceRequests(v []RebootRequest) *R return s } -// Contains the results of the RebootWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesResult type RebootWorkspacesOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // An array of structures representing any WorkSpaces that could not be rebooted. + // Information about the WorkSpaces that could not be rebooted. FailedRequests []FailedWorkspaceChangeRequest `type:"list"` } @@ -2096,13 +2078,12 @@ func (s *RebootWorkspacesOutput) SetFailedRequests(v []FailedWorkspaceChangeRequ return s } -// Contains information used with the RebuildWorkspaces operation to rebuild -// a WorkSpace. +// Information used to rebuild a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildRequest type RebuildRequest struct { _ struct{} `type:"structure"` - // The identifier of the WorkSpace to rebuild. + // The identifier of the WorkSpace. 
// // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` @@ -2138,12 +2119,11 @@ func (s *RebuildRequest) SetWorkspaceId(v string) *RebuildRequest { return s } -// Contains the inputs for the RebuildWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesRequest type RebuildWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to rebuild. + // The WorkSpaces to rebuild. // // RebuildWorkspaceRequests is a required field RebuildWorkspaceRequests []RebuildRequest `min:"1" type:"list" required:"true"` @@ -2189,14 +2169,13 @@ func (s *RebuildWorkspacesInput) SetRebuildWorkspaceRequests(v []RebuildRequest) return s } -// Contains the results of the RebuildWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesResult type RebuildWorkspacesOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // An array of structures representing any WorkSpaces that could not be rebuilt. + // Information about the WorkSpaces that could not be rebuilt. FailedRequests []FailedWorkspaceChangeRequest `type:"list"` } @@ -2221,7 +2200,32 @@ func (s *RebuildWorkspacesOutput) SetFailedRequests(v []FailedWorkspaceChangeReq return s } -// Describes the start request. +// Information about the root volume for a WorkSpace bundle. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RootStorage +type RootStorage struct { + _ struct{} `type:"structure"` + + // The size of the root volume. + Capacity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RootStorage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RootStorage) GoString() string { + return s.String() +} + +// SetCapacity sets the Capacity field's value. +func (s *RootStorage) SetCapacity(v string) *RootStorage { + s.Capacity = &v + return s +} + +// Information used to start a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartRequest type StartRequest struct { _ struct{} `type:"structure"` @@ -2250,7 +2254,7 @@ func (s *StartRequest) SetWorkspaceId(v string) *StartRequest { type StartWorkspacesInput struct { _ struct{} `type:"structure"` - // The requests. + // The WorkSpaces to start. // // StartWorkspaceRequests is a required field StartWorkspaceRequests []StartRequest `min:"1" type:"list" required:"true"` @@ -2295,7 +2299,7 @@ type StartWorkspacesOutput struct { responseMetadata aws.Response - // The failed requests. + // Information about the WorkSpaces that could not be started. FailedRequests []FailedWorkspaceChangeRequest `type:"list"` } @@ -2320,7 +2324,7 @@ func (s *StartWorkspacesOutput) SetFailedRequests(v []FailedWorkspaceChangeReque return s } -// Describes the stop request. +// Information used to stop a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopRequest type StopRequest struct { _ struct{} `type:"structure"` @@ -2349,7 +2353,7 @@ func (s *StopRequest) SetWorkspaceId(v string) *StopRequest { type StopWorkspacesInput struct { _ struct{} `type:"structure"` - // The requests. + // The WorkSpaces to stop. 
// // StopWorkspaceRequests is a required field StopWorkspaceRequests []StopRequest `min:"1" type:"list" required:"true"` @@ -2394,7 +2398,7 @@ type StopWorkspacesOutput struct { responseMetadata aws.Response - // The failed requests. + // Information about the WorkSpaces that could not be stopped. FailedRequests []FailedWorkspaceChangeRequest `type:"list"` } @@ -2419,7 +2423,7 @@ func (s *StopWorkspacesOutput) SetFailedRequests(v []FailedWorkspaceChangeReques return s } -// Describes the tag of the WorkSpace. +// Information about a tag. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Tag type Tag struct { _ struct{} `type:"structure"` @@ -2472,13 +2476,12 @@ func (s *Tag) SetValue(v string) *Tag { return s } -// Contains information used with the TerminateWorkspaces operation to terminate -// a WorkSpace. +// Information used to terminate a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateRequest type TerminateRequest struct { _ struct{} `type:"structure"` - // The identifier of the WorkSpace to terminate. + // The identifier of the WorkSpace. // // WorkspaceId is a required field WorkspaceId *string `type:"string" required:"true"` @@ -2514,12 +2517,11 @@ func (s *TerminateRequest) SetWorkspaceId(v string) *TerminateRequest { return s } -// Contains the inputs for the TerminateWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesRequest type TerminateWorkspacesInput struct { _ struct{} `type:"structure"` - // An array of structures that specify the WorkSpaces to terminate. + // The WorkSpaces to terminate. // // TerminateWorkspaceRequests is a required field TerminateWorkspaceRequests []TerminateRequest `min:"1" type:"list" required:"true"` @@ -2565,14 +2567,13 @@ func (s *TerminateWorkspacesInput) SetTerminateWorkspaceRequests(v []TerminateRe return s } -// Contains the results of the TerminateWorkspaces operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesResult type TerminateWorkspacesOutput struct { _ struct{} `type:"structure"` responseMetadata aws.Response - // An array of structures representing any WorkSpaces that could not be terminated. + // Information about the WorkSpaces that could not be terminated. FailedRequests []FailedWorkspaceChangeRequest `type:"list"` } @@ -2597,12 +2598,12 @@ func (s *TerminateWorkspacesOutput) SetFailedRequests(v []FailedWorkspaceChangeR return s } -// Contains information about the user storage for a WorkSpace bundle. +// Information about the user storage for a WorkSpace bundle. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UserStorage type UserStorage struct { _ struct{} `type:"structure"` - // The amount of user storage for the bundle. + // The size of the user storage. Capacity *string `min:"1" type:"string"` } @@ -2622,44 +2623,46 @@ func (s *UserStorage) SetCapacity(v string) *UserStorage { return s } -// Contains information about a WorkSpace. +// Information about a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Workspace type Workspace struct { _ struct{} `type:"structure"` - // The identifier of the bundle that the WorkSpace was created from. + // The identifier of the bundle used to create the WorkSpace. BundleId *string `type:"string"` - // The name of the WorkSpace as seen by the operating system. 
+ // The name of the WorkSpace, as seen by the operating system. ComputerName *string `type:"string"` - // The identifier of the AWS Directory Service directory that the WorkSpace - // belongs to. + // The identifier of the AWS Directory Service directory for the WorkSpace. DirectoryId *string `type:"string"` - // If the WorkSpace could not be created, this contains the error code. + // If the WorkSpace could not be created, contains the error code. ErrorCode *string `type:"string"` - // If the WorkSpace could not be created, this contains a textual error message - // that describes the failure. + // If the WorkSpace could not be created, contains a textual error message that + // describes the failure. ErrorMessage *string `type:"string"` // The IP address of the WorkSpace. IpAddress *string `type:"string"` - // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + // The modification states of the WorkSpace. + ModificationStates []ModificationState `type:"list"` + + // Indicates whether the data stored on the root volume is encrypted. RootVolumeEncryptionEnabled *bool `type:"boolean"` // The operational state of the WorkSpace. State WorkspaceState `type:"string" enum:"true"` - // The identifier of the subnet that the WorkSpace is in. + // The identifier of the subnet for the WorkSpace. SubnetId *string `type:"string"` - // The user that the WorkSpace is assigned to. + // The user for the WorkSpace. UserName *string `min:"1" type:"string"` - // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + // Indicates whether the data stored on the user volume is encrypted. UserVolumeEncryptionEnabled *bool `type:"boolean"` // The KMS key used to encrypt data stored on your WorkSpace. @@ -2668,7 +2671,7 @@ type Workspace struct { // The identifier of the WorkSpace. WorkspaceId *string `type:"string"` - // Describes the properties of a WorkSpace. + // The properties of the WorkSpace. WorkspaceProperties *WorkspaceProperties `type:"structure"` } @@ -2718,6 +2721,12 @@ func (s *Workspace) SetIpAddress(v string) *Workspace { return s } +// SetModificationStates sets the ModificationStates field's value. +func (s *Workspace) SetModificationStates(v []ModificationState) *Workspace { + s.ModificationStates = v + return s +} + // SetRootVolumeEncryptionEnabled sets the RootVolumeEncryptionEnabled field's value. func (s *Workspace) SetRootVolumeEncryptionEnabled(v bool) *Workspace { s.RootVolumeEncryptionEnabled = &v @@ -2766,7 +2775,7 @@ func (s *Workspace) SetWorkspaceProperties(v *WorkspaceProperties) *Workspace { return s } -// Contains information about a WorkSpace bundle. +// Information about a WorkSpace bundle. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceBundle type WorkspaceBundle struct { _ struct{} `type:"structure"` @@ -2774,21 +2783,23 @@ type WorkspaceBundle struct { // The bundle identifier. BundleId *string `type:"string"` - // A ComputeType object that specifies the compute type for the bundle. + // The compute type. For more information, see Amazon WorkSpaces Bundles (http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). ComputeType *ComputeType `type:"structure"` - // The bundle description. + // A description. Description *string `type:"string"` // The name of the bundle. Name *string `min:"1" type:"string"` - // The owner of the bundle. This contains the owner's account identifier, or + // The owner of the bundle. 
This is the account identifier of the owner, or // AMAZON if the bundle is provided by AWS. Owner *string `type:"string"` - // A UserStorage object that specifies the amount of user storage that the bundle - // contains. + // The size of the root volume. + RootStorage *RootStorage `type:"structure"` + + // The size of the user storage. UserStorage *UserStorage `type:"structure"` } @@ -2832,6 +2843,12 @@ func (s *WorkspaceBundle) SetOwner(v string) *WorkspaceBundle { return s } +// SetRootStorage sets the RootStorage field's value. +func (s *WorkspaceBundle) SetRootStorage(v *RootStorage) *WorkspaceBundle { + s.RootStorage = v + return s +} + // SetUserStorage sets the UserStorage field's value. func (s *WorkspaceBundle) SetUserStorage(v *UserStorage) *WorkspaceBundle { s.UserStorage = v @@ -2843,8 +2860,8 @@ func (s *WorkspaceBundle) SetUserStorage(v *UserStorage) *WorkspaceBundle { type WorkspaceConnectionStatus struct { _ struct{} `type:"structure"` - // The connection state of the WorkSpace. Returns UNKOWN if the WorkSpace is - // in a Stopped state. + // The connection state of the WorkSpace. The connection state is unknown if + // the WorkSpace is stopped. ConnectionState ConnectionState `type:"string" enum:"true"` // The timestamp of the connection state check. @@ -2912,8 +2929,7 @@ type WorkspaceDirectory struct { // The directory type. DirectoryType WorkspaceDirectoryType `type:"string" enum:"true"` - // An array of strings that contains the IP addresses of the DNS servers for - // the directory. + // The IP addresses of the DNS servers for the directory. DnsIpAddresses []string `type:"list"` // The identifier of the IAM role. This is the role that allows Amazon WorkSpaces @@ -2927,12 +2943,10 @@ type WorkspaceDirectory struct { // The state of the directory's registration with Amazon WorkSpaces State WorkspaceDirectoryState `type:"string" enum:"true"` - // An array of strings that contains the identifiers of the subnets used with - // the directory. + // The identifiers of the subnets used with the directory. SubnetIds []string `type:"list"` - // A structure that specifies the default creation properties for all WorkSpaces - // in the directory. + // The default creation properties for all WorkSpaces in the directory. WorkspaceCreationProperties *DefaultWorkspaceCreationProperties `type:"structure"` // The identifier of the security group that is assigned to new WorkSpaces. @@ -3021,19 +3035,27 @@ func (s *WorkspaceDirectory) SetWorkspaceSecurityGroupId(v string) *WorkspaceDir return s } -// Describes the properties of a WorkSpace. +// Information about a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceProperties type WorkspaceProperties struct { _ struct{} `type:"structure"` - // The running mode of the WorkSpace. AlwaysOn WorkSpaces are billed monthly. - // AutoStop WorkSpaces are billed by the hour and stopped when no longer being - // used in order to save on costs. + // The compute type. For more information, see Amazon WorkSpaces Bundles (http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). + ComputeTypeName Compute `type:"string" enum:"true"` + + // The size of the root volume. + RootVolumeSizeGib *int64 `type:"integer"` + + // The running mode. For more information, see Manage the WorkSpace Running + // Mode (http://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). 
RunningMode RunningMode `type:"string" enum:"true"` // The time after a user logs off when WorkSpaces are automatically stopped. // Configured in 60 minute intervals. RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` + + // The size of the user storage. + UserVolumeSizeGib *int64 `type:"integer"` } // String returns the string representation @@ -3046,6 +3068,18 @@ func (s WorkspaceProperties) GoString() string { return s.String() } +// SetComputeTypeName sets the ComputeTypeName field's value. +func (s *WorkspaceProperties) SetComputeTypeName(v Compute) *WorkspaceProperties { + s.ComputeTypeName = v + return s +} + +// SetRootVolumeSizeGib sets the RootVolumeSizeGib field's value. +func (s *WorkspaceProperties) SetRootVolumeSizeGib(v int64) *WorkspaceProperties { + s.RootVolumeSizeGib = &v + return s +} + // SetRunningMode sets the RunningMode field's value. func (s *WorkspaceProperties) SetRunningMode(v RunningMode) *WorkspaceProperties { s.RunningMode = v @@ -3058,44 +3092,48 @@ func (s *WorkspaceProperties) SetRunningModeAutoStopTimeoutInMinutes(v int64) *W return s } -// Contains information about a WorkSpace creation request. +// SetUserVolumeSizeGib sets the UserVolumeSizeGib field's value. +func (s *WorkspaceProperties) SetUserVolumeSizeGib(v int64) *WorkspaceProperties { + s.UserVolumeSizeGib = &v + return s +} + +// Information used to create a WorkSpace. // Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceRequest type WorkspaceRequest struct { _ struct{} `type:"structure"` - // The identifier of the bundle to create the WorkSpace from. You can use the - // DescribeWorkspaceBundles operation to obtain a list of the bundles that are - // available. + // The identifier of the bundle for the WorkSpace. You can use DescribeWorkspaceBundles + // to list the available bundles. // // BundleId is a required field BundleId *string `type:"string" required:"true"` - // The identifier of the AWS Directory Service directory to create the WorkSpace - // in. You can use the DescribeWorkspaceDirectories operation to obtain a list - // of the directories that are available. + // The identifier of the AWS Directory Service directory for the WorkSpace. + // You can use DescribeWorkspaceDirectories to list the available directories. // // DirectoryId is a required field DirectoryId *string `type:"string" required:"true"` - // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + // Indicates whether the data stored on the root volume is encrypted. RootVolumeEncryptionEnabled *bool `type:"boolean"` - // The tags of the WorkSpace request. + // The tags for the WorkSpace. Tags []Tag `type:"list"` - // The username that the WorkSpace is assigned to. This username must exist - // in the AWS Directory Service directory specified by the DirectoryId member. + // The username of the user for the WorkSpace. This username must exist in the + // AWS Directory Service directory for the WorkSpace. // // UserName is a required field UserName *string `min:"1" type:"string" required:"true"` - // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + // Indicates whether the data stored on the user volume is encrypted. UserVolumeEncryptionEnabled *bool `type:"boolean"` // The KMS key used to encrypt data stored on your WorkSpace. VolumeEncryptionKey *string `type:"string"` - // Describes the properties of a WorkSpace. + // The WorkSpace properties. 
WorkspaceProperties *WorkspaceProperties `type:"structure"` } @@ -3196,6 +3234,8 @@ const ( ComputeValue Compute = "VALUE" ComputeStandard Compute = "STANDARD" ComputePerformance Compute = "PERFORMANCE" + ComputePower Compute = "POWER" + ComputeGraphics Compute = "GRAPHICS" ) type ConnectionState string @@ -3207,6 +3247,23 @@ const ( ConnectionStateUnknown ConnectionState = "UNKNOWN" ) +type ModificationResourceEnum string + +// Enum values for ModificationResourceEnum +const ( + ModificationResourceEnumRootVolume ModificationResourceEnum = "ROOT_VOLUME" + ModificationResourceEnumUserVolume ModificationResourceEnum = "USER_VOLUME" + ModificationResourceEnumComputeType ModificationResourceEnum = "COMPUTE_TYPE" +) + +type ModificationStateEnum string + +// Enum values for ModificationStateEnum +const ( + ModificationStateEnumUpdateInitiated ModificationStateEnum = "UPDATE_INITIATED" + ModificationStateEnumUpdateInProgress ModificationStateEnum = "UPDATE_IN_PROGRESS" +) + type RunningMode string // Enum values for RunningMode @@ -3249,6 +3306,7 @@ const ( WorkspaceStateTerminating WorkspaceState = "TERMINATING" WorkspaceStateTerminated WorkspaceState = "TERMINATED" WorkspaceStateSuspended WorkspaceState = "SUSPENDED" + WorkspaceStateUpdating WorkspaceState = "UPDATING" WorkspaceStateStopping WorkspaceState = "STOPPING" WorkspaceStateStopped WorkspaceState = "STOPPED" WorkspaceStateError WorkspaceState = "ERROR" diff --git a/service/workspaces/doc.go b/service/workspaces/doc.go index 8320f961c3d..59399971075 100644 --- a/service/workspaces/doc.go +++ b/service/workspaces/doc.go @@ -3,8 +3,8 @@ // Package workspaces provides the client and types for making API // requests to Amazon WorkSpaces. // -// This reference provides detailed information about the Amazon WorkSpaces -// operations. +// Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft +// Windows desktops for your users. // // See https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08 for more information on this service. //