diff --git a/.changes/1.12.147.json b/.changes/1.12.147.json new file mode 100644 index 0000000000..4ff17afb63 --- /dev/null +++ b/.changes/1.12.147.json @@ -0,0 +1,17 @@ +[ + { + "category": "``datasync``", + "description": "Update datasync client to latest version", + "type": "api-change" + }, + { + "category": "``iotanalytics``", + "description": "Update iotanalytics client to latest version", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Update lambda client to latest version", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fc64b91eb3..656d05273b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,14 @@ CHANGELOG ========= +1.12.147 +======== + +* api-change:``datasync``: Update datasync client to latest version +* api-change:``iotanalytics``: Update iotanalytics client to latest version +* api-change:``lambda``: Update lambda client to latest version + + 1.12.146 ======== diff --git a/botocore/__init__.py b/botocore/__init__.py index b809be8185..68c965401e 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '1.12.146' +__version__ = '1.12.147' class NullHandler(logging.Handler): diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 36c9e6ef50..3872ce8927 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -23,9 +23,10 @@ "input":{"shape":"CancelTaskExecutionRequest"}, "output":{"shape":"CancelTaskExecutionResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], - "documentation":"

Cancels execution of a task.

When you cancel a task execution, the transfer of some files are abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, AWS DataSync successfully complete the transfer when you start the next task execution.

" + "documentation":"

Cancels execution of a task.

When you cancel a task execution, the transfer of some files is abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, AWS DataSync successfully completes the transfer when you start the next task execution.

" }, "CreateAgent":{ "name":"CreateAgent", @@ -36,9 +37,10 @@ "input":{"shape":"CreateAgentRequest"}, "output":{"shape":"CreateAgentResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], - "documentation":"

Activates an AWS DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the AWS Region that you want to activate the agent in. You activate the agent in the AWS Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this AWS Region.

You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run. For more information, see Activating a Sync Agent in the AWS DataSync User Guide.

Agents are automatically updated by AWS on a regular basis, using a mechanism that ensures minimal interruption to your tasks.

" + "documentation":"

Activates an AWS DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the AWS Region that you want to activate the agent in. You activate the agent in the AWS Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this AWS Region.

You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run.

For more information, see \"https://docs.aws.amazon.com/datasync/latest/userguide/working-with-agents.html#activating-agent\" (Activating an Agent) in the AWS DataSync User Guide.

Agents are automatically updated by AWS on a regular basis, using a mechanism that ensures minimal interruption to your tasks.

" }, "CreateLocationEfs":{ "name":"CreateLocationEfs", @@ -49,7 +51,8 @@ "input":{"shape":"CreateLocationEfsRequest"}, "output":{"shape":"CreateLocationEfsResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Creates an endpoint for an Amazon EFS file system.

" }, @@ -62,7 +65,8 @@ "input":{"shape":"CreateLocationNfsRequest"}, "output":{"shape":"CreateLocationNfsResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Creates an endpoint for a Network File System (NFS) file system.

" }, @@ -75,9 +79,10 @@ "input":{"shape":"CreateLocationS3Request"}, "output":{"shape":"CreateLocationS3Response"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon S3 bucket.

For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section. For more information, see Configuring Amazon S3 Location Settings in the AWS DataSync User Guide.

" + "documentation":"

Creates an endpoint for an Amazon S3 bucket.

For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section.

For more information, see \"https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location\" (Configuring Amazon S3 Location Settings) in the AWS DataSync User Guide.

" }, "CreateTask":{ "name":"CreateTask", @@ -88,9 +93,10 @@ "input":{"shape":"CreateTaskRequest"}, "output":{"shape":"CreateTaskResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], - "documentation":"

Creates a task. A task is a set of two locations (source and destination) and a set of default OverrideOptions that you use to control the behavior of a task. If you don't specify default values for Options when you create a task, AWS DataSync populates them with safe service defaults.

When you initially create a task, it enters the INITIALIZING status and then the CREATING status. In CREATING status, AWS DataSync attempts to mount the source Network File System (NFS) location. The task transitions to the AVAILABLE status without waiting for the destination location to mount. Instead, AWS DataSync mounts a destination before every task execution and then unmounts it after every task execution.

If an agent that is associated with a source (NFS) location goes offline, the task transitions to the UNAVAILABLE status. If the status of the task remains in the CREATING status for more than a few minutes, it means that your agent might be having trouble mounting the source NFS file system. Check the task's ErrorCode and ErrorDetail. Mount issues are often caused by either a misconfigured firewall or a mistyped NFS server host name.

" + "documentation":"

Creates a task. A task is a set of two locations (source and destination) and a set of Options that you use to control the behavior of a task. If you don't specify Options when you create a task, AWS DataSync populates them with service defaults.

When you create a task, it first enters the CREATING state. During CREATING, AWS DataSync attempts to mount the on-premises Network File System (NFS) location. The task transitions to the AVAILABLE state without waiting for the AWS location to become mounted. If required, AWS DataSync mounts the AWS location before each task execution.

If an agent that is associated with a source (NFS) location goes offline, the task transitions to the UNAVAILABLE status. If the task remains in the CREATING status for more than a few minutes, your agent might be having trouble mounting the source NFS file system. Check the task's ErrorCode and ErrorDetail. Mount issues are often caused by either a misconfigured firewall or a mistyped NFS server host name.

" }, "DeleteAgent":{ "name":"DeleteAgent", @@ -101,9 +107,10 @@ "input":{"shape":"DeleteAgentRequest"}, "output":{"shape":"DeleteAgentResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], - "documentation":"

Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of the agent in your request. The operation disassociates the agent from your AWS account. However, it doesn't delete the agent virtual machine (VM) from your on-premises environment.

After you delete an agent, you can't reactivate it and you longer pay software charges for it.

" + "documentation":"

Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of the agent in your request. The operation disassociates the agent from your AWS account. However, it doesn't delete the agent virtual machine (VM) from your on-premises environment.

" }, "DeleteLocation":{ "name":"DeleteLocation", @@ -114,7 +121,8 @@ "input":{"shape":"DeleteLocationRequest"}, "output":{"shape":"DeleteLocationResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Deletes the configuration of a location used by AWS DataSync.

" }, @@ -127,7 +135,8 @@ "input":{"shape":"DeleteTaskRequest"}, "output":{"shape":"DeleteTaskResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Deletes a task.

" }, @@ -140,7 +149,8 @@ "input":{"shape":"DescribeAgentRequest"}, "output":{"shape":"DescribeAgentResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns metadata such as the name, the network interfaces, and the status (that is, whether the agent is running or not) for an agent. To specify which agent to describe, use the Amazon Resource Name (ARN) of the agent in your request.

" }, @@ -153,7 +163,8 @@ "input":{"shape":"DescribeLocationEfsRequest"}, "output":{"shape":"DescribeLocationEfsResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns metadata, such as the path information about an Amazon EFS location.

" }, @@ -166,7 +177,8 @@ "input":{"shape":"DescribeLocationNfsRequest"}, "output":{"shape":"DescribeLocationNfsResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns metadata, such as the path information, about an NFS location.

" }, @@ -179,7 +191,8 @@ "input":{"shape":"DescribeLocationS3Request"}, "output":{"shape":"DescribeLocationS3Response"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns metadata, such as bucket name, about an Amazon S3 bucket location.

" }, @@ -192,7 +205,8 @@ "input":{"shape":"DescribeTaskRequest"}, "output":{"shape":"DescribeTaskResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns metadata about a task.

" }, @@ -205,7 +219,8 @@ "input":{"shape":"DescribeTaskExecutionRequest"}, "output":{"shape":"DescribeTaskExecutionResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns detailed metadata about a task that is being executed.

" }, @@ -218,7 +233,8 @@ "input":{"shape":"ListAgentsRequest"}, "output":{"shape":"ListAgentsResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns a list of agents owned by an AWS account in the AWS Region specified in the request. The returned list is ordered by agent Amazon Resource Name (ARN).

By default, this operation returns a maximum of 100 agents. This operation supports pagination that enables you to optionally reduce the number of agents returned in a response.

If you have more agents than are returned in a response (that is, the response returns only a truncated list of your agents), the response contains a marker that you can specify in your next request to fetch the next page of agents.

" }, @@ -231,7 +247,8 @@ "input":{"shape":"ListLocationsRequest"}, "output":{"shape":"ListLocationsResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns a list of source and destination locations.

If you have more locations than are returned in a response (that is, the response returns only a truncated list of your locations), the response contains a token that you can specify in your next request to fetch the next page of locations.

" }, @@ -244,7 +261,8 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns all the tags associated with a specified resource.

" }, @@ -257,7 +275,8 @@ "input":{"shape":"ListTaskExecutionsRequest"}, "output":{"shape":"ListTaskExecutionsResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns a list of executed tasks.

" }, @@ -270,7 +289,8 @@ "input":{"shape":"ListTasksRequest"}, "output":{"shape":"ListTasksResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Returns a list of all the tasks.

" }, @@ -283,9 +303,10 @@ "input":{"shape":"StartTaskExecutionRequest"}, "output":{"shape":"StartTaskExecutionResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], - "documentation":"

Starts a specific invocation of a task. A TaskExecution value represents an individual run of a task. Each task can have at most one TaskExecution at a time.

TaskExecution has the following transition phases: INITIALIZING | PREPARING | TRANSFERRING | VERIFYING | SUCCESS/FAILURE.

For detailed information, see Task Execution in Components and Terminology in the AWS DataSync User Guide.

" + "documentation":"

Starts a specific invocation of a task. A TaskExecution value represents an individual run of a task. Each task can have at most one TaskExecution at a time.

TaskExecution has the following transition phases: INITIALIZING | PREPARING | TRANSFERRING | VERIFYING | SUCCESS/FAILURE.

For detailed information, see Task Execution in \"https://docs.aws.amazon.com/datasync/latest/userguide/how-datasync-works.html#terminology\" (Components and Terminology) in the AWS DataSync User Guide.

" }, "TagResource":{ "name":"TagResource", @@ -296,7 +317,8 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Applies a key-value pair to an AWS resource.

" }, @@ -309,7 +331,8 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Removes a tag from an AWS resource.

" }, @@ -322,7 +345,8 @@ "input":{"shape":"UpdateAgentRequest"}, "output":{"shape":"UpdateAgentResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Updates the name of an agent.

" }, @@ -335,7 +359,8 @@ "input":{"shape":"UpdateTaskRequest"}, "output":{"shape":"UpdateTaskResponse"}, "errors":[ - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} ], "documentation":"

Updates the metadata associated with a task.

" } @@ -419,7 +444,7 @@ "members":{ "ActivationKey":{ "shape":"ActivationKey", - "documentation":"

Your agent activation key. You can get the activation key either by sending an HTTP GET request with redirects that enable you to get the agent IP address (port 80). Alternatively, you can get it from the AWS DataSync console.

The redirect URL returned in the response provides you the activation key for your agent in the query string parameter activationKey. It might also include other activation-related parameters; however, these are merely defaults. The arguments you pass to this API call determine the actual configuration of your agent. For more information, see Activating a Sync Agent in the AWS DataSync User Guide.

" + "documentation":"

Your agent activation key. You can get the activation key either by sending an HTTP GET request with redirects that enable you to get the agent IP address (port 80). Alternatively, you can get it from the AWS DataSync console.

The redirect URL returned in the response provides you the activation key for your agent in the query string parameter activationKey. It might also include other activation-related parameters; however, these are merely defaults. The arguments you pass to this API call determine the actual configuration of your agent.

For more information, see \"https://docs.aws.amazon.com/datasync/latest/userguide/working-with-agents.html#activating-agent\" (Activating a Agent) in the AWS DataSync User Guide.

" }, "AgentName":{ "shape":"TagValue", @@ -427,7 +452,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

The key-value pair that represents the tag you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.

" + "documentation":"

The key-value pair that represents the tag that you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.

" } }, "documentation":"

CreateAgentRequest

" @@ -445,7 +470,6 @@ "CreateLocationEfsRequest":{ "type":"structure", "required":[ - "Subdirectory", "EfsFilesystemArn", "Ec2Config" ], @@ -460,7 +484,7 @@ }, "Ec2Config":{ "shape":"Ec2Config", - "documentation":"

The subnet and security group that the Amazon EFS file system uses.

" + "documentation":"

The subnet and security group that the Amazon EFS file system uses. The security group that you provide needs to be able to communicate with the security group on the mount target in the subnet specified.

The exact relationship between security group M (of the mount target) and security group S (which you provide for DataSync to use at this stage) is as follows:

" }, "Tags":{ "shape":"TagList", @@ -488,8 +512,8 @@ ], "members":{ "Subdirectory":{ - "shape":"Subdirectory", - "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server. run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want sync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access. For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Centos documentation.

" + "shape":"NonEmptySubdirectory", + "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync to access allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

For information about NFS export configuration, see \"http://web.mit.edu/rhel-doc/5/RHEL-5-manual/Deployment_Guide-en-US/s1-nfs-server-config-exports.html\" (18.7. The /etc/exports Configuration File).

" }, "ServerHostname":{ "shape":"ServerHostname", @@ -499,6 +523,10 @@ "shape":"OnPremConfig", "documentation":"

Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server.

" }, + "MountOptions":{ + "shape":"NfsMountOptions", + "documentation":"

The NFS mount options that DataSync can use to mount your NFS share.
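A minimal boto3 sketch of the new MountOptions parameter on CreateLocationNfs; the hostname, export path, and agent ARN below are placeholders, not values from this change.

```python
import boto3

datasync = boto3.client("datasync")

# Hostname, path, and agent ARN are placeholders for illustration only.
location = datasync.create_location_nfs(
    ServerHostname="nfs.example.com",
    Subdirectory="/exported/path",
    OnPremConfig={
        "AgentArns": [
            "arn:aws:datasync:us-east-1:111222333444:agent/agent-0123456789abcdef0"
        ]
    },
    # New in this model: pin the NFS version instead of AUTOMATIC negotiation.
    MountOptions={"Version": "NFS4_0"},
)
print(location["LocationArn"])
```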

" + }, "Tags":{ "shape":"TagList", "documentation":"

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

" @@ -519,7 +547,6 @@ "CreateLocationS3Request":{ "type":"structure", "required":[ - "Subdirectory", "S3BucketArn", "S3Config" ], @@ -567,7 +594,7 @@ }, "CloudWatchLogGroupArn":{ "shape":"LogGroupArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is used to monitor and log events in the task. For more information on these groups, see Working with Log Groups and Log Streams in the Amazon CloudWatch User Guide.

For more information about how to useCloudWatchLogs with DataSync, see Monitoring Your Task.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is used to monitor and log events in the task.

For more information on these groups, see \"https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html\" (Working with Log Groups and Log Streams) in the Amazon CloudWatch User Guide.

For more information about how to use CloudWatch Logs with DataSync, see \"https://docs.aws.amazon.com/datasync/latest/userguide/monitor-datasync.html\" (Monitoring Your Task).

" }, "Name":{ "shape":"TagValue", @@ -577,6 +604,10 @@ "shape":"Options", "documentation":"

The set of configuration options that control the behavior of a single execution of the task that occurs when you call StartTaskExecution. You can configure these options to preserve metadata such as user ID (UID) and group ID (GID), file permissions, data integrity verification, and so on.

For each individual task execution, you can override these options by specifying the OverrideOptions before starting the task execution. For more information, see the operation.

" }, + "Excludes":{ + "shape":"FilterList", + "documentation":"

A filter that determines which files to exclude from a task based on the specified pattern. Transfers all files in the task’s subdirectory, except files that match the filter that is set.
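A hedged sketch of the new Excludes parameter on CreateTask; the location ARNs and the filter pattern are placeholders, not values defined in this change.

```python
import boto3

datasync = boto3.client("datasync")

# Source/destination location ARNs are placeholders for illustration only.
task = datasync.create_task(
    SourceLocationArn="arn:aws:datasync:us-east-1:111222333444:location/loc-0f1a2b3c4d5e6f7a8",
    DestinationLocationArn="arn:aws:datasync:us-east-1:111222333444:location/loc-9a8b7c6d5e4f3a2b1",
    Name="nightly-sync",
    # New in this model: skip files that match a simple pattern.
    Excludes=[{"FilterType": "SIMPLE_PATTERN", "Value": "*.tmp"}],
)
print(task["TaskArn"])
```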

" + }, "Tags":{ "shape":"TagList", "documentation":"

The key-value pair that represents the tag that you want to add to the resource. The value can be an empty string.

" @@ -670,7 +701,7 @@ }, "LastConnectionTime":{ "shape":"Time", - "documentation":"

The time that the agent was last connected.

" + "documentation":"

The time that the agent last connected to DataSync.

" }, "CreationTime":{ "shape":"Time", @@ -732,6 +763,10 @@ "documentation":"

The URL of the source NFS location that was described.

" }, "OnPremConfig":{"shape":"OnPremConfig"}, + "MountOptions":{ + "shape":"NfsMountOptions", + "documentation":"

The NFS mount options that DataSync used to mount your NFS share.

" + }, "CreationTime":{ "shape":"Time", "documentation":"

The time that the NFS location was created.

" @@ -785,13 +820,21 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"

The Amazon Resource Name (ARN) of the task execution that was described. TaskExecutionArn is hierarchical and includes TaskArn for the task that was executed.

For example, a TaskExecution value with the ARN arn:aws:sync:us-east-1:209870788375:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b executed the task with the ARN arn:aws:sync:us-east-1:209870788375:task/task-0208075f79cedf4a2.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution that was described. TaskExecutionArn is hierarchical and includes TaskArn for the task that was executed.

For example, a TaskExecution value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2.

" }, "Status":{ "shape":"TaskExecutionStatus", - "documentation":"

The status of the task. For detailed information about sync statuses, see Understanding Sync Task Statuses.

" + "documentation":"

The status of the task execution.

For detailed information about task execution statuses, see \"https://docs.aws.amazon.com/datasync/latest/userguide/working-with-tasks.html#understand-task-creation-statuses\" (Understanding Task Statuses).

" }, "Options":{"shape":"Options"}, + "Excludes":{ + "shape":"FilterList", + "documentation":"

Specifies that the task execution excludes files from the transfer based on the specified pattern in the filter. Transfers all files in the task’s subdirectory, except files that match the filter that is set.

" + }, + "Includes":{ + "shape":"FilterList", + "documentation":"

Specifies that the task execution includes files in the transfer based on the specified pattern in the filter. When multiple include filters are set, they are interpreted as an OR.

" + }, "StartTime":{ "shape":"Time", "documentation":"

The time that the task execution was started.

" @@ -843,7 +886,7 @@ }, "Status":{ "shape":"TaskStatus", - "documentation":"

The status of the task that was described. For detailed information about sync statuses, see Understanding Sync Task Statuses.

" + "documentation":"

The status of the task that was described.

For detailed information about task execution statuses, see \"https://docs.aws.amazon.com/datasync/latest/userguide/working-with-tasks.html#understand-task-creation-statuses\" (Understanding Task Statuses).

" }, "Name":{ "shape":"TagValue", @@ -863,12 +906,16 @@ }, "CloudWatchLogGroupArn":{ "shape":"LogGroupArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the task. For more information on these groups, see Working with Log Groups and Log Streams in the Amazon CloudWatch User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the task.

For more information on these groups, see \"https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html\" (Working with Log Groups and Log Streams) in the Amazon CloudWatch UserGuide.

" }, "Options":{ "shape":"Options", "documentation":"

The set of configuration options that control the behavior of a single execution of the task that occurs when you call StartTaskExecution. You can configure these options to preserve metadata such as user ID (UID) and group ID (GID), file permissions, data integrity verification, and so on.

For each individual task execution, you can override these options by specifying the overriding OverrideOptions value to operation.

" }, + "Excludes":{ + "shape":"FilterList", + "documentation":"

Specifies that the task excludes files in the transfer based on the specified pattern in the filter. Transfers all files in the task’s subdirectory, except files that match the filter that is set.

" + }, "ErrorCode":{ "shape":"string", "documentation":"

Errors that AWS DataSync encountered during execution of the task. You can use this error code to help troubleshoot issues.

" @@ -897,14 +944,14 @@ "members":{ "SubnetArn":{ "shape":"Ec2SubnetArn", - "documentation":"

The ARN of the subnet that the Amazon EC2 resource belongs in.

" + "documentation":"

The ARN of the subnet that DataSync uses to access the target EFS file system.

" }, "SecurityGroupArns":{ "shape":"Ec2SecurityGroupArnList", "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are configured for the Amazon EC2 resource.

" } }, - "documentation":"

The subnet and the security group that the target Amazon EFS file system uses. The subnet must have at least one mount target for that file system. The security group that you provide needs to be able to communicate with the security group on the mount target in the subnet specified.

The exact relationship between security group M (of the mount target) and security group S (which you provide for DataSync to use at this stage) is as follows:

" + "documentation":"

The subnet and the security group that DataSync uses to access the target EFS file system. The subnet must have at least one mount target for that file system. The security group that you provide needs to be able to communicate with the security group on the mount target in the subnet specified.

" }, "Ec2SecurityGroupArn":{ "type":"string", @@ -927,6 +974,37 @@ "max":128, "pattern":"^arn:(aws|aws-cn):elasticfilesystem:[a-z\\-0-9]*:[0-9]{12}:file-system/fs-.*$" }, + "FilterList":{ + "type":"list", + "member":{"shape":"FilterRule"}, + "max":1, + "min":0 + }, + "FilterRule":{ + "type":"structure", + "members":{ + "FilterType":{ + "shape":"FilterType", + "documentation":"

Specifies the type of filter rule pattern to apply. DataSync only supports the SIMPLE_PATTERN rule type.

" + }, + "Value":{ + "shape":"FilterValue", + "documentation":"

A pattern that defines the filter. The filter might include or exclude files in a transfer.

" + } + }, + "documentation":"

A pattern that determines which files to include in the transfer or which files to exclude.

" + }, + "FilterType":{ + "type":"string", + "enum":["SIMPLE_PATTERN"], + "max":128, + "pattern":"^[A-Z0-9_]+$" + }, + "FilterValue":{ + "type":"string", + "max":409600, + "pattern":"^.+$" + }, "Gid":{ "type":"string", "enum":[ @@ -941,6 +1019,16 @@ "max":2048, "pattern":"^arn:(aws|aws-cn):iam::[0-9]{12}:role/.*$" }, + "InternalException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"}, + "errorCode":{"shape":"string"} + }, + "documentation":"

This exception is thrown when an error occurs in the AWS DataSync service.

", + "exception":true, + "fault":true + }, "InvalidRequestException":{ "type":"structure", "members":{ @@ -1148,6 +1236,30 @@ "max":93, "pattern":"[a-zA-Z0-9=_-]+" }, + "NfsMountOptions":{ + "type":"structure", + "members":{ + "Version":{ + "shape":"NfsVersion", + "documentation":"

The specific NFS version that you want DataSync to use to mount your NFS share. If you don't specify a version, DataSync defaults to AUTOMATIC. That is, DataSync automatically selects a version based on negotiation with the NFS server.

" + } + }, + "documentation":"

Represents the mount options that are available for DataSync to access an NFS location.

" + }, + "NfsVersion":{ + "type":"string", + "enum":[ + "AUTOMATIC", + "NFS3", + "NFS4_0", + "NFS4_1" + ] + }, + "NonEmptySubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\./]+$" + }, "OnPremConfig":{ "type":"structure", "required":["AgentArns"], @@ -1245,7 +1357,7 @@ "documentation":"

The Amazon S3 bucket to access. This bucket is used as a parameter in the CreateLocationS3 operation.

" } }, - "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that is used to access an Amazon S3 bucket. For detailed information about using such a role, see Components and Terminology in the AWS DataSync User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that is used to access an Amazon S3 bucket.

For detailed information about using such a role, see \"https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location\" (Creating a Location for Amazon S3) in the AWS DataSync User Guide.

" }, "ServerHostname":{ "type":"string", @@ -1260,7 +1372,11 @@ "shape":"TaskArn", "documentation":"

The Amazon Resource Name (ARN) of the task to start.

" }, - "OverrideOptions":{"shape":"Options"} + "OverrideOptions":{"shape":"Options"}, + "Includes":{ + "shape":"FilterList", + "documentation":"

A filter that determines which files to include in the transfer during a task execution based on the specified pattern in the filter. When multiple include filters are set, they are interpreted as an OR.
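A short sketch of the new Includes filter on StartTaskExecution; the task ARN and the pattern are placeholders rather than values from this change.

```python
import boto3

datasync = boto3.client("datasync")

# Placeholder task ARN and pattern for illustration only.
execution = datasync.start_task_execution(
    TaskArn="arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2",
    # New in this model: transfer only files matching the include pattern.
    Includes=[{"FilterType": "SIMPLE_PATTERN", "Value": "/reports"}],
)
print(execution["TaskExecutionArn"])
```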

" + } }, "documentation":"

StartTaskExecutionRequest

" }, @@ -1277,13 +1393,13 @@ "Subdirectory":{ "type":"string", "max":4096, - "pattern":"^[a-zA-Z0-9_\\-\\./]+$" + "pattern":"^[a-zA-Z0-9_\\-\\./]*$" }, "TagKey":{ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9\\s+=._:/-]{1,128}$" + "pattern":"^[a-zA-Z0-9\\s+=._:/-]+$" }, "TagKeyList":{ "type":"list", @@ -1299,6 +1415,7 @@ }, "TagListEntry":{ "type":"structure", + "required":["Key"], "members":{ "Key":{ "shape":"TagKey", @@ -1338,7 +1455,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9\\s+=._:/-]{1,256}$" + "pattern":"^[a-zA-Z0-9\\s+=._:/-]+$" }, "TaggableResourceArn":{ "type":"string", @@ -1515,9 +1632,17 @@ "documentation":"

The Amazon Resource Name (ARN) of the resource name of the task to update.

" }, "Options":{"shape":"Options"}, + "Excludes":{ + "shape":"FilterList", + "documentation":"

A filter that determines which files to exclude from a task based on the specified pattern in the filter. Transfers all files in the task’s subdirectory, except files that match the filter that is set.

" + }, "Name":{ "shape":"TagValue", "documentation":"

The name of the task to update.

" + }, + "CloudWatchLogGroupArn":{ + "shape":"LogGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch log group.

" } }, "documentation":"

UpdateTaskRequest

" diff --git a/botocore/data/iotanalytics/2017-11-27/service-2.json b/botocore/data/iotanalytics/2017-11-27/service-2.json index 8961e15d21..85ff4a3102 100644 --- a/botocore/data/iotanalytics/2017-11-27/service-2.json +++ b/botocore/data/iotanalytics/2017-11-27/service-2.json @@ -694,6 +694,18 @@ } } }, + "BucketKeyExpression":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9!_.*'()/{}:-]*$" + }, + "BucketName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"^[a-zA-Z0-9.\\-_]*$" + }, "CancelPipelineReprocessingRequest":{ "type":"structure", "required":[ @@ -1124,7 +1136,7 @@ "documentation":"

The type of action by which the data set's contents are automatically created.

" } }, - "documentation":"

" + "documentation":"

Information about the action which automatically creates the data set's contents.

" }, "DatasetActionType":{ "type":"string", @@ -1146,6 +1158,10 @@ "iotEventsDestinationConfiguration":{ "shape":"IotEventsDestinationConfiguration", "documentation":"

Configuration information for delivery of data set contents to AWS IoT Events.

" + }, + "s3DestinationConfiguration":{ + "shape":"S3DestinationConfiguration", + "documentation":"

Configuration information for delivery of data set contents to Amazon S3.

" } }, "documentation":"

The destination to which data set contents are delivered.

" @@ -1767,6 +1783,36 @@ } } }, + "GlueConfiguration":{ + "type":"structure", + "required":[ + "tableName", + "databaseName" + ], + "members":{ + "tableName":{ + "shape":"GlueTableName", + "documentation":"

The name of the table in your AWS Glue Data Catalog which is used to perform the ETL (extract, transform and load) operations. (An AWS Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.)

" + }, + "databaseName":{ + "shape":"GlueDatabaseName", + "documentation":"

The name of the database in your AWS Glue Data Catalog in which the table is located. (An AWS Glue Data Catalog database contains Glue Data tables.)

" + } + }, + "documentation":"

Configuration information for coordination with the AWS Glue ETL (extract, transform and load) service.

" + }, + "GlueDatabaseName":{ + "type":"string", + "max":150, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "GlueTableName":{ + "type":"string", + "max":150, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "Image":{ "type":"string", "max":255 @@ -2473,6 +2519,33 @@ } } }, + "S3DestinationConfiguration":{ + "type":"structure", + "required":[ + "bucket", + "key", + "roleArn" + ], + "members":{ + "bucket":{ + "shape":"BucketName", + "documentation":"

The name of the Amazon S3 bucket to which data set contents are delivered.

" + }, + "key":{ + "shape":"BucketKeyExpression", + "documentation":"

The key of the data set contents object. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key).

" + }, + "glueConfiguration":{ + "shape":"GlueConfiguration", + "documentation":"

Configuration information for coordination with the AWS Glue ETL (extract, transform and load) service.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 and AWS Glue resources.

" + } + }, + "documentation":"

Configuration information for delivery of data set contents to Amazon S3.
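To show how the new S3 destination might be wired up from boto3, a sketch follows; the contentDeliveryRules/entryName/destination wrapper names, the SQL action, and every bucket, role, and Glue name are assumptions, not values defined in this diff.

```python
import boto3

iotanalytics = boto3.client("iotanalytics")

# Bucket, role, and Glue names are placeholders; the contentDeliveryRules
# wrapper structure is assumed, not shown in this diff.
iotanalytics.create_dataset(
    datasetName="example_dataset",
    actions=[
        {
            "actionName": "sql_action",
            "queryAction": {"sqlQuery": "SELECT * FROM example_datastore"},
        }
    ],
    contentDeliveryRules=[
        {
            "entryName": "s3_delivery",
            "destination": {
                "s3DestinationConfiguration": {
                    "bucket": "example-results-bucket",
                    "key": "datasets/example/latest.csv",
                    "roleArn": "arn:aws:iam::111222333444:role/ExampleDeliveryRole",
                    "glueConfiguration": {
                        "databaseName": "example_glue_db",
                        "tableName": "example_glue_table",
                    },
                }
            },
        }
    ],
)
```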

" + }, "SampleChannelDataRequest":{ "type":"structure", "required":["channelName"], @@ -2857,7 +2930,8 @@ "shape":"MaxVersions", "documentation":"

How many versions of data set contents will be kept. The \"unlimited\" parameter must be false.

" } - } + }, + "documentation":"

Information about the versioning of data set contents.

" }, "VolumeSizeInGB":{ "type":"integer", diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index 1c838fd3d3..e5289b83ff 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -2721,6 +2721,7 @@ "nodejs4.3", "nodejs6.10", "nodejs8.10", + "nodejs10.x", "java8", "python2.7", "python3.6", diff --git a/docs/source/conf.py b/docs/source/conf.py index dcd865bec2..a2962818cd 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ # The short X.Y version. version = '1.12.1' # The full version, including alpha/beta/rc tags. -release = '1.12.146' +release = '1.12.147' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.