azure-native.awsconnector.DynamoDbTable
Explore with Pulumi AI
A Microsoft.AwsConnector resource. Azure REST API version: 2024-12-01.
Example Usage
DynamoDbTables_CreateOrReplace
// Example DynamoDbTables_CreateOrReplace: declares an azure-native
// awsconnector DynamoDbTable. All string/number values are generated
// placeholders — replace them with real values before deploying.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var dynamoDbTable = new AzureNative.AwsConnector.DynamoDbTable("dynamoDbTable", new()
{
Location = "fmkjilswdjyisfuwxuj",
// NOTE: the resource name must satisfy the RegExp shown in the placeholder.
Name = "Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])",
Properties = new AzureNative.AwsConnector.Inputs.DynamoDBTablePropertiesArgs
{
Arn = "gimtbcfiznraniycjyalnwrfstm",
AwsAccountId = "dejqcxb",
// AwsProperties carries the DynamoDB table configuration itself.
AwsProperties = new AzureNative.AwsConnector.Inputs.AwsDynamoDBTablePropertiesArgs
{
Arn = "qbvqgymuxfzuwybdspdhcuvfouwnet",
AttributeDefinitions = new[]
{
new AzureNative.AwsConnector.Inputs.AttributeDefinitionArgs
{
AttributeName = "caryhpofnkqtoc",
AttributeType = "bcmjgzaljcemcrswr",
},
},
BillingMode = "pwxrsjcybdcidejuhvrckvxyxad",
ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
{
Enabled = true,
},
DeletionProtectionEnabled = true,
GlobalSecondaryIndexes = new[]
{
new AzureNative.AwsConnector.Inputs.GlobalSecondaryIndexArgs
{
ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
{
Enabled = true,
},
IndexName = "uqlzacnvsvayrvirrwwttb",
KeySchema = new[]
{
new AzureNative.AwsConnector.Inputs.KeySchemaArgs
{
AttributeName = "wisgqkyoouaxivtrtay",
KeyType = "kwkqgbxrwnoklpgmoypovxe",
},
},
Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
{
NonKeyAttributes = new[]
{
"loqmvohtjsscueegam",
},
ProjectionType = "atbzepkydpgudoaqi",
},
ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
{
ReadCapacityUnits = 10,
WriteCapacityUnits = 28,
},
},
},
ImportSourceSpecification = new AzureNative.AwsConnector.Inputs.ImportSourceSpecificationArgs
{
InputCompressionType = "bjswmnwxleqmcth",
InputFormat = "grnhhysgejvbnecrqoynjomz",
InputFormatOptions = new AzureNative.AwsConnector.Inputs.InputFormatOptionsArgs
{
Csv = new AzureNative.AwsConnector.Inputs.CsvArgs
{
Delimiter = "qzowvvpwwhptthlgvrtnpyjszetrt",
HeaderList = new[]
{
"gminuylhgebpjx",
},
},
},
S3BucketSource = new AzureNative.AwsConnector.Inputs.S3BucketSourceArgs
{
S3Bucket = "exulhkspgmo",
S3BucketOwner = "pyawhaxbwqhgarz",
S3KeyPrefix = "ogjgqdsvu",
},
},
KeySchema = new[]
{
new AzureNative.AwsConnector.Inputs.KeySchemaArgs
{
AttributeName = "wisgqkyoouaxivtrtay",
KeyType = "kwkqgbxrwnoklpgmoypovxe",
},
},
KinesisStreamSpecification = new AzureNative.AwsConnector.Inputs.KinesisStreamSpecificationArgs
{
// Uses the SDK-provided enum rather than a raw string.
ApproximateCreationDateTimePrecision = AzureNative.AwsConnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision.MICROSECOND,
StreamArn = "qldltl",
},
LocalSecondaryIndexes = new[]
{
new AzureNative.AwsConnector.Inputs.LocalSecondaryIndexArgs
{
IndexName = "gintyosxvkjqpe",
KeySchema = new[]
{
new AzureNative.AwsConnector.Inputs.KeySchemaArgs
{
AttributeName = "wisgqkyoouaxivtrtay",
KeyType = "kwkqgbxrwnoklpgmoypovxe",
},
},
Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
{
NonKeyAttributes = new[]
{
"loqmvohtjsscueegam",
},
ProjectionType = "atbzepkydpgudoaqi",
},
},
},
PointInTimeRecoverySpecification = new AzureNative.AwsConnector.Inputs.PointInTimeRecoverySpecificationArgs
{
PointInTimeRecoveryEnabled = true,
},
ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
{
ReadCapacityUnits = 10,
WriteCapacityUnits = 28,
},
ResourcePolicy = null,
SseSpecification = new AzureNative.AwsConnector.Inputs.SSESpecificationArgs
{
KmsMasterKeyId = "rvwuejohzknzrntkvprgxt",
SseEnabled = true,
SseType = "osjalywculjbrystezvjojxe",
},
StreamArn = "xvkrzs",
StreamSpecification = new AzureNative.AwsConnector.Inputs.StreamSpecificationArgs
{
ResourcePolicy = null,
StreamViewType = "wemod",
},
TableClass = "tmbfrfbppwhjpm",
TableName = "mqvlcdboopn",
Tags = new[]
{
new AzureNative.AwsConnector.Inputs.TagArgs
{
Key = "txipennfw",
Value = "dkgweupnz",
},
},
TimeToLiveSpecification = new AzureNative.AwsConnector.Inputs.TimeToLiveSpecificationArgs
{
AttributeName = "sxbfejubturdtyusqywguqni",
Enabled = true,
},
},
AwsRegion = "rdzrhtbydhmaxzuwe",
AwsSourceSchema = "sqkkuxwamzevkp",
AwsTags =
{
{ "key3791", "iikafuvbjkvnbogujm" },
},
PublicCloudConnectorsResourceId = "nugnoqcknmrrminwvfvloqsporjd",
PublicCloudResourceName = "lkbwyvnzooydbnembmykhmw",
},
ResourceGroupName = "rgdynamoDBTable",
Tags =
{
{ "key2178", "lyeternduvkobwvqhpicnxel" },
},
});
});
// Example DynamoDbTables_CreateOrReplace: declares an azure-native
// awsconnector DynamoDbTable. All values are generated placeholders —
// replace them with real values before deploying.
package main
import (
awsconnector "github.com/pulumi/pulumi-azure-native-sdk/awsconnector/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := awsconnector.NewDynamoDbTable(ctx, "dynamoDbTable", &awsconnector.DynamoDbTableArgs{
Location: pulumi.String("fmkjilswdjyisfuwxuj"),
// NOTE: the resource name must satisfy the RegExp shown in the placeholder.
Name: pulumi.String("Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])"),
Properties: &awsconnector.DynamoDBTablePropertiesArgs{
Arn: pulumi.String("gimtbcfiznraniycjyalnwrfstm"),
AwsAccountId: pulumi.String("dejqcxb"),
// AwsProperties carries the DynamoDB table configuration itself.
AwsProperties: &awsconnector.AwsDynamoDBTablePropertiesArgs{
Arn: pulumi.String("qbvqgymuxfzuwybdspdhcuvfouwnet"),
AttributeDefinitions: awsconnector.AttributeDefinitionArray{
&awsconnector.AttributeDefinitionArgs{
AttributeName: pulumi.String("caryhpofnkqtoc"),
AttributeType: pulumi.String("bcmjgzaljcemcrswr"),
},
},
BillingMode: pulumi.String("pwxrsjcybdcidejuhvrckvxyxad"),
ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
Enabled: pulumi.Bool(true),
},
DeletionProtectionEnabled: pulumi.Bool(true),
GlobalSecondaryIndexes: awsconnector.GlobalSecondaryIndexArray{
&awsconnector.GlobalSecondaryIndexArgs{
ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
Enabled: pulumi.Bool(true),
},
IndexName: pulumi.String("uqlzacnvsvayrvirrwwttb"),
KeySchema: awsconnector.KeySchemaArray{
&awsconnector.KeySchemaArgs{
AttributeName: pulumi.String("wisgqkyoouaxivtrtay"),
KeyType: pulumi.String("kwkqgbxrwnoklpgmoypovxe"),
},
},
Projection: &awsconnector.ProjectionArgs{
NonKeyAttributes: pulumi.StringArray{
pulumi.String("loqmvohtjsscueegam"),
},
ProjectionType: pulumi.String("atbzepkydpgudoaqi"),
},
ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
ReadCapacityUnits: pulumi.Int(10),
WriteCapacityUnits: pulumi.Int(28),
},
},
},
ImportSourceSpecification: &awsconnector.ImportSourceSpecificationArgs{
InputCompressionType: pulumi.String("bjswmnwxleqmcth"),
InputFormat: pulumi.String("grnhhysgejvbnecrqoynjomz"),
InputFormatOptions: &awsconnector.InputFormatOptionsArgs{
Csv: &awsconnector.CsvArgs{
Delimiter: pulumi.String("qzowvvpwwhptthlgvrtnpyjszetrt"),
HeaderList: pulumi.StringArray{
pulumi.String("gminuylhgebpjx"),
},
},
},
S3BucketSource: &awsconnector.S3BucketSourceArgs{
S3Bucket: pulumi.String("exulhkspgmo"),
S3BucketOwner: pulumi.String("pyawhaxbwqhgarz"),
S3KeyPrefix: pulumi.String("ogjgqdsvu"),
},
},
KeySchema: awsconnector.KeySchemaArray{
&awsconnector.KeySchemaArgs{
AttributeName: pulumi.String("wisgqkyoouaxivtrtay"),
KeyType: pulumi.String("kwkqgbxrwnoklpgmoypovxe"),
},
},
KinesisStreamSpecification: &awsconnector.KinesisStreamSpecificationArgs{
// The enum constant has a string underlying type, so the
// conversion to pulumi.String is valid Go.
ApproximateCreationDateTimePrecision: pulumi.String(awsconnector.KinesisStreamSpecificationApproximateCreationDateTimePrecisionMICROSECOND),
StreamArn: pulumi.String("qldltl"),
},
LocalSecondaryIndexes: awsconnector.LocalSecondaryIndexArray{
&awsconnector.LocalSecondaryIndexArgs{
IndexName: pulumi.String("gintyosxvkjqpe"),
KeySchema: awsconnector.KeySchemaArray{
&awsconnector.KeySchemaArgs{
AttributeName: pulumi.String("wisgqkyoouaxivtrtay"),
KeyType: pulumi.String("kwkqgbxrwnoklpgmoypovxe"),
},
},
Projection: &awsconnector.ProjectionArgs{
NonKeyAttributes: pulumi.StringArray{
pulumi.String("loqmvohtjsscueegam"),
},
ProjectionType: pulumi.String("atbzepkydpgudoaqi"),
},
},
},
PointInTimeRecoverySpecification: &awsconnector.PointInTimeRecoverySpecificationArgs{
PointInTimeRecoveryEnabled: pulumi.Bool(true),
},
ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
ReadCapacityUnits: pulumi.Int(10),
WriteCapacityUnits: pulumi.Int(28),
},
ResourcePolicy: &awsconnector.ResourcePolicyArgs{},
SseSpecification: &awsconnector.SSESpecificationArgs{
KmsMasterKeyId: pulumi.String("rvwuejohzknzrntkvprgxt"),
SseEnabled: pulumi.Bool(true),
SseType: pulumi.String("osjalywculjbrystezvjojxe"),
},
StreamArn: pulumi.String("xvkrzs"),
StreamSpecification: &awsconnector.StreamSpecificationArgs{
ResourcePolicy: &awsconnector.ResourcePolicyArgs{},
StreamViewType: pulumi.String("wemod"),
},
TableClass: pulumi.String("tmbfrfbppwhjpm"),
TableName: pulumi.String("mqvlcdboopn"),
Tags: awsconnector.TagArray{
&awsconnector.TagArgs{
Key: pulumi.String("txipennfw"),
Value: pulumi.String("dkgweupnz"),
},
},
TimeToLiveSpecification: &awsconnector.TimeToLiveSpecificationArgs{
AttributeName: pulumi.String("sxbfejubturdtyusqywguqni"),
Enabled: pulumi.Bool(true),
},
},
AwsRegion: pulumi.String("rdzrhtbydhmaxzuwe"),
AwsSourceSchema: pulumi.String("sqkkuxwamzevkp"),
AwsTags: pulumi.StringMap{
"key3791": pulumi.String("iikafuvbjkvnbogujm"),
},
PublicCloudConnectorsResourceId: pulumi.String("nugnoqcknmrrminwvfvloqsporjd"),
PublicCloudResourceName: pulumi.String("lkbwyvnzooydbnembmykhmw"),
},
ResourceGroupName: pulumi.String("rgdynamoDBTable"),
Tags: pulumi.StringMap{
"key2178": pulumi.String("lyeternduvkobwvqhpicnxel"),
},
})
if err != nil {
return err
}
return nil
})
}
// Example DynamoDbTables_CreateOrReplace: declares an azure-native
// awsconnector DynamoDbTable. All values are generated placeholders —
// replace them with real values before deploying.
//
// Fixes over the generated snippet:
//  - added the missing imports for AttributeDefinitionArgs,
//    GlobalSecondaryIndexArgs, KeySchemaArgs, ProjectionArgs,
//    LocalSecondaryIndexArgs and TagArgs, which the program references;
//  - the no-arg `.resourcePolicy()` calls would not compile — an empty
//    ResourcePolicyArgs is passed instead, matching the other language
//    examples (empty policy object).
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.awsconnector.DynamoDbTable;
import com.pulumi.azurenative.awsconnector.DynamoDbTableArgs;
import com.pulumi.azurenative.awsconnector.inputs.DynamoDBTablePropertiesArgs;
import com.pulumi.azurenative.awsconnector.inputs.AwsDynamoDBTablePropertiesArgs;
import com.pulumi.azurenative.awsconnector.inputs.AttributeDefinitionArgs;
import com.pulumi.azurenative.awsconnector.inputs.ContributorInsightsSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.GlobalSecondaryIndexArgs;
import com.pulumi.azurenative.awsconnector.inputs.KeySchemaArgs;
import com.pulumi.azurenative.awsconnector.inputs.ProjectionArgs;
import com.pulumi.azurenative.awsconnector.inputs.ImportSourceSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.InputFormatOptionsArgs;
import com.pulumi.azurenative.awsconnector.inputs.CsvArgs;
import com.pulumi.azurenative.awsconnector.inputs.S3BucketSourceArgs;
import com.pulumi.azurenative.awsconnector.inputs.KinesisStreamSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.LocalSecondaryIndexArgs;
import com.pulumi.azurenative.awsconnector.inputs.PointInTimeRecoverySpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.ProvisionedThroughputArgs;
import com.pulumi.azurenative.awsconnector.inputs.ResourcePolicyArgs;
import com.pulumi.azurenative.awsconnector.inputs.SSESpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.StreamSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.TagArgs;
import com.pulumi.azurenative.awsconnector.inputs.TimeToLiveSpecificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var dynamoDbTable = new DynamoDbTable("dynamoDbTable", DynamoDbTableArgs.builder()
            .location("fmkjilswdjyisfuwxuj")
            // NOTE: the resource name must satisfy the RegExp shown in the placeholder.
            .name("Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])")
            .properties(DynamoDBTablePropertiesArgs.builder()
                .arn("gimtbcfiznraniycjyalnwrfstm")
                .awsAccountId("dejqcxb")
                // awsProperties carries the DynamoDB table configuration itself.
                .awsProperties(AwsDynamoDBTablePropertiesArgs.builder()
                    .arn("qbvqgymuxfzuwybdspdhcuvfouwnet")
                    .attributeDefinitions(AttributeDefinitionArgs.builder()
                        .attributeName("caryhpofnkqtoc")
                        .attributeType("bcmjgzaljcemcrswr")
                        .build())
                    .billingMode("pwxrsjcybdcidejuhvrckvxyxad")
                    .contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
                        .enabled(true)
                        .build())
                    .deletionProtectionEnabled(true)
                    .globalSecondaryIndexes(GlobalSecondaryIndexArgs.builder()
                        .contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
                            .enabled(true)
                            .build())
                        .indexName("uqlzacnvsvayrvirrwwttb")
                        .keySchema(KeySchemaArgs.builder()
                            .attributeName("wisgqkyoouaxivtrtay")
                            .keyType("kwkqgbxrwnoklpgmoypovxe")
                            .build())
                        .projection(ProjectionArgs.builder()
                            .nonKeyAttributes("loqmvohtjsscueegam")
                            .projectionType("atbzepkydpgudoaqi")
                            .build())
                        .provisionedThroughput(ProvisionedThroughputArgs.builder()
                            .readCapacityUnits(10)
                            .writeCapacityUnits(28)
                            .build())
                        .build())
                    .importSourceSpecification(ImportSourceSpecificationArgs.builder()
                        .inputCompressionType("bjswmnwxleqmcth")
                        .inputFormat("grnhhysgejvbnecrqoynjomz")
                        .inputFormatOptions(InputFormatOptionsArgs.builder()
                            .csv(CsvArgs.builder()
                                .delimiter("qzowvvpwwhptthlgvrtnpyjszetrt")
                                .headerList("gminuylhgebpjx")
                                .build())
                            .build())
                        .s3BucketSource(S3BucketSourceArgs.builder()
                            .s3Bucket("exulhkspgmo")
                            .s3BucketOwner("pyawhaxbwqhgarz")
                            .s3KeyPrefix("ogjgqdsvu")
                            .build())
                        .build())
                    .keySchema(KeySchemaArgs.builder()
                        .attributeName("wisgqkyoouaxivtrtay")
                        .keyType("kwkqgbxrwnoklpgmoypovxe")
                        .build())
                    .kinesisStreamSpecification(KinesisStreamSpecificationArgs.builder()
                        .approximateCreationDateTimePrecision("MICROSECOND")
                        .streamArn("qldltl")
                        .build())
                    .localSecondaryIndexes(LocalSecondaryIndexArgs.builder()
                        .indexName("gintyosxvkjqpe")
                        .keySchema(KeySchemaArgs.builder()
                            .attributeName("wisgqkyoouaxivtrtay")
                            .keyType("kwkqgbxrwnoklpgmoypovxe")
                            .build())
                        .projection(ProjectionArgs.builder()
                            .nonKeyAttributes("loqmvohtjsscueegam")
                            .projectionType("atbzepkydpgudoaqi")
                            .build())
                        .build())
                    .pointInTimeRecoverySpecification(PointInTimeRecoverySpecificationArgs.builder()
                        .pointInTimeRecoveryEnabled(true)
                        .build())
                    .provisionedThroughput(ProvisionedThroughputArgs.builder()
                        .readCapacityUnits(10)
                        .writeCapacityUnits(28)
                        .build())
                    // Empty resource policy, mirroring the other language examples.
                    .resourcePolicy(ResourcePolicyArgs.builder().build())
                    .sseSpecification(SSESpecificationArgs.builder()
                        .kmsMasterKeyId("rvwuejohzknzrntkvprgxt")
                        .sseEnabled(true)
                        .sseType("osjalywculjbrystezvjojxe")
                        .build())
                    .streamArn("xvkrzs")
                    .streamSpecification(StreamSpecificationArgs.builder()
                        .resourcePolicy(ResourcePolicyArgs.builder().build())
                        .streamViewType("wemod")
                        .build())
                    .tableClass("tmbfrfbppwhjpm")
                    .tableName("mqvlcdboopn")
                    .tags(TagArgs.builder()
                        .key("txipennfw")
                        .value("dkgweupnz")
                        .build())
                    .timeToLiveSpecification(TimeToLiveSpecificationArgs.builder()
                        .attributeName("sxbfejubturdtyusqywguqni")
                        .enabled(true)
                        .build())
                    .build())
                .awsRegion("rdzrhtbydhmaxzuwe")
                .awsSourceSchema("sqkkuxwamzevkp")
                .awsTags(Map.of("key3791", "iikafuvbjkvnbogujm"))
                .publicCloudConnectorsResourceId("nugnoqcknmrrminwvfvloqsporjd")
                .publicCloudResourceName("lkbwyvnzooydbnembmykhmw")
                .build())
            .resourceGroupName("rgdynamoDBTable")
            .tags(Map.of("key2178", "lyeternduvkobwvqhpicnxel"))
            .build());
    }
}
# Example DynamoDbTables_CreateOrReplace: declares an azure-native
# awsconnector DynamoDbTable. All values are generated placeholders —
# replace them with real values before deploying.
import pulumi
import pulumi_azure_native as azure_native
dynamo_db_table = azure_native.awsconnector.DynamoDbTable("dynamoDbTable",
    location="fmkjilswdjyisfuwxuj",
    # NOTE: the resource name must satisfy the RegExp shown in the placeholder.
    name="Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])",
    properties={
        "arn": "gimtbcfiznraniycjyalnwrfstm",
        "aws_account_id": "dejqcxb",
        # aws_properties carries the DynamoDB table configuration itself.
        "aws_properties": {
            "arn": "qbvqgymuxfzuwybdspdhcuvfouwnet",
            "attribute_definitions": [{
                "attribute_name": "caryhpofnkqtoc",
                "attribute_type": "bcmjgzaljcemcrswr",
            }],
            "billing_mode": "pwxrsjcybdcidejuhvrckvxyxad",
            "contributor_insights_specification": {
                "enabled": True,
            },
            "deletion_protection_enabled": True,
            "global_secondary_indexes": [{
                "contributor_insights_specification": {
                    "enabled": True,
                },
                "index_name": "uqlzacnvsvayrvirrwwttb",
                "key_schema": [{
                    "attribute_name": "wisgqkyoouaxivtrtay",
                    "key_type": "kwkqgbxrwnoklpgmoypovxe",
                }],
                "projection": {
                    "non_key_attributes": ["loqmvohtjsscueegam"],
                    "projection_type": "atbzepkydpgudoaqi",
                },
                "provisioned_throughput": {
                    "read_capacity_units": 10,
                    "write_capacity_units": 28,
                },
            }],
            "import_source_specification": {
                "input_compression_type": "bjswmnwxleqmcth",
                "input_format": "grnhhysgejvbnecrqoynjomz",
                "input_format_options": {
                    "csv": {
                        "delimiter": "qzowvvpwwhptthlgvrtnpyjszetrt",
                        "header_list": ["gminuylhgebpjx"],
                    },
                },
                "s3_bucket_source": {
                    "s3_bucket": "exulhkspgmo",
                    "s3_bucket_owner": "pyawhaxbwqhgarz",
                    "s3_key_prefix": "ogjgqdsvu",
                },
            },
            "key_schema": [{
                "attribute_name": "wisgqkyoouaxivtrtay",
                "key_type": "kwkqgbxrwnoklpgmoypovxe",
            }],
            "kinesis_stream_specification": {
                # Uses the SDK-provided enum rather than a raw string.
                "approximate_creation_date_time_precision": azure_native.awsconnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision.MICROSECOND,
                "stream_arn": "qldltl",
            },
            "local_secondary_indexes": [{
                "index_name": "gintyosxvkjqpe",
                "key_schema": [{
                    "attribute_name": "wisgqkyoouaxivtrtay",
                    "key_type": "kwkqgbxrwnoklpgmoypovxe",
                }],
                "projection": {
                    "non_key_attributes": ["loqmvohtjsscueegam"],
                    "projection_type": "atbzepkydpgudoaqi",
                },
            }],
            "point_in_time_recovery_specification": {
                "point_in_time_recovery_enabled": True,
            },
            "provisioned_throughput": {
                "read_capacity_units": 10,
                "write_capacity_units": 28,
            },
            "resource_policy": {},
            "sse_specification": {
                "kms_master_key_id": "rvwuejohzknzrntkvprgxt",
                "sse_enabled": True,
                "sse_type": "osjalywculjbrystezvjojxe",
            },
            "stream_arn": "xvkrzs",
            "stream_specification": {
                "resource_policy": {},
                "stream_view_type": "wemod",
            },
            "table_class": "tmbfrfbppwhjpm",
            "table_name": "mqvlcdboopn",
            "tags": [{
                "key": "txipennfw",
                "value": "dkgweupnz",
            }],
            "time_to_live_specification": {
                "attribute_name": "sxbfejubturdtyusqywguqni",
                "enabled": True,
            },
        },
        "aws_region": "rdzrhtbydhmaxzuwe",
        "aws_source_schema": "sqkkuxwamzevkp",
        "aws_tags": {
            "key3791": "iikafuvbjkvnbogujm",
        },
        "public_cloud_connectors_resource_id": "nugnoqcknmrrminwvfvloqsporjd",
        "public_cloud_resource_name": "lkbwyvnzooydbnembmykhmw",
    },
    resource_group_name="rgdynamoDBTable",
    tags={
        "key2178": "lyeternduvkobwvqhpicnxel",
    })
// Example DynamoDbTables_CreateOrReplace: declares an azure-native
// awsconnector DynamoDbTable. All values are generated placeholders —
// replace them with real values before deploying.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const dynamoDbTable = new azure_native.awsconnector.DynamoDbTable("dynamoDbTable", {
    location: "fmkjilswdjyisfuwxuj",
    // NOTE: the resource name must satisfy the RegExp shown in the placeholder.
    name: "Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])",
    properties: {
        arn: "gimtbcfiznraniycjyalnwrfstm",
        awsAccountId: "dejqcxb",
        // awsProperties carries the DynamoDB table configuration itself.
        awsProperties: {
            arn: "qbvqgymuxfzuwybdspdhcuvfouwnet",
            attributeDefinitions: [{
                attributeName: "caryhpofnkqtoc",
                attributeType: "bcmjgzaljcemcrswr",
            }],
            billingMode: "pwxrsjcybdcidejuhvrckvxyxad",
            contributorInsightsSpecification: {
                enabled: true,
            },
            deletionProtectionEnabled: true,
            globalSecondaryIndexes: [{
                contributorInsightsSpecification: {
                    enabled: true,
                },
                indexName: "uqlzacnvsvayrvirrwwttb",
                keySchema: [{
                    attributeName: "wisgqkyoouaxivtrtay",
                    keyType: "kwkqgbxrwnoklpgmoypovxe",
                }],
                projection: {
                    nonKeyAttributes: ["loqmvohtjsscueegam"],
                    projectionType: "atbzepkydpgudoaqi",
                },
                provisionedThroughput: {
                    readCapacityUnits: 10,
                    writeCapacityUnits: 28,
                },
            }],
            importSourceSpecification: {
                inputCompressionType: "bjswmnwxleqmcth",
                inputFormat: "grnhhysgejvbnecrqoynjomz",
                inputFormatOptions: {
                    csv: {
                        delimiter: "qzowvvpwwhptthlgvrtnpyjszetrt",
                        headerList: ["gminuylhgebpjx"],
                    },
                },
                s3BucketSource: {
                    s3Bucket: "exulhkspgmo",
                    s3BucketOwner: "pyawhaxbwqhgarz",
                    s3KeyPrefix: "ogjgqdsvu",
                },
            },
            keySchema: [{
                attributeName: "wisgqkyoouaxivtrtay",
                keyType: "kwkqgbxrwnoklpgmoypovxe",
            }],
            kinesisStreamSpecification: {
                // Uses the SDK-provided enum rather than a raw string.
                approximateCreationDateTimePrecision: azure_native.awsconnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision.MICROSECOND,
                streamArn: "qldltl",
            },
            localSecondaryIndexes: [{
                indexName: "gintyosxvkjqpe",
                keySchema: [{
                    attributeName: "wisgqkyoouaxivtrtay",
                    keyType: "kwkqgbxrwnoklpgmoypovxe",
                }],
                projection: {
                    nonKeyAttributes: ["loqmvohtjsscueegam"],
                    projectionType: "atbzepkydpgudoaqi",
                },
            }],
            pointInTimeRecoverySpecification: {
                pointInTimeRecoveryEnabled: true,
            },
            provisionedThroughput: {
                readCapacityUnits: 10,
                writeCapacityUnits: 28,
            },
            resourcePolicy: {},
            sseSpecification: {
                kmsMasterKeyId: "rvwuejohzknzrntkvprgxt",
                sseEnabled: true,
                sseType: "osjalywculjbrystezvjojxe",
            },
            streamArn: "xvkrzs",
            streamSpecification: {
                resourcePolicy: {},
                streamViewType: "wemod",
            },
            tableClass: "tmbfrfbppwhjpm",
            tableName: "mqvlcdboopn",
            tags: [{
                key: "txipennfw",
                value: "dkgweupnz",
            }],
            timeToLiveSpecification: {
                attributeName: "sxbfejubturdtyusqywguqni",
                enabled: true,
            },
        },
        awsRegion: "rdzrhtbydhmaxzuwe",
        awsSourceSchema: "sqkkuxwamzevkp",
        awsTags: {
            key3791: "iikafuvbjkvnbogujm",
        },
        publicCloudConnectorsResourceId: "nugnoqcknmrrminwvfvloqsporjd",
        publicCloudResourceName: "lkbwyvnzooydbnembmykhmw",
    },
    resourceGroupName: "rgdynamoDBTable",
    tags: {
        key2178: "lyeternduvkobwvqhpicnxel",
    },
});
# Example DynamoDbTables_CreateOrReplace: declares an azure-native
# awsconnector DynamoDbTable. All values are generated placeholders.
# NOTE(review): the indentation of this snippet was lost in extraction,
# which makes YAML invalid; the nesting below is reconstructed from the
# parallel C#/Go/TypeScript examples on this page.
resources:
  dynamoDbTable:
    type: azure-native:awsconnector:DynamoDbTable
    properties:
      location: fmkjilswdjyisfuwxuj
      name: Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])
      properties:
        arn: gimtbcfiznraniycjyalnwrfstm
        awsAccountId: dejqcxb
        awsProperties:
          arn: qbvqgymuxfzuwybdspdhcuvfouwnet
          attributeDefinitions:
            - attributeName: caryhpofnkqtoc
              attributeType: bcmjgzaljcemcrswr
          billingMode: pwxrsjcybdcidejuhvrckvxyxad
          contributorInsightsSpecification:
            enabled: true
          deletionProtectionEnabled: true
          globalSecondaryIndexes:
            - contributorInsightsSpecification:
                enabled: true
              indexName: uqlzacnvsvayrvirrwwttb
              keySchema:
                - attributeName: wisgqkyoouaxivtrtay
                  keyType: kwkqgbxrwnoklpgmoypovxe
              projection:
                nonKeyAttributes:
                  - loqmvohtjsscueegam
                projectionType: atbzepkydpgudoaqi
              provisionedThroughput:
                readCapacityUnits: 10
                writeCapacityUnits: 28
          importSourceSpecification:
            inputCompressionType: bjswmnwxleqmcth
            inputFormat: grnhhysgejvbnecrqoynjomz
            inputFormatOptions:
              csv:
                delimiter: qzowvvpwwhptthlgvrtnpyjszetrt
                headerList:
                  - gminuylhgebpjx
            s3BucketSource:
              s3Bucket: exulhkspgmo
              s3BucketOwner: pyawhaxbwqhgarz
              s3KeyPrefix: ogjgqdsvu
          keySchema:
            - attributeName: wisgqkyoouaxivtrtay
              keyType: kwkqgbxrwnoklpgmoypovxe
          kinesisStreamSpecification:
            approximateCreationDateTimePrecision: MICROSECOND
            streamArn: qldltl
          localSecondaryIndexes:
            - indexName: gintyosxvkjqpe
              keySchema:
                - attributeName: wisgqkyoouaxivtrtay
                  keyType: kwkqgbxrwnoklpgmoypovxe
              projection:
                nonKeyAttributes:
                  - loqmvohtjsscueegam
                projectionType: atbzepkydpgudoaqi
          pointInTimeRecoverySpecification:
            pointInTimeRecoveryEnabled: true
          provisionedThroughput:
            readCapacityUnits: 10
            writeCapacityUnits: 28
          resourcePolicy: {}
          sseSpecification:
            kmsMasterKeyId: rvwuejohzknzrntkvprgxt
            sseEnabled: true
            sseType: osjalywculjbrystezvjojxe
          streamArn: xvkrzs
          streamSpecification:
            resourcePolicy: {}
            streamViewType: wemod
          tableClass: tmbfrfbppwhjpm
          tableName: mqvlcdboopn
          tags:
            - key: txipennfw
              value: dkgweupnz
          timeToLiveSpecification:
            attributeName: sxbfejubturdtyusqywguqni
            enabled: true
        awsRegion: rdzrhtbydhmaxzuwe
        awsSourceSchema: sqkkuxwamzevkp
        awsTags:
          key3791: iikafuvbjkvnbogujm
        publicCloudConnectorsResourceId: nugnoqcknmrrminwvfvloqsporjd
        publicCloudResourceName: lkbwyvnzooydbnembmykhmw
      resourceGroupName: rgdynamoDBTable
      tags:
        key2178: lyeternduvkobwvqhpicnxel
Create DynamoDbTable Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DynamoDbTable(name: string, args: DynamoDbTableArgs, opts?: CustomResourceOptions);
@overload
def DynamoDbTable(resource_name: str,
args: DynamoDbTableArgs,
opts: Optional[ResourceOptions] = None)
@overload
def DynamoDbTable(resource_name: str,
opts: Optional[ResourceOptions] = None,
resource_group_name: Optional[str] = None,
location: Optional[str] = None,
name: Optional[str] = None,
properties: Optional[DynamoDBTablePropertiesArgs] = None,
tags: Optional[Mapping[str, str]] = None)
func NewDynamoDbTable(ctx *Context, name string, args DynamoDbTableArgs, opts ...ResourceOption) (*DynamoDbTable, error)
public DynamoDbTable(string name, DynamoDbTableArgs args, CustomResourceOptions? opts = null)
public DynamoDbTable(String name, DynamoDbTableArgs args)
public DynamoDbTable(String name, DynamoDbTableArgs args, CustomResourceOptions options)
type: azure-native:awsconnector:DynamoDbTable
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Constructor reference: every input property shown with a placeholder
// value ("string", 0, false) to illustrate the full input shape.
var dynamoDbTableResource = new AzureNative.AwsConnector.DynamoDbTable("dynamoDbTableResource", new()
{
ResourceGroupName = "string",
Location = "string",
Name = "string",
Properties = new AzureNative.AwsConnector.Inputs.DynamoDBTablePropertiesArgs
{
Arn = "string",
AwsAccountId = "string",
AwsProperties = new AzureNative.AwsConnector.Inputs.AwsDynamoDBTablePropertiesArgs
{
Arn = "string",
AttributeDefinitions = new[]
{
new AzureNative.AwsConnector.Inputs.AttributeDefinitionArgs
{
AttributeName = "string",
AttributeType = "string",
},
},
BillingMode = "string",
ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
{
Enabled = false,
},
DeletionProtectionEnabled = false,
GlobalSecondaryIndexes = new[]
{
new AzureNative.AwsConnector.Inputs.GlobalSecondaryIndexArgs
{
ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
{
Enabled = false,
},
IndexName = "string",
KeySchema = new[]
{
new AzureNative.AwsConnector.Inputs.KeySchemaArgs
{
AttributeName = "string",
KeyType = "string",
},
},
Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
{
NonKeyAttributes = new[]
{
"string",
},
ProjectionType = "string",
},
ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
{
ReadCapacityUnits = 0,
WriteCapacityUnits = 0,
},
},
},
ImportSourceSpecification = new AzureNative.AwsConnector.Inputs.ImportSourceSpecificationArgs
{
InputCompressionType = "string",
InputFormat = "string",
InputFormatOptions = new AzureNative.AwsConnector.Inputs.InputFormatOptionsArgs
{
Csv = new AzureNative.AwsConnector.Inputs.CsvArgs
{
Delimiter = "string",
HeaderList = new[]
{
"string",
},
},
},
S3BucketSource = new AzureNative.AwsConnector.Inputs.S3BucketSourceArgs
{
S3Bucket = "string",
S3BucketOwner = "string",
S3KeyPrefix = "string",
},
},
KeySchema = new[]
{
new AzureNative.AwsConnector.Inputs.KeySchemaArgs
{
AttributeName = "string",
KeyType = "string",
},
},
KinesisStreamSpecification = new AzureNative.AwsConnector.Inputs.KinesisStreamSpecificationArgs
{
ApproximateCreationDateTimePrecision = "string",
StreamArn = "string",
},
LocalSecondaryIndexes = new[]
{
new AzureNative.AwsConnector.Inputs.LocalSecondaryIndexArgs
{
IndexName = "string",
KeySchema = new[]
{
new AzureNative.AwsConnector.Inputs.KeySchemaArgs
{
AttributeName = "string",
KeyType = "string",
},
},
Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
{
NonKeyAttributes = new[]
{
"string",
},
ProjectionType = "string",
},
},
},
PointInTimeRecoverySpecification = new AzureNative.AwsConnector.Inputs.PointInTimeRecoverySpecificationArgs
{
PointInTimeRecoveryEnabled = false,
},
ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
{
ReadCapacityUnits = 0,
WriteCapacityUnits = 0,
},
ResourcePolicy = new AzureNative.AwsConnector.Inputs.ResourcePolicyArgs
{
PolicyDocument = "any",
},
SseSpecification = new AzureNative.AwsConnector.Inputs.SSESpecificationArgs
{
KmsMasterKeyId = "string",
SseEnabled = false,
SseType = "string",
},
StreamArn = "string",
StreamSpecification = new AzureNative.AwsConnector.Inputs.StreamSpecificationArgs
{
ResourcePolicy = new AzureNative.AwsConnector.Inputs.ResourcePolicyArgs
{
PolicyDocument = "any",
},
StreamViewType = "string",
},
TableClass = "string",
TableName = "string",
Tags = new[]
{
new AzureNative.AwsConnector.Inputs.TagArgs
{
Key = "string",
Value = "string",
},
},
TimeToLiveSpecification = new AzureNative.AwsConnector.Inputs.TimeToLiveSpecificationArgs
{
AttributeName = "string",
Enabled = false,
},
},
AwsRegion = "string",
AwsSourceSchema = "string",
AwsTags =
{
{ "string", "string" },
},
PublicCloudConnectorsResourceId = "string",
PublicCloudResourceName = "string",
},
Tags =
{
{ "string", "string" },
},
});
// Constructor reference: every input property shown with a placeholder
// value ("string", 0, false) to illustrate the full input shape.
example, err := awsconnector.NewDynamoDbTable(ctx, "dynamoDbTableResource", &awsconnector.DynamoDbTableArgs{
ResourceGroupName: pulumi.String("string"),
Location: pulumi.String("string"),
Name: pulumi.String("string"),
Properties: &awsconnector.DynamoDBTablePropertiesArgs{
Arn: pulumi.String("string"),
AwsAccountId: pulumi.String("string"),
AwsProperties: &awsconnector.AwsDynamoDBTablePropertiesArgs{
Arn: pulumi.String("string"),
AttributeDefinitions: awsconnector.AttributeDefinitionArray{
&awsconnector.AttributeDefinitionArgs{
AttributeName: pulumi.String("string"),
AttributeType: pulumi.String("string"),
},
},
BillingMode: pulumi.String("string"),
ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
Enabled: pulumi.Bool(false),
},
DeletionProtectionEnabled: pulumi.Bool(false),
GlobalSecondaryIndexes: awsconnector.GlobalSecondaryIndexArray{
&awsconnector.GlobalSecondaryIndexArgs{
ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
Enabled: pulumi.Bool(false),
},
IndexName: pulumi.String("string"),
KeySchema: awsconnector.KeySchemaArray{
&awsconnector.KeySchemaArgs{
AttributeName: pulumi.String("string"),
KeyType: pulumi.String("string"),
},
},
Projection: &awsconnector.ProjectionArgs{
NonKeyAttributes: pulumi.StringArray{
pulumi.String("string"),
},
ProjectionType: pulumi.String("string"),
},
ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
ReadCapacityUnits: pulumi.Int(0),
WriteCapacityUnits: pulumi.Int(0),
},
},
},
ImportSourceSpecification: &awsconnector.ImportSourceSpecificationArgs{
InputCompressionType: pulumi.String("string"),
InputFormat: pulumi.String("string"),
InputFormatOptions: &awsconnector.InputFormatOptionsArgs{
Csv: &awsconnector.CsvArgs{
Delimiter: pulumi.String("string"),
HeaderList: pulumi.StringArray{
pulumi.String("string"),
},
},
},
S3BucketSource: &awsconnector.S3BucketSourceArgs{
S3Bucket: pulumi.String("string"),
S3BucketOwner: pulumi.String("string"),
S3KeyPrefix: pulumi.String("string"),
},
},
KeySchema: awsconnector.KeySchemaArray{
&awsconnector.KeySchemaArgs{
AttributeName: pulumi.String("string"),
KeyType: pulumi.String("string"),
},
},
KinesisStreamSpecification: &awsconnector.KinesisStreamSpecificationArgs{
ApproximateCreationDateTimePrecision: pulumi.String("string"),
StreamArn: pulumi.String("string"),
},
LocalSecondaryIndexes: awsconnector.LocalSecondaryIndexArray{
&awsconnector.LocalSecondaryIndexArgs{
IndexName: pulumi.String("string"),
KeySchema: awsconnector.KeySchemaArray{
&awsconnector.KeySchemaArgs{
AttributeName: pulumi.String("string"),
KeyType: pulumi.String("string"),
},
},
Projection: &awsconnector.ProjectionArgs{
NonKeyAttributes: pulumi.StringArray{
pulumi.String("string"),
},
ProjectionType: pulumi.String("string"),
},
},
},
PointInTimeRecoverySpecification: &awsconnector.PointInTimeRecoverySpecificationArgs{
PointInTimeRecoveryEnabled: pulumi.Bool(false),
},
ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
ReadCapacityUnits: pulumi.Int(0),
WriteCapacityUnits: pulumi.Int(0),
},
ResourcePolicy: &awsconnector.ResourcePolicyArgs{
PolicyDocument: pulumi.Any("any"),
},
SseSpecification: &awsconnector.SSESpecificationArgs{
KmsMasterKeyId: pulumi.String("string"),
SseEnabled: pulumi.Bool(false),
SseType: pulumi.String("string"),
},
StreamArn: pulumi.String("string"),
StreamSpecification: &awsconnector.StreamSpecificationArgs{
ResourcePolicy: &awsconnector.ResourcePolicyArgs{
PolicyDocument: pulumi.Any("any"),
},
StreamViewType: pulumi.String("string"),
},
TableClass: pulumi.String("string"),
TableName: pulumi.String("string"),
Tags: awsconnector.TagArray{
&awsconnector.TagArgs{
Key: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
TimeToLiveSpecification: &awsconnector.TimeToLiveSpecificationArgs{
AttributeName: pulumi.String("string"),
Enabled: pulumi.Bool(false),
},
},
AwsRegion: pulumi.String("string"),
AwsSourceSchema: pulumi.String("string"),
AwsTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
PublicCloudConnectorsResourceId: pulumi.String("string"),
PublicCloudResourceName: pulumi.String("string"),
},
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
})
var dynamoDbTableResource = new DynamoDbTable("dynamoDbTableResource", DynamoDbTableArgs.builder()
.resourceGroupName("string")
.location("string")
.name("string")
.properties(DynamoDBTablePropertiesArgs.builder()
.arn("string")
.awsAccountId("string")
.awsProperties(AwsDynamoDBTablePropertiesArgs.builder()
.arn("string")
.attributeDefinitions(AttributeDefinitionArgs.builder()
.attributeName("string")
.attributeType("string")
.build())
.billingMode("string")
.contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
.enabled(false)
.build())
.deletionProtectionEnabled(false)
.globalSecondaryIndexes(GlobalSecondaryIndexArgs.builder()
.contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
.enabled(false)
.build())
.indexName("string")
.keySchema(KeySchemaArgs.builder()
.attributeName("string")
.keyType("string")
.build())
.projection(ProjectionArgs.builder()
.nonKeyAttributes("string")
.projectionType("string")
.build())
.provisionedThroughput(ProvisionedThroughputArgs.builder()
.readCapacityUnits(0)
.writeCapacityUnits(0)
.build())
.build())
.importSourceSpecification(ImportSourceSpecificationArgs.builder()
.inputCompressionType("string")
.inputFormat("string")
.inputFormatOptions(InputFormatOptionsArgs.builder()
.csv(CsvArgs.builder()
.delimiter("string")
.headerList("string")
.build())
.build())
.s3BucketSource(S3BucketSourceArgs.builder()
.s3Bucket("string")
.s3BucketOwner("string")
.s3KeyPrefix("string")
.build())
.build())
.keySchema(KeySchemaArgs.builder()
.attributeName("string")
.keyType("string")
.build())
.kinesisStreamSpecification(KinesisStreamSpecificationArgs.builder()
.approximateCreationDateTimePrecision("string")
.streamArn("string")
.build())
.localSecondaryIndexes(LocalSecondaryIndexArgs.builder()
.indexName("string")
.keySchema(KeySchemaArgs.builder()
.attributeName("string")
.keyType("string")
.build())
.projection(ProjectionArgs.builder()
.nonKeyAttributes("string")
.projectionType("string")
.build())
.build())
.pointInTimeRecoverySpecification(PointInTimeRecoverySpecificationArgs.builder()
.pointInTimeRecoveryEnabled(false)
.build())
.provisionedThroughput(ProvisionedThroughputArgs.builder()
.readCapacityUnits(0)
.writeCapacityUnits(0)
.build())
.resourcePolicy(ResourcePolicyArgs.builder()
.policyDocument("any")
.build())
.sseSpecification(SSESpecificationArgs.builder()
.kmsMasterKeyId("string")
.sseEnabled(false)
.sseType("string")
.build())
.streamArn("string")
.streamSpecification(StreamSpecificationArgs.builder()
.resourcePolicy(ResourcePolicyArgs.builder()
.policyDocument("any")
.build())
.streamViewType("string")
.build())
.tableClass("string")
.tableName("string")
.tags(TagArgs.builder()
.key("string")
.value("string")
.build())
.timeToLiveSpecification(TimeToLiveSpecificationArgs.builder()
.attributeName("string")
.enabled(false)
.build())
.build())
.awsRegion("string")
.awsSourceSchema("string")
.awsTags(Map.of("string", "string"))
.publicCloudConnectorsResourceId("string")
.publicCloudResourceName("string")
.build())
.tags(Map.of("string", "string"))
.build());
dynamo_db_table_resource = azure_native.awsconnector.DynamoDbTable("dynamoDbTableResource",
resource_group_name="string",
location="string",
name="string",
properties={
"arn": "string",
"aws_account_id": "string",
"aws_properties": {
"arn": "string",
"attribute_definitions": [{
"attribute_name": "string",
"attribute_type": "string",
}],
"billing_mode": "string",
"contributor_insights_specification": {
"enabled": False,
},
"deletion_protection_enabled": False,
"global_secondary_indexes": [{
"contributor_insights_specification": {
"enabled": False,
},
"index_name": "string",
"key_schema": [{
"attribute_name": "string",
"key_type": "string",
}],
"projection": {
"non_key_attributes": ["string"],
"projection_type": "string",
},
"provisioned_throughput": {
"read_capacity_units": 0,
"write_capacity_units": 0,
},
}],
"import_source_specification": {
"input_compression_type": "string",
"input_format": "string",
"input_format_options": {
"csv": {
"delimiter": "string",
"header_list": ["string"],
},
},
"s3_bucket_source": {
"s3_bucket": "string",
"s3_bucket_owner": "string",
"s3_key_prefix": "string",
},
},
"key_schema": [{
"attribute_name": "string",
"key_type": "string",
}],
"kinesis_stream_specification": {
"approximate_creation_date_time_precision": "string",
"stream_arn": "string",
},
"local_secondary_indexes": [{
"index_name": "string",
"key_schema": [{
"attribute_name": "string",
"key_type": "string",
}],
"projection": {
"non_key_attributes": ["string"],
"projection_type": "string",
},
}],
"point_in_time_recovery_specification": {
"point_in_time_recovery_enabled": False,
},
"provisioned_throughput": {
"read_capacity_units": 0,
"write_capacity_units": 0,
},
"resource_policy": {
"policy_document": "any",
},
"sse_specification": {
"kms_master_key_id": "string",
"sse_enabled": False,
"sse_type": "string",
},
"stream_arn": "string",
"stream_specification": {
"resource_policy": {
"policy_document": "any",
},
"stream_view_type": "string",
},
"table_class": "string",
"table_name": "string",
"tags": [{
"key": "string",
"value": "string",
}],
"time_to_live_specification": {
"attribute_name": "string",
"enabled": False,
},
},
"aws_region": "string",
"aws_source_schema": "string",
"aws_tags": {
"string": "string",
},
"public_cloud_connectors_resource_id": "string",
"public_cloud_resource_name": "string",
},
tags={
"string": "string",
})
const dynamoDbTableResource = new azure_native.awsconnector.DynamoDbTable("dynamoDbTableResource", {
resourceGroupName: "string",
location: "string",
name: "string",
properties: {
arn: "string",
awsAccountId: "string",
awsProperties: {
arn: "string",
attributeDefinitions: [{
attributeName: "string",
attributeType: "string",
}],
billingMode: "string",
contributorInsightsSpecification: {
enabled: false,
},
deletionProtectionEnabled: false,
globalSecondaryIndexes: [{
contributorInsightsSpecification: {
enabled: false,
},
indexName: "string",
keySchema: [{
attributeName: "string",
keyType: "string",
}],
projection: {
nonKeyAttributes: ["string"],
projectionType: "string",
},
provisionedThroughput: {
readCapacityUnits: 0,
writeCapacityUnits: 0,
},
}],
importSourceSpecification: {
inputCompressionType: "string",
inputFormat: "string",
inputFormatOptions: {
csv: {
delimiter: "string",
headerList: ["string"],
},
},
s3BucketSource: {
s3Bucket: "string",
s3BucketOwner: "string",
s3KeyPrefix: "string",
},
},
keySchema: [{
attributeName: "string",
keyType: "string",
}],
kinesisStreamSpecification: {
approximateCreationDateTimePrecision: "string",
streamArn: "string",
},
localSecondaryIndexes: [{
indexName: "string",
keySchema: [{
attributeName: "string",
keyType: "string",
}],
projection: {
nonKeyAttributes: ["string"],
projectionType: "string",
},
}],
pointInTimeRecoverySpecification: {
pointInTimeRecoveryEnabled: false,
},
provisionedThroughput: {
readCapacityUnits: 0,
writeCapacityUnits: 0,
},
resourcePolicy: {
policyDocument: "any",
},
sseSpecification: {
kmsMasterKeyId: "string",
sseEnabled: false,
sseType: "string",
},
streamArn: "string",
streamSpecification: {
resourcePolicy: {
policyDocument: "any",
},
streamViewType: "string",
},
tableClass: "string",
tableName: "string",
tags: [{
key: "string",
value: "string",
}],
timeToLiveSpecification: {
attributeName: "string",
enabled: false,
},
},
awsRegion: "string",
awsSourceSchema: "string",
awsTags: {
string: "string",
},
publicCloudConnectorsResourceId: "string",
publicCloudResourceName: "string",
},
tags: {
string: "string",
},
});
type: azure-native:awsconnector:DynamoDbTable
properties:
location: string
name: string
properties:
arn: string
awsAccountId: string
awsProperties:
arn: string
attributeDefinitions:
- attributeName: string
attributeType: string
billingMode: string
contributorInsightsSpecification:
enabled: false
deletionProtectionEnabled: false
globalSecondaryIndexes:
- contributorInsightsSpecification:
enabled: false
indexName: string
keySchema:
- attributeName: string
keyType: string
projection:
nonKeyAttributes:
- string
projectionType: string
provisionedThroughput:
readCapacityUnits: 0
writeCapacityUnits: 0
importSourceSpecification:
inputCompressionType: string
inputFormat: string
inputFormatOptions:
csv:
delimiter: string
headerList:
- string
s3BucketSource:
s3Bucket: string
s3BucketOwner: string
s3KeyPrefix: string
keySchema:
- attributeName: string
keyType: string
kinesisStreamSpecification:
approximateCreationDateTimePrecision: string
streamArn: string
localSecondaryIndexes:
- indexName: string
keySchema:
- attributeName: string
keyType: string
projection:
nonKeyAttributes:
- string
projectionType: string
pointInTimeRecoverySpecification:
pointInTimeRecoveryEnabled: false
provisionedThroughput:
readCapacityUnits: 0
writeCapacityUnits: 0
resourcePolicy:
policyDocument: any
sseSpecification:
kmsMasterKeyId: string
sseEnabled: false
sseType: string
streamArn: string
streamSpecification:
resourcePolicy:
policyDocument: any
streamViewType: string
tableClass: string
tableName: string
tags:
- key: string
value: string
timeToLiveSpecification:
attributeName: string
enabled: false
awsRegion: string
awsSourceSchema: string
awsTags:
string: string
publicCloudConnectorsResourceId: string
publicCloudResourceName: string
resourceGroupName: string
tags:
string: string
DynamoDbTable Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The DynamoDbTable resource accepts the following input properties:
- ResourceGroupName string - The name of the resource group. The name is case insensitive.
- Location string
- The geo-location where the resource lives
- Name string
- Name of DynamoDBTable
- Properties Pulumi.AzureNative.AwsConnector.Inputs.DynamoDBTableProperties - The resource-specific properties for this resource.
- Dictionary<string, string>
- Resource tags.
- ResourceGroupName string - The name of the resource group. The name is case insensitive.
- Location string
- The geo-location where the resource lives
- Name string
- Name of DynamoDBTable
- Properties DynamoDBTablePropertiesArgs - The resource-specific properties for this resource.
- map[string]string
- Resource tags.
- resourceGroupName String - The name of the resource group. The name is case insensitive.
- location String
- The geo-location where the resource lives
- name String
- Name of DynamoDBTable
- properties DynamoDBTableProperties - The resource-specific properties for this resource.
- Map<String,String>
- Resource tags.
- resourceGroupName string - The name of the resource group. The name is case insensitive.
- location string
- The geo-location where the resource lives
- name string
- Name of DynamoDBTable
- properties DynamoDBTableProperties - The resource-specific properties for this resource.
- {[key: string]: string}
- Resource tags.
- resource_group_name str - The name of the resource group. The name is case insensitive.
- location str
- The geo-location where the resource lives
- name str
- Name of DynamoDBTable
- properties DynamoDBTablePropertiesArgs - The resource-specific properties for this resource.
- Mapping[str, str]
- Resource tags.
- resourceGroupName String - The name of the resource group. The name is case insensitive.
- location String
- The geo-location where the resource lives
- name String
- Name of DynamoDBTable
- properties Property Map
- The resource-specific properties for this resource.
- Map<String>
- Resource tags.
Outputs
All input properties are implicitly available as output properties. Additionally, the DynamoDbTable resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- SystemData Pulumi.AzureNative.AwsConnector.Outputs.SystemDataResponse - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- Type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- Id string
- The provider-assigned unique ID for this managed resource.
- SystemData SystemDataResponse - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- Type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id String
- The provider-assigned unique ID for this managed resource.
- systemData SystemDataResponse - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type String
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id string
- The provider-assigned unique ID for this managed resource.
- systemData SystemDataResponse - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id str
- The provider-assigned unique ID for this managed resource.
- system_data SystemDataResponse - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type str
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id String
- The provider-assigned unique ID for this managed resource.
- systemData Property Map - Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type String
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Supporting Types
AttributeDefinition, AttributeDefinitionArgs
- AttributeName string - A name for the attribute.
- AttributeType string - The data type for the attribute, where:
  - S - the attribute is of type String
  - N - the attribute is of type Number
  - B - the attribute is of type Binary
- Attribute
Name string - A name for the attribute.
- Attribute
Type string - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute
Name String - A name for the attribute.
- attribute
Type String - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute
Name string - A name for the attribute.
- attribute
Type string - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute_
name str - A name for the attribute.
- attribute_
type str - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute
Name String - A name for the attribute.
- attribute
Type String - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
AttributeDefinitionResponse, AttributeDefinitionResponseArgs
- Attribute
Name string - A name for the attribute.
- Attribute
Type string - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- Attribute
Name string - A name for the attribute.
- Attribute
Type string - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute
Name String - A name for the attribute.
- attribute
Type String - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute
Name string - A name for the attribute.
- attribute
Type string - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute_
name str - A name for the attribute.
- attribute_
type str - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
- attribute
Name String - A name for the attribute.
- attribute
Type String - The data type for the attribute, where: +
S
- the attribute is of type String +N
- the attribute is of type Number +B
- the attribute is of type Binary
AwsDynamoDBTableProperties, AwsDynamoDBTablePropertiesArgs
- Arn string
- Property arn
- AttributeDefinitions List&lt;Pulumi.AzureNative.AwsConnector.Inputs.AttributeDefinition&gt; - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- BillingMode string - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include:
  - PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode.
  - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode.
  If not specified, the default is PROVISIONED.
- ContributorInsightsSpecification Pulumi.AzureNative.AwsConnector.Inputs.ContributorInsightsSpecification - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- DeletionProtectionEnabled bool - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- Global
Secondary List<Pulumi.Indexes Azure Native. Aws Connector. Inputs. Global Secondary Index> - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - Import
Source Pulumi.Specification Azure Native. Aws Connector. Inputs. Import Source Specification - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - Key
Schema List<Pulumi.Azure Native. Aws Connector. Inputs. Key Schema> - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - Kinesis
Stream Pulumi.Specification Azure Native. Aws Connector. Inputs. Kinesis Stream Specification - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- Local
Secondary List<Pulumi.Indexes Azure Native. Aws Connector. Inputs. Local Secondary Index> - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- Point
In Pulumi.Time Recovery Specification Azure Native. Aws Connector. Inputs. Point In Time Recovery Specification - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- Provisioned
Throughput Pulumi.Azure Native. Aws Connector. Inputs. Provisioned Throughput - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - Resource
Policy Pulumi.Azure Native. Aws Connector. Inputs. Resource Policy - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- SseSpecification Pulumi.AzureNative.AwsConnector.Inputs.SSESpecification - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- Stream
Arn string - Property streamArn
- Stream
Specification Pulumi.Azure Native. Aws Connector. Inputs. Stream Specification - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- Table
Class string - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - Table
Name string - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- List<Pulumi.
Azure Native. Aws Connector. Inputs. Tag> - An array of key-value pairs to apply to this resource. For more information, see Tag.
- Time
To Pulumi.Live Specification Azure Native. Aws Connector. Inputs. Time To Live Specification - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- Arn string
- Property arn
- Attribute
Definitions []AttributeDefinition - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- Billing
Mode string - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: +
PROVISIONED
- We recommend usingPROVISIONED
for predictable workloads.PROVISIONED
sets the billing mode to Provisioned Mode. +PAY_PER_REQUEST
- We recommend usingPAY_PER_REQUEST
for unpredictable workloads.PAY_PER_REQUEST
sets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED
. - Contributor
Insights ContributorSpecification Insights Specification - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- Deletion
Protection boolEnabled - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- Global
Secondary []GlobalIndexes Secondary Index - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - Import
Source ImportSpecification Source Specification - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - Key
Schema []KeySchema - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - Kinesis
Stream KinesisSpecification Stream Specification - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- Local
Secondary []LocalIndexes Secondary Index - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- Point
In PointTime Recovery Specification In Time Recovery Specification - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- Provisioned
Throughput ProvisionedThroughput - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - Resource
Policy ResourcePolicy - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- Sse
Specification SSESpecification - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- Stream
Arn string - Property streamArn
- Stream
Specification StreamSpecification - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- Table
Class string - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - Table
Name string - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- []Tag
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- TimeToLiveSpecification TimeToLiveSpecification - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attribute
Definitions List<AttributeDefinition> - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing
Mode String - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: +
PROVISIONED
- We recommend usingPROVISIONED
for predictable workloads.PROVISIONED
sets the billing mode to Provisioned Mode. +PAY_PER_REQUEST
- We recommend usingPAY_PER_REQUEST
for unpredictable workloads.PAY_PER_REQUEST
sets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED
. - contributorInsightsSpecification ContributorInsightsSpecification - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled Boolean - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes List<GlobalSecondaryIndex> - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - import
SourceSpecification ImportSourceSpecification - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - key
Schema List<KeySchema> - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - kinesisStreamSpecification KinesisStreamSpecification - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes List<LocalSecondaryIndex> - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification PointInTimeRecoverySpecification - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned
Throughput ProvisionedThroughput - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - resource
Policy ResourcePolicy - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse
Specification SSESpecification - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream
Arn String - Property streamArn
- stream
Specification StreamSpecification - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table
Class String - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - table
Name String - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- List<Tag>
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification TimeToLiveSpecification - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn string
- Property arn
- attribute
Definitions AttributeDefinition[] - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing
Mode string - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: +
PROVISIONED
- We recommend usingPROVISIONED
for predictable workloads.PROVISIONED
sets the billing mode to Provisioned Mode. +PAY_PER_REQUEST
- We recommend usingPAY_PER_REQUEST
for unpredictable workloads.PAY_PER_REQUEST
sets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED
. - contributorInsightsSpecification ContributorInsightsSpecification - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled boolean - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes GlobalSecondaryIndex[] - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - import
SourceSpecification ImportSourceSpecification - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - key
Schema KeySchema[] - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - kinesisStreamSpecification KinesisStreamSpecification - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes LocalSecondaryIndex[] - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification PointInTimeRecoverySpecification - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned
Throughput ProvisionedThroughput - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - resource
Policy ResourcePolicy - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse
Specification SSESpecification - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream
Arn string - Property streamArn
- stream
Specification StreamSpecification - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table
Class string - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - table
Name string - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tag[]
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification TimeToLiveSpecification - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn str
- Property arn
- attribute_
definitions Sequence[AttributeDefinition] - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing_
mode str - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: +
PROVISIONED
- We recommend usingPROVISIONED
for predictable workloads.PROVISIONED
sets the billing mode to Provisioned Mode. +PAY_PER_REQUEST
- We recommend usingPAY_PER_REQUEST
for unpredictable workloads.PAY_PER_REQUEST
sets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED
. - contributor_insights_specification ContributorInsightsSpecification - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletion_protection_enabled bool - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- global_secondary_indexes Sequence[GlobalSecondaryIndex] - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - import_
source_specification ImportSourceSpecification - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - key_
schema Sequence[KeySchema] - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - kinesis_stream_specification KinesisStreamSpecification - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- local_secondary_indexes Sequence[LocalSecondaryIndex] - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- point_in_time_recovery_specification PointInTimeRecoverySpecification - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned_
throughput ProvisionedThroughput - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - resource_
policy ResourcePolicy - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse_specification SSESpecification - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream_arn str - Property streamArn
- stream_specification StreamSpecification - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table_class str - The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- table_name str - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags Sequence[Tag] - An array of key-value pairs to apply to this resource. For more information, see Tag.
- time_to_live_specification TimeToLiveSpecification - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attributeDefinitions List<Property Map> - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode String - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. + PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode. If not specified, the default is PROVISIONED.
- contributorInsightsSpecification Property Map - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled Boolean - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes List<Property Map> - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification Property Map - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification property, and also specify either the StreamSpecification, the TableClass property, or the DeletionProtectionEnabled property, the IAM entity creating/updating stack must have UpdateTable permission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema List<Property Map> - Specifies the attributes that make up the primary key for the table. The attributes in the KeySchema property must also be defined in the AttributeDefinitions property.
- kinesisStreamSpecification Property Map - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes List<Property Map> - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification Property Map - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput Property Map - Throughput for the specified table, which consists of values for
ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy Property Map - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template.
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sseSpecification Property Map - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- streamArn String - Property streamArn
- streamSpecification Property Map - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- tableClass String - The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- tableName String - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags List<Property Map> - An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification Property Map - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
AwsDynamoDBTablePropertiesResponse, AwsDynamoDBTablePropertiesResponseArgs
- Arn string
- Property arn
- AttributeDefinitions List<Pulumi.AzureNative.AwsConnector.Inputs.AttributeDefinitionResponse> - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- BillingMode string - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. + PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode. If not specified, the default is PROVISIONED.
- ContributorInsightsSpecification Pulumi.AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationResponse - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- DeletionProtectionEnabled bool - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- GlobalSecondaryIndexes List<Pulumi.AzureNative.AwsConnector.Inputs.GlobalSecondaryIndexResponse> - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- ImportSourceSpecification Pulumi.AzureNative.AwsConnector.Inputs.ImportSourceSpecificationResponse - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification property, and also specify either the StreamSpecification, the TableClass property, or the DeletionProtectionEnabled property, the IAM entity creating/updating stack must have UpdateTable permission. Specifies the properties of data being imported from the S3 bucket source to the table.
- KeySchema List<Pulumi.AzureNative.AwsConnector.Inputs.KeySchemaResponse> - Specifies the attributes that make up the primary key for the table. The attributes in the KeySchema property must also be defined in the AttributeDefinitions property.
- KinesisStreamSpecification Pulumi.AzureNative.AwsConnector.Inputs.KinesisStreamSpecificationResponse - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- LocalSecondaryIndexes List<Pulumi.AzureNative.AwsConnector.Inputs.LocalSecondaryIndexResponse> - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- PointInTimeRecoverySpecification Pulumi.AzureNative.AwsConnector.Inputs.PointInTimeRecoverySpecificationResponse - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- ProvisionedThroughput Pulumi.AzureNative.AwsConnector.Inputs.ProvisionedThroughputResponse - Throughput for the specified table, which consists of values for
ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ResourcePolicy Pulumi.AzureNative.AwsConnector.Inputs.ResourcePolicyResponse - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template.
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- SseSpecification Pulumi.AzureNative.AwsConnector.Inputs.SSESpecificationResponse - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- StreamArn string - Property streamArn
- StreamSpecification Pulumi.AzureNative.AwsConnector.Inputs.StreamSpecificationResponse - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- TableClass string - The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- TableName string - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tags List<Pulumi.AzureNative.AwsConnector.Inputs.TagResponse> - An array of key-value pairs to apply to this resource. For more information, see Tag.
- TimeToLiveSpecification Pulumi.AzureNative.AwsConnector.Inputs.TimeToLiveSpecificationResponse - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- Arn string
- Property arn
- AttributeDefinitions []AttributeDefinitionResponse - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- BillingMode string - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. + PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode. If not specified, the default is PROVISIONED.
- ContributorInsightsSpecification ContributorInsightsSpecificationResponse - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- DeletionProtectionEnabled bool - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- GlobalSecondaryIndexes []GlobalSecondaryIndexResponse - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- ImportSourceSpecification ImportSourceSpecificationResponse - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification property, and also specify either the StreamSpecification, the TableClass property, or the DeletionProtectionEnabled property, the IAM entity creating/updating stack must have UpdateTable permission. Specifies the properties of data being imported from the S3 bucket source to the table.
- KeySchema []KeySchemaResponse - Specifies the attributes that make up the primary key for the table. The attributes in the KeySchema property must also be defined in the AttributeDefinitions property.
- KinesisStreamSpecification KinesisStreamSpecificationResponse - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- LocalSecondaryIndexes []LocalSecondaryIndexResponse - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- PointInTimeRecoverySpecification PointInTimeRecoverySpecificationResponse - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- ProvisionedThroughput ProvisionedThroughputResponse - Throughput for the specified table, which consists of values for
ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ResourcePolicy ResourcePolicyResponse - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template.
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- SseSpecification SSESpecificationResponse - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- StreamArn string - Property streamArn
- StreamSpecification StreamSpecificationResponse - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- TableClass string - The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- TableName string - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tags []TagResponse - An array of key-value pairs to apply to this resource. For more information, see Tag.
- TimeToLiveSpecification TimeToLiveSpecificationResponse - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attributeDefinitions List<AttributeDefinitionResponse> - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode String - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. + PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode. If not specified, the default is PROVISIONED.
- contributorInsightsSpecification ContributorInsightsSpecificationResponse - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled Boolean - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes List<GlobalSecondaryIndexResponse> - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification ImportSourceSpecificationResponse - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification property, and also specify either the StreamSpecification, the TableClass property, or the DeletionProtectionEnabled property, the IAM entity creating/updating stack must have UpdateTable permission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema List<KeySchemaResponse> - Specifies the attributes that make up the primary key for the table. The attributes in the KeySchema property must also be defined in the AttributeDefinitions property.
- kinesisStreamSpecification KinesisStreamSpecificationResponse - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes List<LocalSecondaryIndexResponse> - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification PointInTimeRecoverySpecificationResponse - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput ProvisionedThroughputResponse - Throughput for the specified table, which consists of values for
ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy ResourcePolicyResponse - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template.
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse
Specification SSESpecificationResponse - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream
Arn String - Property streamArn
- stream
Specification StreamSpecification Response - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table
Class String - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - table
Name String - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- List<Tag
Response> - An array of key-value pairs to apply to this resource. For more information, see Tag.
- time
ToLiveSpecification TimeToLiveSpecificationResponse - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn string
- Property arn
- attribute
Definitions AttributeDefinition Response[] - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing
Mode string - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: +
PROVISIONED
- We recommend usingPROVISIONED
for predictable workloads.PROVISIONED
sets the billing mode to Provisioned Mode. +PAY_PER_REQUEST
- We recommend usingPAY_PER_REQUEST
for unpredictable workloads.PAY_PER_REQUEST
sets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED
. - contributor
InsightsSpecification ContributorInsightsSpecificationResponse - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletion
ProtectionEnabled boolean - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- global
SecondaryIndexes GlobalSecondaryIndexResponse[] - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - import
SourceSpecification ImportSourceSpecificationResponse - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - key
Schema KeySchema Response[] - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - kinesis
StreamSpecification KinesisStreamSpecificationResponse - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- local
SecondaryIndexes LocalSecondaryIndexResponse[] - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- point
InTimeRecoverySpecification PointInTimeRecoverySpecificationResponse - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned
Throughput ProvisionedThroughput Response - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - resource
Policy ResourcePolicy Response - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse
Specification SSESpecificationResponse - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream
Arn string - Property streamArn
- stream
Specification StreamSpecification Response - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table
Class string - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - table
Name string - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tag
Response[] - An array of key-value pairs to apply to this resource. For more information, see Tag.
- time
ToLiveSpecification TimeToLiveSpecificationResponse - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn str
- Property arn
- attribute_
definitions Sequence[AttributeDefinition Response] - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing_
mode str - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: +
PROVISIONED
- We recommend usingPROVISIONED
for predictable workloads.PROVISIONED
sets the billing mode to Provisioned Mode. +PAY_PER_REQUEST
- We recommend usingPAY_PER_REQUEST
for unpredictable workloads.PAY_PER_REQUEST
sets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED
. - contributor_
insights_specification ContributorInsightsSpecificationResponse - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletion_
protection_enabled bool - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- global_
secondary_indexes Sequence[GlobalSecondaryIndexResponse] - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - import_
source_specification ImportSourceSpecificationResponse - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - key_
schema Sequence[KeySchema Response] - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - kinesis_
stream_specification KinesisStreamSpecificationResponse - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- local_
secondary_indexes Sequence[LocalSecondaryIndexResponse] - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- point_
in_time_recovery_specification PointInTimeRecoverySpecificationResponse - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned_
throughput ProvisionedThroughput Response - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - resource_
policy ResourcePolicy Response - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse_
specification SSESpecificationResponse - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream_
arn str - Property streamArn
- stream_
specification StreamSpecification Response - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table_
class str - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - table_
name str - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Sequence[Tag
Response] - An array of key-value pairs to apply to this resource. For more information, see Tag.
- time_
to_live_specification TimeToLiveSpecificationResponse - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attribute
Definitions List<Property Map> - A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing
Mode String - Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: +
PROVISIONED
- We recommend usingPROVISIONED
for predictable workloads.PROVISIONED
sets the billing mode to Provisioned Mode. +PAY_PER_REQUEST
- We recommend usingPAY_PER_REQUEST
for unpredictable workloads.PAY_PER_REQUEST
sets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED
. - contributor
InsightsSpecification Property Map - The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletion
ProtectionEnabled Boolean - Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- global
SecondaryIndexes List<Property Map> - Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is
ACTIVE
. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails. - import
SourceSpecification Property Map - Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the
ImportSourceSpecification
property, and also specify either theStreamSpecification
, theTableClass
property, or theDeletionProtectionEnabled
property, the IAM entity creating/updating stack must haveUpdateTable
permission. Specifies the properties of data being imported from the S3 bucket source to the table. - key
Schema List<Property Map> - Specifies the attributes that make up the primary key for the table. The attributes in the
KeySchema
property must also be defined in theAttributeDefinitions
property. - kinesis
StreamSpecification Property Map - The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- local
SecondaryIndexes List<Property Map> - Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- point
InTimeRecoverySpecification Property Map - The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned
Throughput Property Map - Throughput for the specified table, which consists of values for
ReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingMode
asPROVISIONED
, you must specify this property. If you setBillingMode
asPAY_PER_REQUEST
, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnits
andWriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput. - resource
Policy Property Map - A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse
Specification Property Map - Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream
Arn String - Property streamArn
- stream
Specification Property Map - The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table
Class String - The table class of the new table. Valid values are
STANDARD
andSTANDARD_INFREQUENT_ACCESS
. - table
Name String - A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- List<Property Map>
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- time
ToLiveSpecification Property Map - Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
ContributorInsightsSpecification, ContributorInsightsSpecificationArgs
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
ContributorInsightsSpecificationResponse, ContributorInsightsSpecificationResponseArgs
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
Csv, CsvArgs
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- Header
List List<string> - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- Header
List []string - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- header
List List<String> - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
- delimiter string
- The delimiter used for separating items in the CSV file being imported.
- header
List string[] - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
- delimiter str
- The delimiter used for separating items in the CSV file being imported.
- header_
list Sequence[str] - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- header
List List<String> - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
CsvResponse, CsvResponseArgs
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- Header
List List<string> - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- Header
List []string - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified, the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- header
List List<String> - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter string
- The delimiter used for separating items in the CSV file being imported.
- header
List string[] - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter str
- The delimiter used for separating items in the CSV file being imported.
- header_
list Sequence[str] - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- header
List List<String> - List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
DynamoDBTableProperties, DynamoDBTablePropertiesArgs
- Arn string
- Amazon Resource Name (ARN)
- Aws
Account stringId - AWS Account ID
- Aws
Properties Pulumi.Azure Native. Aws Connector. Inputs. Aws Dynamo DBTable Properties - AWS Properties
- Aws
Region string - AWS Region
- Aws
Source stringSchema - AWS Source Schema
- Dictionary<string, string>
- AWS Tags
- Public
Cloud stringConnectors Resource Id - Public Cloud Connectors Resource ID
- Public
Cloud stringResource Name - Public Cloud Resource Name
- Arn string
- Amazon Resource Name (ARN)
- Aws
Account stringId - AWS Account ID
- Aws
Properties AwsDynamo DBTable Properties - AWS Properties
- Aws
Region string - AWS Region
- Aws
Source stringSchema - AWS Source Schema
- map[string]string
- AWS Tags
- Public
Cloud stringConnectors Resource Id - Public Cloud Connectors Resource ID
- Public
Cloud stringResource Name - Public Cloud Resource Name
- arn String
- Amazon Resource Name (ARN)
- aws
Account StringId - AWS Account ID
- aws
Properties AwsDynamo DBTable Properties - AWS Properties
- aws
Region String - AWS Region
- aws
Source StringSchema - AWS Source Schema
- Map<String,String>
- AWS Tags
- public
Cloud StringConnectors Resource Id - Public Cloud Connectors Resource ID
- public
Cloud StringResource Name - Public Cloud Resource Name
- arn string
- Amazon Resource Name (ARN)
- aws
Account stringId - AWS Account ID
- aws
Properties AwsDynamo DBTable Properties - AWS Properties
- aws
Region string - AWS Region
- aws
Source stringSchema - AWS Source Schema
- {[key: string]: string}
- AWS Tags
- public
Cloud stringConnectors Resource Id - Public Cloud Connectors Resource ID
- public
Cloud stringResource Name - Public Cloud Resource Name
- arn str
- Amazon Resource Name (ARN)
- aws_
account_ strid - AWS Account ID
- aws_
properties AwsDynamo DBTable Properties - AWS Properties
- aws_
region str - AWS Region
- aws_
source_ strschema - AWS Source Schema
- Mapping[str, str]
- AWS Tags
- public_
cloud_ strconnectors_ resource_ id - Public Cloud Connectors Resource ID
- public_
cloud_ strresource_ name - Public Cloud Resource Name
- arn String
- Amazon Resource Name (ARN)
- aws
Account StringId - AWS Account ID
- aws
Properties Property Map - AWS Properties
- aws
Region String - AWS Region
- aws
Source StringSchema - AWS Source Schema
- Map<String>
- AWS Tags
- public
Cloud StringConnectors Resource Id - Public Cloud Connectors Resource ID
- public
Cloud StringResource Name - Public Cloud Resource Name
DynamoDBTablePropertiesResponse, DynamoDBTablePropertiesResponseArgs
- Provisioning
State string - The status of the last operation.
- Arn string
- Amazon Resource Name (ARN)
- Aws
Account stringId - AWS Account ID
- Aws
Properties Pulumi.Azure Native. Aws Connector. Inputs. Aws Dynamo DBTable Properties Response - AWS Properties
- Aws
Region string - AWS Region
- Aws
Source stringSchema - AWS Source Schema
- Dictionary<string, string>
- AWS Tags
- Public
Cloud stringConnectors Resource Id - Public Cloud Connectors Resource ID
- Public
Cloud stringResource Name - Public Cloud Resource Name
- Provisioning
State string - The status of the last operation.
- Arn string
- Amazon Resource Name (ARN)
- Aws
Account stringId - AWS Account ID
- Aws
Properties AwsDynamo DBTable Properties Response - AWS Properties
- Aws
Region string - AWS Region
- Aws
Source stringSchema - AWS Source Schema
- map[string]string
- AWS Tags
- Public
Cloud stringConnectors Resource Id - Public Cloud Connectors Resource ID
- Public
Cloud stringResource Name - Public Cloud Resource Name
- provisioning
State String - The status of the last operation.
- arn String
- Amazon Resource Name (ARN)
- aws
Account StringId - AWS Account ID
- aws
Properties AwsDynamo DBTable Properties Response - AWS Properties
- aws
Region String - AWS Region
- aws
Source StringSchema - AWS Source Schema
- Map<String,String>
- AWS Tags
- public
Cloud StringConnectors Resource Id - Public Cloud Connectors Resource ID
- public
Cloud StringResource Name - Public Cloud Resource Name
- provisioning
State string - The status of the last operation.
- arn string
- Amazon Resource Name (ARN)
- aws
Account stringId - AWS Account ID
- aws
Properties AwsDynamo DBTable Properties Response - AWS Properties
- aws
Region string - AWS Region
- aws
Source stringSchema - AWS Source Schema
- {[key: string]: string}
- AWS Tags
- public
Cloud stringConnectors Resource Id - Public Cloud Connectors Resource ID
- public
Cloud stringResource Name - Public Cloud Resource Name
- provisioning_
state str - The status of the last operation.
- arn str
- Amazon Resource Name (ARN)
- aws_
account_ strid - AWS Account ID
- aws_
properties AwsDynamo DBTable Properties Response - AWS Properties
- aws_
region str - AWS Region
- aws_
source_ strschema - AWS Source Schema
- Mapping[str, str]
- AWS Tags
- public_
cloud_ strconnectors_ resource_ id - Public Cloud Connectors Resource ID
- public_
cloud_ strresource_ name - Public Cloud Resource Name
- provisioning
State String - The status of the last operation.
- arn String
- Amazon Resource Name (ARN)
- aws
Account StringId - AWS Account ID
- aws
Properties Property Map - AWS Properties
- aws
Region String - AWS Region
- aws
Source StringSchema - AWS Source Schema
- Map<String>
- AWS Tags
- public
Cloud StringConnectors Resource Id - Public Cloud Connectors Resource ID
- public
Cloud StringResource Name - Public Cloud Resource Name
GlobalSecondaryIndex, GlobalSecondaryIndexArgs
- Contributor
Insights Pulumi.Specification Azure Native. Aws Connector. Inputs. Contributor Insights Specification - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- Index
Name string - The name of the global secondary index. The name must be unique among all other indexes on this table.
- Key
Schema List<Pulumi.Azure Native. Aws Connector. Inputs. Key Schema> - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - Projection
Pulumi.
Azure Native. Aws Connector. Inputs. Projection - Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- Provisioned
Throughput Pulumi.Azure Native. Aws Connector. Inputs. Provisioned Throughput - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- Contributor
Insights ContributorSpecification Insights Specification - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- Index
Name string - The name of the global secondary index. The name must be unique among all other indexes on this table.
- Key
Schema []KeySchema - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - Projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- Provisioned
Throughput ProvisionedThroughput - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor
Insights ContributorSpecification Insights Specification - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index
Name String - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key
Schema List<KeySchema> - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned
Throughput ProvisionedThroughput - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor
Insights ContributorSpecification Insights Specification - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index
Name string - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key
Schema KeySchema[] - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned
Throughput ProvisionedThroughput - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor_
insights_ Contributorspecification Insights Specification - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index_
name str - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key_
schema Sequence[KeySchema] - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned_
throughput ProvisionedThroughput - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor
Insights Property MapSpecification - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index
Name String - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key
Schema List<Property Map> - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Property Map
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned
Throughput Property Map - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
GlobalSecondaryIndexResponse, GlobalSecondaryIndexResponseArgs
- Contributor
Insights Pulumi.Specification Azure Native. Aws Connector. Inputs. Contributor Insights Specification Response - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- Index
Name string - The name of the global secondary index. The name must be unique among all other indexes on this table.
- Key
Schema List<Pulumi.Azure Native. Aws Connector. Inputs. Key Schema Response> - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - Projection
Pulumi.
Azure Native. Aws Connector. Inputs. Projection Response - Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- Provisioned
Throughput Pulumi.Azure Native. Aws Connector. Inputs. Provisioned Throughput Response - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- Contributor
Insights ContributorSpecification Insights Specification Response - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- Index
Name string - The name of the global secondary index. The name must be unique among all other indexes on this table.
- Key
Schema []KeySchema Response - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - Projection
Projection
Response - Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- Provisioned
Throughput ProvisionedThroughput Response - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor
Insights ContributorSpecification Insights Specification Response - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index
Name String - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key
Schema List<KeySchema Response> - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection
Projection
Response - Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned
Throughput ProvisionedThroughput Response - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor
Insights ContributorSpecification Insights Specification Response - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index
Name string - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key
Schema KeySchema Response[] - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection
Projection
Response - Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned
Throughput ProvisionedThroughput Response - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor_
insights_ Contributorspecification Insights Specification Response - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index_
name str - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key_
schema Sequence[KeySchema Response] - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection
Projection
Response - Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned_
throughput ProvisionedThroughput Response - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor
Insights Property MapSpecification - The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index
Name String - The name of the global secondary index. The name must be unique among all other indexes on this table.
- key
Schema List<Property Map> - The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Property Map
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned
Throughput Property Map - Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for
ReadCapacityUnits
and WriteCapacityUnits
. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
ImportSourceSpecification, ImportSourceSpecificationArgs
- Input
Compression stringType - Type of compression to be used on the input coming from the imported table.
- Input
Format string - The format of the source data. Valid values for
ImportFormat
are CSV, DYNAMODB_JSON or ION
. - Input
Format Pulumi.Options Azure Native. Aws Connector. Inputs. Input Format Options - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3Bucket
Source Pulumi.Azure Native. Aws Connector. Inputs. S3Bucket Source - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- Input
Compression stringType - Type of compression to be used on the input coming from the imported table.
- Input
Format string - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - Input
Format InputOptions Format Options - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3Bucket
Source S3BucketSource - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input
Compression StringType - Type of compression to be used on the input coming from the imported table.
- input
Format String - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - input
Format InputOptions Format Options - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3Bucket
Source S3BucketSource - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input
Compression stringType - Type of compression to be used on the input coming from the imported table.
- input
Format string - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - input
Format InputOptions Format Options - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3Bucket
Source S3BucketSource - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input_compression_type str - Type of compression to be used on the input coming from the imported table.
- input_format str - The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON, or ION.
- input_format_options InputFormatOptions - Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3_bucket_source S3BucketSource - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input
Compression StringType - Type of compression to be used on the input coming from the imported table.
- input
Format String - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - input
Format Property MapOptions - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3Bucket
Source Property Map - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
ImportSourceSpecificationResponse, ImportSourceSpecificationResponseArgs
- Input
Compression stringType - Type of compression to be used on the input coming from the imported table.
- Input
Format string - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - Input
Format Pulumi.Options Azure Native. Aws Connector. Inputs. Input Format Options Response - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3Bucket
Source Pulumi.Azure Native. Aws Connector. Inputs. S3Bucket Source Response - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- Input
Compression stringType - Type of compression to be used on the input coming from the imported table.
- Input
Format string - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - Input
Format InputOptions Format Options Response - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3Bucket
Source S3BucketSource Response - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input
Compression StringType - Type of compression to be used on the input coming from the imported table.
- input
Format String - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - input
Format InputOptions Format Options Response - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3Bucket
Source S3BucketSource Response - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input
Compression stringType - Type of compression to be used on the input coming from the imported table.
- input
Format string - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - input
Format InputOptions Format Options Response - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3Bucket
Source S3BucketSource Response - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input_compression_type str - Type of compression to be used on the input coming from the imported table.
- input_format str - The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON, or ION.
- input_format_options InputFormatOptionsResponse - Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3_bucket_source S3BucketSourceResponse - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input
Compression StringType - Type of compression to be used on the input coming from the imported table.
- input
Format String - The format of the source data. Valid values for
ImportFormat
areCSV
,DYNAMODB_JSON
orION
. - input
Format Property MapOptions - Additional properties that specify how the input is formatted, The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3Bucket
Source Property Map - The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
InputFormatOptions, InputFormatOptionsArgs
- Csv Pulumi.AzureNative.AwsConnector.Inputs.Csv - The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv Property Map
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
InputFormatOptionsResponse, InputFormatOptionsResponseArgs
- Csv
Pulumi.
Azure Native. Aws Connector. Inputs. Csv Response - The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- Csv
Csv
Response - The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv
Csv
Response - The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv
Csv
Response - The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv
Csv
Response - The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv Property Map
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
KeySchema, KeySchemaArgs
- AttributeName string - The name of a key attribute.
- KeyType string - The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Attribute
Name string - The name of a key attribute.
- Key
Type string - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute
Name String - The name of a key attribute.
- key
Type String - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute
Name string - The name of a key attribute.
- key
Type string - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute_
name str - The name of a key attribute.
- key_
type str - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute
Name String - The name of a key attribute.
- key
Type String - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
KeySchemaResponse, KeySchemaResponseArgs
- Attribute
Name string - The name of a key attribute.
- Key
Type string - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Attribute
Name string - The name of a key attribute.
- Key
Type string - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute
Name String - The name of a key attribute.
- key
Type String - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute
Name string - The name of a key attribute.
- key
Type string - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute_
name str - The name of a key attribute.
- key_
type str - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute
Name String - The name of a key attribute.
- key
Type String - The role that this key attribute will assume: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
KinesisStreamSpecification, KinesisStreamSpecificationArgs
- ApproximateCreationDateTimePrecision string | Pulumi.AzureNative.AwsConnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision - The precision for the time and date that the stream was created.
- StreamArn string - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- Approximate
Creation string | KinesisDate Time Precision Stream Specification Approximate Creation Date Time Precision - The precision for the time and date that the stream was created.
- Stream
Arn string - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate
Creation String | KinesisDate Time Precision Stream Specification Approximate Creation Date Time Precision - The precision for the time and date that the stream was created.
- stream
Arn String - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate
Creation string | KinesisDate Time Precision Stream Specification Approximate Creation Date Time Precision - The precision for the time and date that the stream was created.
- stream
Arn string - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate_creation_date_time_precision str | KinesisStreamSpecificationApproximateCreationDateTimePrecision - The precision for the time and date that the stream was created.
- stream_arn str - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate
Creation String | "MICROSECOND" | "MILLISECOND"Date Time Precision - The precision for the time and date that the stream was created.
- stream
Arn String - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
KinesisStreamSpecificationApproximateCreationDateTimePrecision, KinesisStreamSpecificationApproximateCreationDateTimePrecisionArgs
- MICROSECOND
- MICROSECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- Kinesis
Stream Specification Approximate Creation Date Time Precision MICROSECOND - MICROSECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- Kinesis
Stream Specification Approximate Creation Date Time Precision MILLISECOND - MILLISECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- MICROSECOND
- MICROSECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- MICROSECOND
- MICROSECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- MICROSECOND
- MICROSECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- "MICROSECOND"
- MICROSECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- "MILLISECOND"
- MILLISECONDKinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
KinesisStreamSpecificationResponse, KinesisStreamSpecificationResponseArgs
- Approximate
Creation stringDate Time Precision - The precision for the time and date that the stream was created.
- Stream
Arn string - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- Approximate
Creation stringDate Time Precision - The precision for the time and date that the stream was created.
- Stream
Arn string - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate
Creation StringDate Time Precision - The precision for the time and date that the stream was created.
- stream
Arn String - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate
Creation stringDate Time Precision - The precision for the time and date that the stream was created.
- stream
Arn string - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate_
creation_ strdate_ time_ precision - The precision for the time and date that the stream was created.
- stream_
arn str - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate
Creation StringDate Time Precision - The precision for the time and date that the stream was created.
- stream
Arn String - The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
LocalSecondaryIndex, LocalSecondaryIndexArgs
- IndexName string - The name of the local secondary index. The name must be unique among all other indexes on this table.
- KeySchema List<Pulumi.AzureNative.AwsConnector.Inputs.KeySchema> - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection Pulumi.AzureNative.AwsConnector.Inputs.Projection - Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- Index
Name string - The name of the local secondary index. The name must be unique among all other indexes on this table.
- Key
Schema []KeySchema - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - Projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- index
Name String - The name of the local secondary index. The name must be unique among all other indexes on this table.
- key
Schema List<KeySchema> - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- index
Name string - The name of the local secondary index. The name must be unique among all other indexes on this table.
- key
Schema KeySchema[] - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- index_
name str - The name of the local secondary index. The name must be unique among all other indexes on this table.
- key_
schema Sequence[KeySchema] - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- index
Name String - The name of the local secondary index. The name must be unique among all other indexes on this table.
- key
Schema List<Property Map> - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - projection Property Map
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
LocalSecondaryIndexResponse, LocalSecondaryIndexResponseArgs
- Index
Name string - The name of the local secondary index. The name must be unique among all other indexes on this table.
- Key
Schema List<Pulumi.Azure Native. Aws Connector. Inputs. Key Schema Response> - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - Projection
Pulumi.
Azure Native. Aws Connector. Inputs. Projection Response - Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- Index
Name string - The name of the local secondary index. The name must be unique among all other indexes on this table.
- Key
Schema []KeySchema Response - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: +
HASH
- partition key +RANGE
- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - Projection
Projection
Response - Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName String - The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema List<KeySchemaResponse> - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: HASH - partition key; RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse - Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName string - The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema KeySchemaResponse[] - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: HASH - partition key; RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse - Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- index_name str - The name of the local secondary index. The name must be unique among all other indexes on this table.
- key_schema Sequence[KeySchemaResponse] - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: HASH - partition key; RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse - Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName String - The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema List<Property Map> - The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: HASH - partition key; RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Property Map - Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
PointInTimeRecoverySpecification, PointInTimeRecoverySpecificationArgs
- PointInTimeRecoveryEnabled bool - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- PointInTimeRecoveryEnabled bool - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled boolean - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- point_in_time_recovery_enabled bool - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
PointInTimeRecoverySpecificationResponse, PointInTimeRecoverySpecificationResponseArgs
- PointInTimeRecoveryEnabled bool - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- PointInTimeRecoveryEnabled bool - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled boolean - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- point_in_time_recovery_enabled bool - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean - Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
Projection, ProjectionArgs
- NonKeyAttributes List<string> - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- NonKeyAttributes []string - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- nonKeyAttributes List<String> - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- nonKeyAttributes string[] - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType string - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- non_key_attributes Sequence[str] - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projection_type str - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- nonKeyAttributes List<String> - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
ProjectionResponse, ProjectionResponseArgs
- NonKeyAttributes List<string> - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- NonKeyAttributes []string - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- nonKeyAttributes List<String> - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- nonKeyAttributes string[] - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType string - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- non_key_attributes Sequence[str] - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projection_type str - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
- nonKeyAttributes List<String> - Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String - The set of attributes that are projected into the index: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify. ALL - All of the table attributes are projected into the index. When using the DynamoDB console, ALL is selected by default.
ProvisionedThroughput, ProvisionedThroughputArgs
- ReadCapacityUnits int - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- WriteCapacityUnits int - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- ReadCapacityUnits int - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- WriteCapacityUnits int - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits Integer - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits Integer - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits number - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits number - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- read_capacity_units int - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- write_capacity_units int - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits Number - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits Number - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
ProvisionedThroughputResponse, ProvisionedThroughputResponseArgs
- ReadCapacityUnits int - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- WriteCapacityUnits int - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- ReadCapacityUnits int - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- WriteCapacityUnits int - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits Integer - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits Integer - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits number - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits number - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- read_capacity_units int - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- write_capacity_units int - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits Number - The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits Number - The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
ResourcePolicy, ResourcePolicyArgs
- PolicyDocument object - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- PolicyDocument interface{} - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Object - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument any - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policy_document Any - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Any - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
ResourcePolicyResponse, ResourcePolicyResponseArgs
- PolicyDocument object - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- PolicyDocument interface{} - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Object - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument any - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policy_document Any - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Any - A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
S3BucketSource, S3BucketSourceArgs
- S3Bucket string - The S3 bucket that is being imported from.
- S3BucketOwner string - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string - The key prefix shared by all S3 Objects that are being imported.
- S3Bucket string - The S3 bucket that is being imported from.
- S3BucketOwner string - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string - The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String - The S3 bucket that is being imported from.
- s3BucketOwner String - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String - The key prefix shared by all S3 Objects that are being imported.
- s3Bucket string - The S3 bucket that is being imported from.
- s3BucketOwner string - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix string - The key prefix shared by all S3 Objects that are being imported.
- s3_bucket str - The S3 bucket that is being imported from.
- s3_bucket_owner str - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3_key_prefix str - The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String - The S3 bucket that is being imported from.
- s3BucketOwner String - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String - The key prefix shared by all S3 Objects that are being imported.
S3BucketSourceResponse, S3BucketSourceResponseArgs
- S3Bucket string - The S3 bucket that is being imported from.
- S3BucketOwner string - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string - The key prefix shared by all S3 Objects that are being imported.
- S3Bucket string - The S3 bucket that is being imported from.
- S3BucketOwner string - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string - The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String - The S3 bucket that is being imported from.
- s3BucketOwner String - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String - The key prefix shared by all S3 Objects that are being imported.
- s3Bucket string - The S3 bucket that is being imported from.
- s3BucketOwner string - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix string - The key prefix shared by all S3 Objects that are being imported.
- s3_bucket str - The S3 bucket that is being imported from.
- s3_bucket_owner str - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3_key_prefix str - The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String - The S3 bucket that is being imported from.
- s3BucketOwner String - The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String - The key prefix shared by all S3 Objects that are being imported.
SSESpecification, SSESpecificationArgs
- Kms
Master stringKey Id - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - Sse
Enabled bool - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - Sse
Type string - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- Kms
Master stringKey Id - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - Sse
Enabled bool - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - Sse
Type string - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms
Master StringKey Id - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse
Enabled Boolean - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse
Type String - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms
Master stringKey Id - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse
Enabled boolean - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse
Type string - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms_
master_ strkey_ id - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse_
enabled bool - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse_
type str - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms
Master Key Id String - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse
Enabled Boolean - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse
Type String - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
SSESpecificationResponse, SSESpecificationResponseArgs
- Kms
Master Key Id string - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - Sse
Enabled bool - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - Sse
Type string - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- Kms
Master Key Id string - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - Sse
Enabled bool - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - Sse
Type string - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms
Master Key Id String - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse
Enabled Boolean - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse
Type String - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms
Master Key Id string - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse
Enabled boolean - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse
Type string - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms_
master_ key_ id str - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse_
enabled bool - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse_
type str - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms
Master Key Id String - The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key
alias/aws/dynamodb
. - sse
Enabled Boolean - Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to
KMS
and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key. - sse
Type String - Server-side encryption type. The only supported value is: +
KMS
- Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
StreamSpecification, StreamSpecificationArgs
- Resource
Policy Pulumi.Azure Native. Aws Connector. Inputs. Resource Policy - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- Stream
View Type string - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- Resource
Policy ResourcePolicy - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- Stream
View Type string - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource
Policy ResourcePolicy - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream
View Type String - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource
Policy ResourcePolicy - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream
View Type string - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource_
policy ResourcePolicy - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream_
view_ type str - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource
Policy Property Map - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream
View Type String - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
StreamSpecificationResponse, StreamSpecificationResponseArgs
- Resource
Policy Pulumi.Azure Native. Aws Connector. Inputs. Resource Policy Response - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. 
For example, say that your template contains a resource-based policy, which you later update outside of the template. If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- Stream
View Type string - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- Resource
Policy ResourcePolicy Response - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- Stream
View Type string - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource
Policy ResourcePolicy Response - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream
View Type String - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource
Policy ResourcePolicy Response - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream
View Type string - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource_
policy ResourcePolicy Response - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream_
view_ type str - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
- resource
Policy Property Map - Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream
View Type String - When an item in the table is modified,
StreamViewType
determines what information is written to the stream for this table. Valid values for StreamViewType
are: +KEYS_ONLY
- Only the key attributes of the modified item are written to the stream. +NEW_IMAGE
- The entire item, as it appears after it was modified, is written to the stream. +OLD_IMAGE
- The entire item, as it appeared before it was modified, is written to the stream. +NEW_AND_OLD_IMAGES
- Both the new and the old item images of the item are written to the stream.
SystemDataResponse, SystemDataResponseArgs
- Created
At string - The timestamp of resource creation (UTC).
- Created
By string - The identity that created the resource.
- Created
By Type string - The type of identity that created the resource.
- Last
Modified At string - The timestamp of resource last modification (UTC).
- Last
Modified By string - The identity that last modified the resource.
- Last
Modified By Type string - The type of identity that last modified the resource.
- Created
At string - The timestamp of resource creation (UTC).
- Created
By string - The identity that created the resource.
- Created
By Type string - The type of identity that created the resource.
- Last
Modified At string - The timestamp of resource last modification (UTC).
- Last
Modified By string - The identity that last modified the resource.
- Last
Modified By Type string - The type of identity that last modified the resource.
- created
At String - The timestamp of resource creation (UTC).
- created
By String - The identity that created the resource.
- created
By Type String - The type of identity that created the resource.
- last
Modified At String - The timestamp of resource last modification (UTC).
- last
Modified By String - The identity that last modified the resource.
- last
Modified By Type String - The type of identity that last modified the resource.
- created
At string - The timestamp of resource creation (UTC).
- created
By string - The identity that created the resource.
- created
By Type string - The type of identity that created the resource.
- last
Modified At string - The timestamp of resource last modification (UTC).
- last
Modified By string - The identity that last modified the resource.
- last
Modified By Type string - The type of identity that last modified the resource.
- created_
at str - The timestamp of resource creation (UTC).
- created_
by str - The identity that created the resource.
- created_
by_ type str - The type of identity that created the resource.
- last_
modified_ at str - The timestamp of resource last modification (UTC).
- last_
modified_ by str - The identity that last modified the resource.
- last_
modified_ by_ type str - The type of identity that last modified the resource.
- created
At String - The timestamp of resource creation (UTC).
- created
By String - The identity that created the resource.
- created
By Type String - The type of identity that created the resource.
- last
Modified At String - The timestamp of resource last modification (UTC).
- last
Modified By String - The identity that last modified the resource.
- last
Modified By Type String - The type of identity that last modified the resource.
Tag, TagArgs
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key str
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value str
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
TagResponse, TagResponseArgs
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key str
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value str
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
TimeToLiveSpecification, TimeToLiveSpecificationArgs
- Attribute
Name string - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- Attribute
Name string - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute
Name String - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute
Name string - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute_
name str - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute
Name String - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
TimeToLiveSpecificationResponse, TimeToLiveSpecificationResponseArgs
- Attribute
Name string - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- Attribute
Name string - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute
Name String - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute
Name string - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute_
name str - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute
Name String - The name of the TTL attribute used to store the expiration time for items in the table. + The
AttributeName
property is required when enabling the TTL, or when TTL is already enabled. + To update this property, you must first disable TTL and then enable TTL with the new attribute name. - enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
Import
An existing resource can be imported using its type token, name, and identifier, e.g.
$ pulumi import azure-native:awsconnector:DynamoDbTable wjhshaxtpxprmkvirlnkg /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AwsConnector/dynamoDBTables/{name}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Azure Native pulumi/pulumi-azure-native
- License
- Apache-2.0