aws.kinesis.FirehoseDeliveryStream
Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service for delivering real-time data streams to destinations such as Amazon S3, Amazon Redshift, and Snowflake.
For more details, see the Amazon Kinesis Firehose Documentation.
Example Usage
Extended S3 Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const bucket = new aws.s3.BucketV2("bucket", {bucket: "tf-test-bucket"});
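// Trust policy that allows Kinesis Firehose to assume the delivery role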
const firehoseAssumeRole = aws.iam.getPolicyDocument({
statements: [{
effect: "Allow",
principals: [{
type: "Service",
identifiers: ["firehose.amazonaws.com"],
}],
actions: ["sts:AssumeRole"],
}],
});
const firehoseRole = new aws.iam.Role("firehose_role", {
name: "firehose_test_role",
assumeRolePolicy: firehoseAssumeRole.then(firehoseAssumeRole => firehoseAssumeRole.json),
});
const lambdaAssumeRole = aws.iam.getPolicyDocument({
statements: [{
effect: "Allow",
principals: [{
type: "Service",
identifiers: ["lambda.amazonaws.com"],
}],
actions: ["sts:AssumeRole"],
}],
});
const lambdaIam = new aws.iam.Role("lambda_iam", {
name: "lambda_iam",
assumeRolePolicy: lambdaAssumeRole.then(lambdaAssumeRole => lambdaAssumeRole.json),
});
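// Lambda function that Firehose invokes to transform records before delivery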
const lambdaProcessor = new aws.lambda.Function("lambda_processor", {
code: new pulumi.asset.FileArchive("lambda.zip"),
name: "firehose_lambda_processor",
role: lambdaIam.arn,
handler: "exports.handler",
runtime: aws.lambda.Runtime.NodeJS20dX,
});
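// Delivery stream that writes to S3 and runs each record batch through the Lambda processor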
const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extended_s3_stream", {
name: "kinesis-firehose-extended-s3-test-stream",
destination: "extended_s3",
extendedS3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
}],
}],
},
},
});
const bucketAcl = new aws.s3.BucketAclV2("bucket_acl", {
bucket: bucket.id,
acl: "private",
});
import pulumi
import pulumi_aws as aws
bucket = aws.s3.BucketV2("bucket", bucket="tf-test-bucket")
firehose_assume_role = aws.iam.get_policy_document(statements=[{
"effect": "Allow",
"principals": [{
"type": "Service",
"identifiers": ["firehose.amazonaws.com"],
}],
"actions": ["sts:AssumeRole"],
}])
firehose_role = aws.iam.Role("firehose_role",
name="firehose_test_role",
assume_role_policy=firehose_assume_role.json)
lambda_assume_role = aws.iam.get_policy_document(statements=[{
"effect": "Allow",
"principals": [{
"type": "Service",
"identifiers": ["lambda.amazonaws.com"],
}],
"actions": ["sts:AssumeRole"],
}])
lambda_iam = aws.iam.Role("lambda_iam",
name="lambda_iam",
assume_role_policy=lambda_assume_role.json)
lambda_processor = aws.lambda_.Function("lambda_processor",
code=pulumi.FileArchive("lambda.zip"),
name="firehose_lambda_processor",
role=lambda_iam.arn,
handler="exports.handler",
runtime=aws.lambda_.Runtime.NODE_JS20D_X)
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extended_s3_stream",
name="kinesis-firehose-extended-s3-test-stream",
destination="extended_s3",
extended_s3_configuration={
"role_arn": firehose_role.arn,
"bucket_arn": bucket.arn,
"processing_configuration": {
"enabled": True,
"processors": [{
"type": "Lambda",
"parameters": [{
"parameter_name": "LambdaArn",
"parameter_value": lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
}],
}],
},
})
bucket_acl = aws.s3.BucketAclV2("bucket_acl",
bucket=bucket.id,
acl="private")
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bucket, err := s3.NewBucketV2(ctx, "bucket", &s3.BucketV2Args{
Bucket: pulumi.String("tf-test-bucket"),
})
if err != nil {
return err
}
firehoseAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
Statements: []iam.GetPolicyDocumentStatement{
{
Effect: pulumi.StringRef("Allow"),
Principals: []iam.GetPolicyDocumentStatementPrincipal{
{
Type: "Service",
Identifiers: []string{
"firehose.amazonaws.com",
},
},
},
Actions: []string{
"sts:AssumeRole",
},
},
},
}, nil)
if err != nil {
return err
}
firehoseRole, err := iam.NewRole(ctx, "firehose_role", &iam.RoleArgs{
Name: pulumi.String("firehose_test_role"),
AssumeRolePolicy: pulumi.String(firehoseAssumeRole.Json),
})
if err != nil {
return err
}
lambdaAssumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
Statements: []iam.GetPolicyDocumentStatement{
{
Effect: pulumi.StringRef("Allow"),
Principals: []iam.GetPolicyDocumentStatementPrincipal{
{
Type: "Service",
Identifiers: []string{
"lambda.amazonaws.com",
},
},
},
Actions: []string{
"sts:AssumeRole",
},
},
},
}, nil)
if err != nil {
return err
}
lambdaIam, err := iam.NewRole(ctx, "lambda_iam", &iam.RoleArgs{
Name: pulumi.String("lambda_iam"),
AssumeRolePolicy: pulumi.String(lambdaAssumeRole.Json),
})
if err != nil {
return err
}
lambdaProcessor, err := lambda.NewFunction(ctx, "lambda_processor", &lambda.FunctionArgs{
Code: pulumi.NewFileArchive("lambda.zip"),
Name: pulumi.String("firehose_lambda_processor"),
Role: lambdaIam.Arn,
Handler: pulumi.String("exports.handler"),
Runtime: pulumi.String(lambda.RuntimeNodeJS20dX),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "extended_s3_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-extended-s3-test-stream"),
Destination: pulumi.String("extended_s3"),
ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
RoleArn: firehoseRole.Arn,
BucketArn: bucket.Arn,
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: lambdaProcessor.Arn.ApplyT(func(arn string) (string, error) {
return fmt.Sprintf("%v:$LATEST", arn), nil
}).(pulumi.StringOutput),
},
},
},
},
},
},
})
if err != nil {
return err
}
_, err = s3.NewBucketAclV2(ctx, "bucket_acl", &s3.BucketAclV2Args{
Bucket: bucket.ID(),
Acl: pulumi.String("private"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var bucket = new Aws.S3.BucketV2("bucket", new()
{
Bucket = "tf-test-bucket",
});
var firehoseAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Principals = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
{
Type = "Service",
Identifiers = new[]
{
"firehose.amazonaws.com",
},
},
},
Actions = new[]
{
"sts:AssumeRole",
},
},
},
});
var firehoseRole = new Aws.Iam.Role("firehose_role", new()
{
Name = "firehose_test_role",
AssumeRolePolicy = firehoseAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var lambdaAssumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Principals = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
{
Type = "Service",
Identifiers = new[]
{
"lambda.amazonaws.com",
},
},
},
Actions = new[]
{
"sts:AssumeRole",
},
},
},
});
var lambdaIam = new Aws.Iam.Role("lambda_iam", new()
{
Name = "lambda_iam",
AssumeRolePolicy = lambdaAssumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var lambdaProcessor = new Aws.Lambda.Function("lambda_processor", new()
{
Code = new FileArchive("lambda.zip"),
Name = "firehose_lambda_processor",
Role = lambdaIam.Arn,
Handler = "exports.handler",
Runtime = Aws.Lambda.Runtime.NodeJS20dX,
});
var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extended_s3_stream", new()
{
Name = "kinesis-firehose-extended-s3-test-stream",
Destination = "extended_s3",
ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = lambdaProcessor.Arn.Apply(arn => $"{arn}:$LATEST"),
},
},
},
},
},
},
});
var bucketAcl = new Aws.S3.BucketAclV2("bucket_acl", new()
{
Bucket = bucket.Id,
Acl = "private",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.s3.BucketV2Args;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.lambda.Function;
import com.pulumi.aws.lambda.FunctionArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
import com.pulumi.aws.s3.BucketAclV2;
import com.pulumi.aws.s3.BucketAclV2Args;
import com.pulumi.asset.FileArchive;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bucket = new BucketV2("bucket", BucketV2Args.builder()
.bucket("tf-test-bucket")
.build());
final var firehoseAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()
.type("Service")
.identifiers("firehose.amazonaws.com")
.build())
.actions("sts:AssumeRole")
.build())
.build());
var firehoseRole = new Role("firehoseRole", RoleArgs.builder()
.name("firehose_test_role")
.assumeRolePolicy(firehoseAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
.build());
final var lambdaAssumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()
.type("Service")
.identifiers("lambda.amazonaws.com")
.build())
.actions("sts:AssumeRole")
.build())
.build());
var lambdaIam = new Role("lambdaIam", RoleArgs.builder()
.name("lambda_iam")
.assumeRolePolicy(lambdaAssumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
.build());
var lambdaProcessor = new Function("lambdaProcessor", FunctionArgs.builder()
.code(new FileArchive("lambda.zip"))
.name("firehose_lambda_processor")
.role(lambdaIam.arn())
.handler("exports.handler")
.runtime("nodejs20.x")
.build());
var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-extended-s3-test-stream")
.destination("extended_s3")
.extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(lambdaProcessor.arn().applyValue(arn -> String.format("%s:$LATEST", arn)))
.build())
.build())
.build())
.build())
.build());
var bucketAcl = new BucketAclV2("bucketAcl", BucketAclV2Args.builder()
.bucket(bucket.id())
.acl("private")
.build());
}
}
resources:
extendedS3Stream:
type: aws:kinesis:FirehoseDeliveryStream
name: extended_s3_stream
properties:
name: kinesis-firehose-extended-s3-test-stream
destination: extended_s3
extendedS3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
processingConfiguration:
enabled: 'true'
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${lambdaProcessor.arn}:$LATEST
bucket:
type: aws:s3:BucketV2
properties:
bucket: tf-test-bucket
bucketAcl:
type: aws:s3:BucketAclV2
name: bucket_acl
properties:
bucket: ${bucket.id}
acl: private
firehoseRole:
type: aws:iam:Role
name: firehose_role
properties:
name: firehose_test_role
assumeRolePolicy: ${firehoseAssumeRole.json}
lambdaIam:
type: aws:iam:Role
name: lambda_iam
properties:
name: lambda_iam
assumeRolePolicy: ${lambdaAssumeRole.json}
lambdaProcessor:
type: aws:lambda:Function
name: lambda_processor
properties:
code:
fn::FileArchive: lambda.zip
name: firehose_lambda_processor
role: ${lambdaIam.arn}
handler: exports.handler
runtime: nodejs20.x
variables:
firehoseAssumeRole:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- effect: Allow
principals:
- type: Service
identifiers:
- firehose.amazonaws.com
actions:
- sts:AssumeRole
lambdaAssumeRole:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- effect: Allow
principals:
- type: Service
identifiers:
- lambda.amazonaws.com
actions:
- sts:AssumeRole
Extended S3 Destination with dynamic partitioning
These examples use built-in Firehose functionality rather than requiring a Lambda function.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extended_s3_stream", {
name: "kinesis-firehose-extended-s3-test-stream",
destination: "extended_s3",
extendedS3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
bufferingSize: 64,
dynamicPartitioningConfiguration: {
enabled: true,
},
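// The prefix references the customer_id partition key produced by the MetadataExtraction processor below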
prefix: "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
errorOutputPrefix: "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
processingConfiguration: {
enabled: true,
processors: [
{
type: "RecordDeAggregation",
parameters: [{
parameterName: "SubRecordType",
parameterValue: "JSON",
}],
},
{
type: "AppendDelimiterToRecord",
},
{
type: "MetadataExtraction",
parameters: [
{
parameterName: "JsonParsingEngine",
parameterValue: "JQ-1.6",
},
{
parameterName: "MetadataExtractionQuery",
parameterValue: "{customer_id:.customer_id}",
},
],
},
],
},
},
});
import pulumi
import pulumi_aws as aws
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extended_s3_stream",
name="kinesis-firehose-extended-s3-test-stream",
destination="extended_s3",
extended_s3_configuration={
"role_arn": firehose_role["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 64,
"dynamic_partitioning_configuration": {
"enabled": True,
},
"prefix": "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
"error_output_prefix": "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
"processing_configuration": {
"enabled": True,
"processors": [
{
"type": "RecordDeAggregation",
"parameters": [{
"parameter_name": "SubRecordType",
"parameter_value": "JSON",
}],
},
{
"type": "AppendDelimiterToRecord",
},
{
"type": "MetadataExtraction",
"parameters": [
{
"parameter_name": "JsonParsingEngine",
"parameter_value": "JQ-1.6",
},
{
"parameter_name": "MetadataExtractionQuery",
"parameter_value": "{customer_id:.customer_id}",
},
],
},
],
},
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "extended_s3_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-extended-s3-test-stream"),
Destination: pulumi.String("extended_s3"),
ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(64),
DynamicPartitioningConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs{
Enabled: pulumi.Bool(true),
},
Prefix: pulumi.String("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/"),
ErrorOutputPrefix: pulumi.String("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("RecordDeAggregation"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("SubRecordType"),
ParameterValue: pulumi.String("JSON"),
},
},
},
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("AppendDelimiterToRecord"),
},
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("MetadataExtraction"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("JsonParsingEngine"),
ParameterValue: pulumi.String("JQ-1.6"),
},
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("MetadataExtractionQuery"),
ParameterValue: pulumi.String("{customer_id:.customer_id}"),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extended_s3_stream", new()
{
Name = "kinesis-firehose-extended-s3-test-stream",
Destination = "extended_s3",
ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
BufferingSize = 64,
DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
{
Enabled = true,
},
Prefix = "data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
ErrorOutputPrefix = "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "RecordDeAggregation",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "SubRecordType",
ParameterValue = "JSON",
},
},
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "AppendDelimiterToRecord",
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "MetadataExtraction",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "JsonParsingEngine",
ParameterValue = "JQ-1.6",
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "MetadataExtractionQuery",
ParameterValue = "{customer_id:.customer_id}",
},
},
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-extended-s3-test-stream")
.destination("extended_s3")
.extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.bufferingSize(64)
.dynamicPartitioningConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs.builder()
.enabled("true")
.build())
.prefix("data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/")
.errorOutputPrefix("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/")
.processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("RecordDeAggregation")
.parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("SubRecordType")
.parameterValue("JSON")
.build())
.build(),
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("AppendDelimiterToRecord")
.build(),
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("MetadataExtraction")
.parameters(
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("JsonParsingEngine")
.parameterValue("JQ-1.6")
.build(),
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("MetadataExtractionQuery")
.parameterValue("{customer_id:.customer_id}")
.build())
.build())
.build())
.build())
.build());
}
}
resources:
extendedS3Stream:
type: aws:kinesis:FirehoseDeliveryStream
name: extended_s3_stream
properties:
name: kinesis-firehose-extended-s3-test-stream
destination: extended_s3
extendedS3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
bufferingSize: 64
dynamicPartitioningConfiguration:
enabled: 'true'
prefix: data/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/
errorOutputPrefix: errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/
processingConfiguration:
enabled: 'true'
processors:
- type: RecordDeAggregation
parameters:
- parameterName: SubRecordType
parameterValue: JSON
- type: AppendDelimiterToRecord
- type: MetadataExtraction
parameters:
- parameterName: JsonParsingEngine
parameterValue: JQ-1.6
- parameterName: MetadataExtractionQuery
parameterValue: '{customer_id:.customer_id}'
Multiple Dynamic Partitioning Keys (maximum of 50) can be added by comma-separating the parameter_value. The following example adds the Dynamic Partitioning Keys store_id and customer_id to the S3 prefix.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const extendedS3Stream = new aws.kinesis.FirehoseDeliveryStream("extended_s3_stream", {
name: "kinesis-firehose-extended-s3-test-stream",
destination: "extended_s3",
extendedS3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
bufferingSize: 64,
dynamicPartitioningConfiguration: {
enabled: true,
},
prefix: "data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
errorOutputPrefix: "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
processingConfiguration: {
enabled: true,
processors: [{
type: "MetadataExtraction",
parameters: [
{
parameterName: "JsonParsingEngine",
parameterValue: "JQ-1.6",
},
{
parameterName: "MetadataExtractionQuery",
parameterValue: "{store_id:.store_id,customer_id:.customer_id}",
},
],
}],
},
},
});
import pulumi
import pulumi_aws as aws
extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extended_s3_stream",
name="kinesis-firehose-extended-s3-test-stream",
destination="extended_s3",
extended_s3_configuration={
"role_arn": firehose_role["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 64,
"dynamic_partitioning_configuration": {
"enabled": True,
},
"prefix": "data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
"error_output_prefix": "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
"processing_configuration": {
"enabled": True,
"processors": [{
"type": "MetadataExtraction",
"parameters": [
{
"parameter_name": "JsonParsingEngine",
"parameter_value": "JQ-1.6",
},
{
"parameter_name": "MetadataExtractionQuery",
"parameter_value": "{store_id:.store_id,customer_id:.customer_id}",
},
],
}],
},
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "extended_s3_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-extended-s3-test-stream"),
Destination: pulumi.String("extended_s3"),
ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(64),
DynamicPartitioningConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs{
Enabled: pulumi.Bool(true),
},
Prefix: pulumi.String("data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/"),
ErrorOutputPrefix: pulumi.String("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("MetadataExtraction"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("JsonParsingEngine"),
ParameterValue: pulumi.String("JQ-1.6"),
},
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("MetadataExtractionQuery"),
ParameterValue: pulumi.String("{store_id:.store_id,customer_id:.customer_id}"),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var extendedS3Stream = new Aws.Kinesis.FirehoseDeliveryStream("extended_s3_stream", new()
{
Name = "kinesis-firehose-extended-s3-test-stream",
Destination = "extended_s3",
ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
BufferingSize = 64,
DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
{
Enabled = true,
},
Prefix = "data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/",
ErrorOutputPrefix = "errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "MetadataExtraction",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "JsonParsingEngine",
ParameterValue = "JQ-1.6",
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "MetadataExtractionQuery",
ParameterValue = "{store_id:.store_id,customer_id:.customer_id}",
},
},
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var extendedS3Stream = new FirehoseDeliveryStream("extendedS3Stream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-extended-s3-test-stream")
.destination("extended_s3")
.extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.bufferingSize(64)
.dynamicPartitioningConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs.builder()
.enabled("true")
.build())
.prefix("data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/")
.errorOutputPrefix("errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/")
.processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("MetadataExtraction")
.parameters(
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("JsonParsingEngine")
.parameterValue("JQ-1.6")
.build(),
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("MetadataExtractionQuery")
.parameterValue("{store_id:.store_id,customer_id:.customer_id}")
.build())
.build())
.build())
.build())
.build());
}
}
resources:
extendedS3Stream:
type: aws:kinesis:FirehoseDeliveryStream
name: extended_s3_stream
properties:
name: kinesis-firehose-extended-s3-test-stream
destination: extended_s3
extendedS3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
bufferingSize: 64
dynamicPartitioningConfiguration:
enabled: 'true'
prefix: data/store_id=!{partitionKeyFromQuery:store_id}/customer_id=!{partitionKeyFromQuery:customer_id}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/
errorOutputPrefix: errors/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/!{firehose:error-output-type}/
processingConfiguration:
enabled: 'true'
processors:
- type: MetadataExtraction
parameters:
- parameterName: JsonParsingEngine
parameterValue: JQ-1.6
- parameterName: MetadataExtractionQuery
parameterValue: '{store_id:.store_id,customer_id:.customer_id}'
Redshift Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.redshift.Cluster("test_cluster", {
clusterIdentifier: "tf-redshift-cluster",
databaseName: "test",
masterUsername: "testuser",
masterPassword: "T3stPass",
nodeType: "dc1.large",
clusterType: "single-node",
});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
name: "kinesis-firehose-test-stream",
destination: "redshift",
redshiftConfiguration: {
roleArn: firehoseRole.arn,
clusterJdbcurl: pulumi.interpolate`jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}`,
username: "testuser",
password: "T3stPass",
dataTableName: "test-table",
copyOptions: "delimiter '|'",
dataTableColumns: "test-col",
s3BackupMode: "Enabled",
s3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
bufferingSize: 10,
bufferingInterval: 400,
compressionFormat: "GZIP",
},
s3BackupConfiguration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
bufferingSize: 15,
bufferingInterval: 300,
compressionFormat: "GZIP",
},
},
});
import pulumi
import pulumi_aws as aws
test_cluster = aws.redshift.Cluster("test_cluster",
cluster_identifier="tf-redshift-cluster",
database_name="test",
master_username="testuser",
master_password="T3stPass",
node_type="dc1.large",
cluster_type="single-node")
test_stream = aws.kinesis.FirehoseDeliveryStream("test_stream",
name="kinesis-firehose-test-stream",
destination="redshift",
redshift_configuration={
"role_arn": firehose_role["arn"],
"cluster_jdbcurl": pulumi.Output.all(
endpoint=test_cluster.endpoint,
database_name=test_cluster.database_name
).apply(lambda resolved_outputs: f"jdbc:redshift://{resolved_outputs['endpoint']}/{resolved_outputs['database_name']}"),
"username": "testuser",
"password": "T3stPass",
"data_table_name": "test-table",
"copy_options": "delimiter '|'",
"data_table_columns": "test-col",
"s3_backup_mode": "Enabled",
"s3_configuration": {
"role_arn": firehose_role["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 10,
"buffering_interval": 400,
"compression_format": "GZIP",
},
"s3_backup_configuration": {
"role_arn": firehose_role["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 15,
"buffering_interval": 300,
"compression_format": "GZIP",
},
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/redshift"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := redshift.NewCluster(ctx, "test_cluster", &redshift.ClusterArgs{
ClusterIdentifier: pulumi.String("tf-redshift-cluster"),
DatabaseName: pulumi.String("test"),
MasterUsername: pulumi.String("testuser"),
MasterPassword: pulumi.String("T3stPass"),
NodeType: pulumi.String("dc1.large"),
ClusterType: pulumi.String("single-node"),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-test-stream"),
Destination: pulumi.String("redshift"),
RedshiftConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
ClusterJdbcurl: pulumi.All(testCluster.Endpoint, testCluster.DatabaseName).ApplyT(func(_args []interface{}) (string, error) {
endpoint := _args[0].(string)
databaseName := _args[1].(string)
return fmt.Sprintf("jdbc:redshift://%v/%v", endpoint, databaseName), nil
}).(pulumi.StringOutput),
Username: pulumi.String("testuser"),
Password: pulumi.String("T3stPass"),
DataTableName: pulumi.String("test-table"),
CopyOptions: pulumi.String("delimiter '|'"),
DataTableColumns: pulumi.String("test-col"),
S3BackupMode: pulumi.String("Enabled"),
S3Configuration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
S3BackupConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(15),
BufferingInterval: pulumi.Int(300),
CompressionFormat: pulumi.String("GZIP"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.RedShift.Cluster("test_cluster", new()
{
ClusterIdentifier = "tf-redshift-cluster",
DatabaseName = "test",
MasterUsername = "testuser",
MasterPassword = "T3stPass",
NodeType = "dc1.large",
ClusterType = "single-node",
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("test_stream", new()
{
Name = "kinesis-firehose-test-stream",
Destination = "redshift",
RedshiftConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs
{
RoleArn = firehoseRole.Arn,
ClusterJdbcurl = Output.Tuple(testCluster.Endpoint, testCluster.DatabaseName).Apply(values =>
{
var endpoint = values.Item1;
var databaseName = values.Item2;
return $"jdbc:redshift://{endpoint}/{databaseName}";
}),
Username = "testuser",
Password = "T3stPass",
DataTableName = "test-table",
CopyOptions = "delimiter '|'",
DataTableColumns = "test-col",
S3BackupMode = "Enabled",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
BufferingSize = 10,
BufferingInterval = 400,
CompressionFormat = "GZIP",
},
S3BackupConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
BufferingSize = 15,
BufferingInterval = 300,
CompressionFormat = "GZIP",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.redshift.Cluster;
import com.pulumi.aws.redshift.ClusterArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Cluster("testCluster", ClusterArgs.builder()
.clusterIdentifier("tf-redshift-cluster")
.databaseName("test")
.masterUsername("testuser")
.masterPassword("T3stPass")
.nodeType("dc1.large")
.clusterType("single-node")
.build());
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-test-stream")
.destination("redshift")
.redshiftConfiguration(FirehoseDeliveryStreamRedshiftConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.clusterJdbcurl(Output.tuple(testCluster.endpoint(), testCluster.databaseName()).applyValue(values -> {
var endpoint = values.t1;
var databaseName = values.t2;
return String.format("jdbc:redshift://%s/%s", endpoint,databaseName);
}))
.username("testuser")
.password("T3stPass")
.dataTableName("test-table")
.copyOptions("delimiter '|'")
.dataTableColumns("test-col")
.s3BackupMode("Enabled")
.s3Configuration(FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.bufferingSize(10)
.bufferingInterval(400)
.compressionFormat("GZIP")
.build())
.s3BackupConfiguration(FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.bufferingSize(15)
.bufferingInterval(300)
.compressionFormat("GZIP")
.build())
.build())
.build());
}
}
resources:
testCluster:
type: aws:redshift:Cluster
name: test_cluster
properties:
clusterIdentifier: tf-redshift-cluster
databaseName: test
masterUsername: testuser
masterPassword: T3stPass
nodeType: dc1.large
clusterType: single-node
testStream:
type: aws:kinesis:FirehoseDeliveryStream
name: test_stream
properties:
name: kinesis-firehose-test-stream
destination: redshift
redshiftConfiguration:
roleArn: ${firehoseRole.arn}
clusterJdbcurl: jdbc:redshift://${testCluster.endpoint}/${testCluster.databaseName}
username: testuser
password: T3stPass
dataTableName: test-table
copyOptions: delimiter '|'
dataTableColumns: test-col
s3BackupMode: Enabled
s3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
bufferingSize: 10
bufferingInterval: 400
compressionFormat: GZIP
s3BackupConfiguration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
bufferingSize: 15
bufferingInterval: 300
compressionFormat: GZIP
Elasticsearch Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.elasticsearch.Domain("test_cluster", {domainName: "firehose-es-test"});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
name: "kinesis-firehose-test-stream",
destination: "elasticsearch",
elasticsearchConfiguration: {
domainArn: testCluster.arn,
roleArn: firehoseRole.arn,
indexName: "test",
typeName: "test",
s3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
bufferingSize: 10,
bufferingInterval: 400,
compressionFormat: "GZIP",
},
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
}],
}],
},
},
});
import pulumi
import pulumi_aws as aws
test_cluster = aws.elasticsearch.Domain("test_cluster", domain_name="firehose-es-test")
test_stream = aws.kinesis.FirehoseDeliveryStream("test_stream",
name="kinesis-firehose-test-stream",
destination="elasticsearch",
elasticsearch_configuration={
"domain_arn": test_cluster.arn,
"role_arn": firehose_role["arn"],
"index_name": "test",
"type_name": "test",
"s3_configuration": {
"role_arn": firehose_role["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 10,
"buffering_interval": 400,
"compression_format": "GZIP",
},
"processing_configuration": {
"enabled": True,
"processors": [{
"type": "Lambda",
"parameters": [{
"parameter_name": "LambdaArn",
"parameter_value": f"{lambda_processor['arn']}:$LATEST",
}],
}],
},
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/elasticsearch"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := elasticsearch.NewDomain(ctx, "test_cluster", &elasticsearch.DomainArgs{
DomainName: pulumi.String("firehose-es-test"),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-test-stream"),
Destination: pulumi.String("elasticsearch"),
ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(firehoseRole.Arn),
IndexName: pulumi.String("test"),
TypeName: pulumi.String("test"),
S3Configuration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: pulumi.Sprintf("%v:$LATEST", lambdaProcessor.Arn),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.ElasticSearch.Domain("test_cluster", new()
{
DomainName = "firehose-es-test",
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("test_stream", new()
{
Name = "kinesis-firehose-test-stream",
Destination = "elasticsearch",
ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = firehoseRole.Arn,
IndexName = "test",
TypeName = "test",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
BufferingSize = 10,
BufferingInterval = 400,
CompressionFormat = "GZIP",
},
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = $"{lambdaProcessor.Arn}:$LATEST",
},
},
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.elasticsearch.Domain;
import com.pulumi.aws.elasticsearch.DomainArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster", DomainArgs.builder()
.domainName("firehose-es-test")
.build());
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-test-stream")
.destination("elasticsearch")
.elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(firehoseRole.arn())
.indexName("test")
.typeName("test")
.s3Configuration(FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.bufferingSize(10)
.bufferingInterval(400)
.compressionFormat("GZIP")
.build())
.processingConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(String.format("%s:$LATEST", lambdaProcessor.arn()))
.build())
.build())
.build())
.build())
.build());
}
}
resources:
testCluster:
type: aws:elasticsearch:Domain
name: test_cluster
properties:
domainName: firehose-es-test
testStream:
type: aws:kinesis:FirehoseDeliveryStream
name: test_stream
properties:
name: kinesis-firehose-test-stream
destination: elasticsearch
elasticsearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${firehoseRole.arn}
indexName: test
typeName: test
s3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
bufferingSize: 10
bufferingInterval: 400
compressionFormat: GZIP
processingConfiguration:
enabled: 'true'
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${lambdaProcessor.arn}:$LATEST
Elasticsearch Destination With VPC
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.elasticsearch.Domain("test_cluster", {
domainName: "es-test",
clusterConfig: {
instanceCount: 2,
zoneAwarenessEnabled: true,
instanceType: "t2.small.elasticsearch",
},
ebsOptions: {
ebsEnabled: true,
volumeSize: 10,
},
vpcOptions: {
securityGroupIds: [first.id],
subnetIds: [
firstAwsSubnet.id,
second.id,
],
},
});
const firehose_elasticsearch = aws.iam.getPolicyDocumentOutput({
statements: [
{
effect: "Allow",
actions: ["es:*"],
resources: [
testCluster.arn,
pulumi.interpolate`${testCluster.arn}/*`,
],
},
{
effect: "Allow",
actions: [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
],
resources: ["*"],
},
],
});
const firehose_elasticsearchRolePolicy = new aws.iam.RolePolicy("firehose-elasticsearch", {
name: "elasticsearch",
role: firehose.id,
policy: firehose_elasticsearch.apply(firehose_elasticsearch => firehose_elasticsearch.json),
});
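// dependsOn below ensures the role policy exists before Firehose attempts to reach the domain inside the VPC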
const test = new aws.kinesis.FirehoseDeliveryStream("test", {
name: "kinesis-firehose-es",
destination: "elasticsearch",
elasticsearchConfiguration: {
domainArn: testCluster.arn,
roleArn: firehose.arn,
indexName: "test",
typeName: "test",
s3Configuration: {
roleArn: firehose.arn,
bucketArn: bucket.arn,
},
vpcConfig: {
subnetIds: [
firstAwsSubnet.id,
second.id,
],
securityGroupIds: [first.id],
roleArn: firehose.arn,
},
},
}, {
dependsOn: [firehose_elasticsearchRolePolicy],
});
import pulumi
import pulumi_aws as aws
test_cluster = aws.elasticsearch.Domain("test_cluster",
domain_name="es-test",
cluster_config={
"instance_count": 2,
"zone_awareness_enabled": True,
"instance_type": "t2.small.elasticsearch",
},
ebs_options={
"ebs_enabled": True,
"volume_size": 10,
},
vpc_options={
"security_group_ids": [first["id"]],
"subnet_ids": [
first_aws_subnet["id"],
second["id"],
],
})
firehose_elasticsearch = aws.iam.get_policy_document_output(statements=[
{
"effect": "Allow",
"actions": ["es:*"],
"resources": [
test_cluster.arn,
test_cluster.arn.apply(lambda arn: f"{arn}/*"),
],
},
{
"effect": "Allow",
"actions": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
],
"resources": ["*"],
},
])
firehose_elasticsearch_role_policy = aws.iam.RolePolicy("firehose-elasticsearch",
name="elasticsearch",
role=firehose["id"],
policy=firehose_elasticsearch.json)
test = aws.kinesis.FirehoseDeliveryStream("test",
name="kinesis-firehose-es",
destination="elasticsearch",
elasticsearch_configuration={
"domain_arn": test_cluster.arn,
"role_arn": firehose["arn"],
"index_name": "test",
"type_name": "test",
"s3_configuration": {
"role_arn": firehose["arn"],
"bucket_arn": bucket["arn"],
},
"vpc_config": {
"subnet_ids": [
first_aws_subnet["id"],
second["id"],
],
"security_group_ids": [first["id"]],
"role_arn": firehose["arn"],
},
},
opts = pulumi.ResourceOptions(depends_on=[firehose_elasticsearch_role_policy]))
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/elasticsearch"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := elasticsearch.NewDomain(ctx, "test_cluster", &elasticsearch.DomainArgs{
DomainName: pulumi.String("es-test"),
ClusterConfig: &elasticsearch.DomainClusterConfigArgs{
InstanceCount: pulumi.Int(2),
ZoneAwarenessEnabled: pulumi.Bool(true),
InstanceType: pulumi.String("t2.small.elasticsearch"),
},
EbsOptions: &elasticsearch.DomainEbsOptionsArgs{
EbsEnabled: pulumi.Bool(true),
VolumeSize: pulumi.Int(10),
},
VpcOptions: &elasticsearch.DomainVpcOptionsArgs{
SecurityGroupIds: pulumi.StringArray{
first.Id,
},
SubnetIds: pulumi.StringArray{
firstAwsSubnet.Id,
second.Id,
},
},
})
if err != nil {
return err
}
firehose_elasticsearch := iam.GetPolicyDocumentOutput(ctx, iam.GetPolicyDocumentOutputArgs{
Statements: iam.GetPolicyDocumentStatementArray{
&iam.GetPolicyDocumentStatementArgs{
Effect: pulumi.String("Allow"),
Actions: pulumi.StringArray{
pulumi.String("es:*"),
},
Resources: pulumi.StringArray{
testCluster.Arn,
testCluster.Arn.ApplyT(func(arn string) (string, error) {
return fmt.Sprintf("%v/*", arn), nil
}).(pulumi.StringOutput),
},
},
&iam.GetPolicyDocumentStatementArgs{
Effect: pulumi.String("Allow"),
Actions: pulumi.StringArray{
pulumi.String("ec2:DescribeVpcs"),
pulumi.String("ec2:DescribeVpcAttribute"),
pulumi.String("ec2:DescribeSubnets"),
pulumi.String("ec2:DescribeSecurityGroups"),
pulumi.String("ec2:DescribeNetworkInterfaces"),
pulumi.String("ec2:CreateNetworkInterface"),
pulumi.String("ec2:CreateNetworkInterfacePermission"),
pulumi.String("ec2:DeleteNetworkInterface"),
},
Resources: pulumi.StringArray{
pulumi.String("*"),
},
},
},
}, nil)
_, err = iam.NewRolePolicy(ctx, "firehose-elasticsearch", &iam.RolePolicyArgs{
Name: pulumi.String("elasticsearch"),
Role: pulumi.Any(firehose.Id),
Policy: firehose_elasticsearch.ApplyT(func(firehose_elasticsearch iam.GetPolicyDocumentResult) (*string, error) {
return &firehose_elasticsearch.Json, nil
}).(pulumi.StringPtrOutput),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-es"),
Destination: pulumi.String("elasticsearch"),
ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(firehose.Arn),
IndexName: pulumi.String("test"),
TypeName: pulumi.String("test"),
S3Configuration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehose.Arn),
BucketArn: pulumi.Any(bucket.Arn),
},
VpcConfig: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs{
SubnetIds: pulumi.StringArray{
firstAwsSubnet.Id,
second.Id,
},
SecurityGroupIds: pulumi.StringArray{
first.Id,
},
RoleArn: pulumi.Any(firehose.Arn),
},
},
}, pulumi.DependsOn([]pulumi.Resource{
firehose_elasticsearchRolePolicy,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.ElasticSearch.Domain("test_cluster", new()
{
DomainName = "es-test",
ClusterConfig = new Aws.ElasticSearch.Inputs.DomainClusterConfigArgs
{
InstanceCount = 2,
ZoneAwarenessEnabled = true,
InstanceType = "t2.small.elasticsearch",
},
EbsOptions = new Aws.ElasticSearch.Inputs.DomainEbsOptionsArgs
{
EbsEnabled = true,
VolumeSize = 10,
},
VpcOptions = new Aws.ElasticSearch.Inputs.DomainVpcOptionsArgs
{
SecurityGroupIds = new[]
{
first.Id,
},
SubnetIds = new[]
{
firstAwsSubnet.Id,
second.Id,
},
},
});
var firehose_elasticsearch = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Actions = new[]
{
"es:*",
},
Resources = new[]
{
testCluster.Arn,
$"{testCluster.Arn}/*",
},
},
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Effect = "Allow",
Actions = new[]
{
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
},
Resources = new[]
{
"*",
},
},
},
});
var firehose_elasticsearchRolePolicy = new Aws.Iam.RolePolicy("firehose-elasticsearch", new()
{
Name = "elasticsearch",
Role = firehose.Id,
Policy = firehose_elasticsearch.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
{
Name = "kinesis-firehose-es",
Destination = "elasticsearch",
ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = firehose.Arn,
IndexName = "test",
TypeName = "test",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs
{
RoleArn = firehose.Arn,
BucketArn = bucket.Arn,
},
VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
{
SubnetIds = new[]
{
firstAwsSubnet.Id,
second.Id,
},
SecurityGroupIds = new[]
{
first.Id,
},
RoleArn = firehose.Arn,
},
},
}, new CustomResourceOptions
{
DependsOn =
{
firehose_elasticsearchRolePolicy,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.elasticsearch.Domain;
import com.pulumi.aws.elasticsearch.DomainArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainClusterConfigArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainEbsOptionsArgs;
import com.pulumi.aws.elasticsearch.inputs.DomainVpcOptionsArgs;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.RolePolicy;
import com.pulumi.aws.iam.RolePolicyArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster", DomainArgs.builder()
.domainName("es-test")
.clusterConfig(DomainClusterConfigArgs.builder()
.instanceCount(2)
.zoneAwarenessEnabled(true)
.instanceType("t2.small.elasticsearch")
.build())
.ebsOptions(DomainEbsOptionsArgs.builder()
.ebsEnabled(true)
.volumeSize(10)
.build())
.vpcOptions(DomainVpcOptionsArgs.builder()
.securityGroupIds(first.id())
.subnetIds(
firstAwsSubnet.id(),
second.id())
.build())
.build());
final var firehoseElasticsearch = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(
GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.actions("es:*")
.resources(
testCluster.arn(),
testCluster.arn().applyValue(arn -> String.format("%s/*", arn)))
.build(),
GetPolicyDocumentStatementArgs.builder()
.effect("Allow")
.actions(
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface")
.resources("*")
.build())
.build());
var firehose_elasticsearchRolePolicy = new RolePolicy("firehose-elasticsearchRolePolicy", RolePolicyArgs.builder()
.name("elasticsearch")
.role(firehose.id())
.policy(firehoseElasticsearch.applyValue(result -> result.json()))
.build());
var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-es")
.destination("elasticsearch")
.elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(firehose.arn())
.indexName("test")
.typeName("test")
.s3Configuration(FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs.builder()
.roleArn(firehose.arn())
.bucketArn(bucket.arn())
.build())
.vpcConfig(FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs.builder()
.subnetIds(
firstAwsSubnet.id(),
second.id())
.securityGroupIds(first.id())
.roleArn(firehose.arn())
.build())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(firehose_elasticsearchRolePolicy)
.build());
}
}
resources:
testCluster:
type: aws:elasticsearch:Domain
name: test_cluster
properties:
domainName: es-test
clusterConfig:
instanceCount: 2
zoneAwarenessEnabled: true
instanceType: t2.small.elasticsearch
ebsOptions:
ebsEnabled: true
volumeSize: 10
vpcOptions:
securityGroupIds:
- ${first.id}
subnetIds:
- ${firstAwsSubnet.id}
- ${second.id}
firehose-elasticsearchRolePolicy:
type: aws:iam:RolePolicy
name: firehose-elasticsearch
properties:
name: elasticsearch
role: ${firehose.id}
policy: ${["firehose-elasticsearch"].json}
test:
type: aws:kinesis:FirehoseDeliveryStream
properties:
name: kinesis-firehose-es
destination: elasticsearch
elasticsearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${firehose.arn}
indexName: test
typeName: test
s3Configuration:
roleArn: ${firehose.arn}
bucketArn: ${bucket.arn}
vpcConfig:
subnetIds:
- ${firstAwsSubnet.id}
- ${second.id}
securityGroupIds:
- ${first.id}
roleArn: ${firehose.arn}
options:
dependsOn:
- ${["firehose-elasticsearchRolePolicy"]}
variables:
firehose-elasticsearch:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- effect: Allow
actions:
- es:*
resources:
- ${testCluster.arn}
- ${testCluster.arn}/*
- effect: Allow
actions:
- ec2:DescribeVpcs
- ec2:DescribeVpcAttribute
- ec2:DescribeSubnets
- ec2:DescribeSecurityGroups
- ec2:DescribeNetworkInterfaces
- ec2:CreateNetworkInterface
- ec2:CreateNetworkInterfacePermission
- ec2:DeleteNetworkInterface
resources:
- '*'
OpenSearch Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.opensearch.Domain("test_cluster", {domainName: "firehose-os-test"});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
name: "kinesis-firehose-test-stream",
destination: "opensearch",
opensearchConfiguration: {
domainArn: testCluster.arn,
roleArn: firehoseRole.arn,
indexName: "test",
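// Backup S3 bucket for records (by default only documents that fail delivery are backed up);
// bufferingSize is in MB and bufferingInterval in seconds.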
s3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
bufferingSize: 10,
bufferingInterval: 400,
compressionFormat: "GZIP",
},
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: `${lambdaProcessor.arn}:$LATEST`,
}],
}],
},
},
});
import pulumi
import pulumi_aws as aws
test_cluster = aws.opensearch.Domain("test_cluster", domain_name="firehose-os-test")
test_stream = aws.kinesis.FirehoseDeliveryStream("test_stream",
name="kinesis-firehose-test-stream",
destination="opensearch",
opensearch_configuration={
"domain_arn": test_cluster.arn,
"role_arn": firehose_role["arn"],
"index_name": "test",
"s3_configuration": {
"role_arn": firehose_role["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 10,
"buffering_interval": 400,
"compression_format": "GZIP",
},
"processing_configuration": {
"enabled": True,
"processors": [{
"type": "Lambda",
"parameters": [{
"parameter_name": "LambdaArn",
"parameter_value": f"{lambda_processor['arn']}:$LATEST",
}],
}],
},
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/opensearch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := opensearch.NewDomain(ctx, "test_cluster", &opensearch.DomainArgs{
DomainName: pulumi.String("firehose-os-test"),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-test-stream"),
Destination: pulumi.String("opensearch"),
OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(firehoseRole.Arn),
IndexName: pulumi.String("test"),
S3Configuration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: pulumi.Sprintf("%v:$LATEST", lambdaProcessor.Arn),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.OpenSearch.Domain("test_cluster", new()
{
DomainName = "firehose-os-test",
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("test_stream", new()
{
Name = "kinesis-firehose-test-stream",
Destination = "opensearch",
OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = firehoseRole.Arn,
IndexName = "test",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
BufferingSize = 10,
BufferingInterval = 400,
CompressionFormat = "GZIP",
},
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = $"{lambdaProcessor.Arn}:$LATEST",
},
},
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.opensearch.Domain;
import com.pulumi.aws.opensearch.DomainArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster", DomainArgs.builder()
.domainName("firehose-os-test")
.build());
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-test-stream")
.destination("opensearch")
.opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(firehoseRole.arn())
.indexName("test")
.s3Configuration(FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.bufferingSize(10)
.bufferingInterval(400)
.compressionFormat("GZIP")
.build())
.processingConfiguration(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(String.format("%s:$LATEST", lambdaProcessor.arn()))
.build())
.build())
.build())
.build())
.build());
}
}
resources:
testCluster:
type: aws:opensearch:Domain
name: test_cluster
properties:
domainName: firehose-os-test
testStream:
type: aws:kinesis:FirehoseDeliveryStream
name: test_stream
properties:
name: kinesis-firehose-test-stream
destination: opensearch
opensearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${firehoseRole.arn}
indexName: test
s3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
bufferingSize: 10
bufferingInterval: 400
compressionFormat: GZIP
processingConfiguration:
enabled: 'true'
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${lambdaProcessor.arn}:$LATEST
OpenSearch Destination With VPC
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCluster = new aws.opensearch.Domain("test_cluster", {
domainName: "es-test",
clusterConfig: {
instanceCount: 2,
zoneAwarenessEnabled: true,
instanceType: "m4.large.search",
},
ebsOptions: {
ebsEnabled: true,
volumeSize: 10,
},
vpcOptions: {
securityGroupIds: [first.id],
subnetIds: [
firstAwsSubnet.id,
second.id,
],
},
});
const firehose_opensearch = new aws.iam.RolePolicy("firehose-opensearch", {
name: "opensearch",
role: firehose.id,
policy: pulumi.interpolate`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"${testCluster.arn}",
"${testCluster.arn}/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
`,
});
const test = new aws.kinesis.FirehoseDeliveryStream("test", {
name: "pulumi-kinesis-firehose-os",
destination: "opensearch",
opensearchConfiguration: {
domainArn: testCluster.arn,
roleArn: firehose.arn,
indexName: "test",
s3Configuration: {
roleArn: firehose.arn,
bucketArn: bucket.arn,
},
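// Delivering into the domain's VPC requires the EC2 network-interface permissions
// granted by the firehose-opensearch policy above.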
vpcConfig: {
subnetIds: [
firstAwsSubnet.id,
second.id,
],
securityGroupIds: [first.id],
roleArn: firehose.arn,
},
},
}, {
dependsOn: [firehose_opensearch],
});
import pulumi
import pulumi_aws as aws
test_cluster = aws.opensearch.Domain("test_cluster",
domain_name="es-test",
cluster_config={
"instance_count": 2,
"zone_awareness_enabled": True,
"instance_type": "m4.large.search",
},
ebs_options={
"ebs_enabled": True,
"volume_size": 10,
},
vpc_options={
"security_group_ids": [first["id"]],
"subnet_ids": [
first_aws_subnet["id"],
second["id"],
],
})
firehose_opensearch = aws.iam.RolePolicy("firehose-opensearch",
name="opensearch",
role=firehose["id"],
policy=pulumi.Output.all(
testClusterArn=test_cluster.arn,
testClusterArn1=test_cluster.arn
).apply(lambda resolved_outputs: f"""{{
"Version": "2012-10-17",
"Statement": [
{{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"{resolved_outputs['testClusterArn']}",
"{resolved_outputs['testClusterArn1']}/*"
]
}},
{{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}}
]
}}
""")
)
test = aws.kinesis.FirehoseDeliveryStream("test",
name="pulumi-kinesis-firehose-os",
destination="opensearch",
opensearch_configuration={
"domain_arn": test_cluster.arn,
"role_arn": firehose["arn"],
"index_name": "test",
"s3_configuration": {
"role_arn": firehose["arn"],
"bucket_arn": bucket["arn"],
},
"vpc_config": {
"subnet_ids": [
first_aws_subnet["id"],
second["id"],
],
"security_group_ids": [first["id"]],
"role_arn": firehose["arn"],
},
},
opts = pulumi.ResourceOptions(depends_on=[firehose_opensearch]))
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/opensearch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCluster, err := opensearch.NewDomain(ctx, "test_cluster", &opensearch.DomainArgs{
DomainName: pulumi.String("es-test"),
ClusterConfig: &opensearch.DomainClusterConfigArgs{
InstanceCount: pulumi.Int(2),
ZoneAwarenessEnabled: pulumi.Bool(true),
InstanceType: pulumi.String("m4.large.search"),
},
EbsOptions: &opensearch.DomainEbsOptionsArgs{
EbsEnabled: pulumi.Bool(true),
VolumeSize: pulumi.Int(10),
},
VpcOptions: &opensearch.DomainVpcOptionsArgs{
SecurityGroupIds: pulumi.StringArray{
first.Id,
},
SubnetIds: pulumi.StringArray{
firstAwsSubnet.Id,
second.Id,
},
},
})
if err != nil {
return err
}
firehose_opensearch, err := iam.NewRolePolicy(ctx, "firehose-opensearch", &iam.RolePolicyArgs{
Name: pulumi.String("opensearch"),
Role: pulumi.Any(firehose.Id),
Policy: pulumi.All(testCluster.Arn, testCluster.Arn).ApplyT(func(_args []interface{}) (string, error) {
testClusterArn := _args[0].(string)
testClusterArn1 := _args[1].(string)
return fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"%v",
"%v/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
`, testClusterArn, testClusterArn1), nil
}).(pulumi.StringOutput),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("pulumi-kinesis-firehose-os"),
Destination: pulumi.String("opensearch"),
OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
DomainArn: testCluster.Arn,
RoleArn: pulumi.Any(firehose.Arn),
IndexName: pulumi.String("test"),
S3Configuration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehose.Arn),
BucketArn: pulumi.Any(bucket.Arn),
},
VpcConfig: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs{
SubnetIds: pulumi.StringArray{
firstAwsSubnet.Id,
second.Id,
},
SecurityGroupIds: pulumi.StringArray{
first.Id,
},
RoleArn: pulumi.Any(firehose.Arn),
},
},
}, pulumi.DependsOn([]pulumi.Resource{
firehose_opensearch,
}))
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCluster = new Aws.OpenSearch.Domain("test_cluster", new()
{
DomainName = "es-test",
ClusterConfig = new Aws.OpenSearch.Inputs.DomainClusterConfigArgs
{
InstanceCount = 2,
ZoneAwarenessEnabled = true,
InstanceType = "m4.large.search",
},
EbsOptions = new Aws.OpenSearch.Inputs.DomainEbsOptionsArgs
{
EbsEnabled = true,
VolumeSize = 10,
},
VpcOptions = new Aws.OpenSearch.Inputs.DomainVpcOptionsArgs
{
SecurityGroupIds = new[]
{
first.Id,
},
SubnetIds = new[]
{
firstAwsSubnet.Id,
second.Id,
},
},
});
var firehose_opensearch = new Aws.Iam.RolePolicy("firehose-opensearch", new()
{
Name = "opensearch",
Role = firehose.Id,
Policy = Output.Tuple(testCluster.Arn, testCluster.Arn).Apply(values =>
{
var testClusterArn = values.Item1;
var testClusterArn1 = values.Item2;
return @$"{{
""Version"": ""2012-10-17"",
""Statement"": [
{{
""Effect"": ""Allow"",
""Action"": [
""es:*""
],
""Resource"": [
""{testClusterArn}"",
""{testClusterArn1}/*""
]
}},
{{
""Effect"": ""Allow"",
""Action"": [
""ec2:DescribeVpcs"",
""ec2:DescribeVpcAttribute"",
""ec2:DescribeSubnets"",
""ec2:DescribeSecurityGroups"",
""ec2:DescribeNetworkInterfaces"",
""ec2:CreateNetworkInterface"",
""ec2:CreateNetworkInterfacePermission"",
""ec2:DeleteNetworkInterface""
],
""Resource"": [
""*""
]
}}
]
}}
";
}),
});
var test = new Aws.Kinesis.FirehoseDeliveryStream("test", new()
{
Name = "pulumi-kinesis-firehose-os",
Destination = "opensearch",
OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
{
DomainArn = testCluster.Arn,
RoleArn = firehose.Arn,
IndexName = "test",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs
{
RoleArn = firehose.Arn,
BucketArn = bucket.Arn,
},
VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs
{
SubnetIds = new[]
{
firstAwsSubnet.Id,
second.Id,
},
SecurityGroupIds = new[]
{
first.Id,
},
RoleArn = firehose.Arn,
},
},
}, new CustomResourceOptions
{
DependsOn =
{
firehose_opensearch,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.opensearch.Domain;
import com.pulumi.aws.opensearch.DomainArgs;
import com.pulumi.aws.opensearch.inputs.DomainClusterConfigArgs;
import com.pulumi.aws.opensearch.inputs.DomainEbsOptionsArgs;
import com.pulumi.aws.opensearch.inputs.DomainVpcOptionsArgs;
import com.pulumi.aws.iam.RolePolicy;
import com.pulumi.aws.iam.RolePolicyArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCluster = new Domain("testCluster", DomainArgs.builder()
.domainName("es-test")
.clusterConfig(DomainClusterConfigArgs.builder()
.instanceCount(2)
.zoneAwarenessEnabled(true)
.instanceType("m4.large.search")
.build())
.ebsOptions(DomainEbsOptionsArgs.builder()
.ebsEnabled(true)
.volumeSize(10)
.build())
.vpcOptions(DomainVpcOptionsArgs.builder()
.securityGroupIds(first.id())
.subnetIds(
firstAwsSubnet.id(),
second.id())
.build())
.build());
var firehose_opensearch = new RolePolicy("firehose-opensearch", RolePolicyArgs.builder()
.name("opensearch")
.role(firehose.id())
.policy(Output.tuple(testCluster.arn(), testCluster.arn()).applyValue(values -> {
var testClusterArn = values.t1;
var testClusterArn1 = values.t2;
return """
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"%s",
"%s/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
", testClusterArn,testClusterArn1);
}))
.build());
var test = new FirehoseDeliveryStream("test", FirehoseDeliveryStreamArgs.builder()
.name("pulumi-kinesis-firehose-os")
.destination("opensearch")
.opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
.domainArn(testCluster.arn())
.roleArn(firehose.arn())
.indexName("test")
.s3Configuration(FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs.builder()
.roleArn(firehose.arn())
.bucketArn(bucket.arn())
.build())
.vpcConfig(FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs.builder()
.subnetIds(
firstAwsSubnet.id(),
second.id())
.securityGroupIds(first.id())
.roleArn(firehose.arn())
.build())
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(firehose_opensearch)
.build());
}
}
resources:
testCluster:
type: aws:opensearch:Domain
name: test_cluster
properties:
domainName: es-test
clusterConfig:
instanceCount: 2
zoneAwarenessEnabled: true
instanceType: m4.large.search
ebsOptions:
ebsEnabled: true
volumeSize: 10
vpcOptions:
securityGroupIds:
- ${first.id}
subnetIds:
- ${firstAwsSubnet.id}
- ${second.id}
firehose-opensearch:
type: aws:iam:RolePolicy
properties:
name: opensearch
role: ${firehose.id}
policy: |
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"${testCluster.arn}",
"${testCluster.arn}/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
test:
type: aws:kinesis:FirehoseDeliveryStream
properties:
name: pulumi-kinesis-firehose-os
destination: opensearch
opensearchConfiguration:
domainArn: ${testCluster.arn}
roleArn: ${firehose.arn}
indexName: test
s3Configuration:
roleArn: ${firehose.arn}
bucketArn: ${bucket.arn}
vpcConfig:
subnetIds:
- ${firstAwsSubnet.id}
- ${second.id}
securityGroupIds:
- ${first.id}
roleArn: ${firehose.arn}
options:
dependsOn:
- ${["firehose-opensearch"]}
OpenSearch Serverless Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testCollection = new aws.opensearch.ServerlessCollection("test_collection", {name: "firehose-osserverless-test"});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
name: "kinesis-firehose-test-stream",
destination: "opensearchserverless",
opensearchserverlessConfiguration: {
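// OpenSearch Serverless is addressed by its collection endpoint rather than a domain ARN.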
collectionEndpoint: testCollection.collectionEndpoint,
roleArn: firehoseRole.arn,
indexName: "test",
s3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
bufferingSize: 10,
bufferingInterval: 400,
compressionFormat: "GZIP",
},
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: `${lambdaProcessor.arn}:$LATEST`,
}],
}],
},
},
});
import pulumi
import pulumi_aws as aws
test_collection = aws.opensearch.ServerlessCollection("test_collection", name="firehose-osserverless-test")
test_stream = aws.kinesis.FirehoseDeliveryStream("test_stream",
name="kinesis-firehose-test-stream",
destination="opensearchserverless",
opensearchserverless_configuration={
"collection_endpoint": test_collection.collection_endpoint,
"role_arn": firehose_role["arn"],
"index_name": "test",
"s3_configuration": {
"role_arn": firehose_role["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 10,
"buffering_interval": 400,
"compression_format": "GZIP",
},
"processing_configuration": {
"enabled": True,
"processors": [{
"type": "Lambda",
"parameters": [{
"parameter_name": "LambdaArn",
"parameter_value": f"{lambda_processor['arn']}:$LATEST",
}],
}],
},
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/opensearch"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
testCollection, err := opensearch.NewServerlessCollection(ctx, "test_collection", &opensearch.ServerlessCollectionArgs{
Name: pulumi.String("firehose-osserverless-test"),
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-test-stream"),
Destination: pulumi.String("opensearchserverless"),
OpensearchserverlessConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs{
CollectionEndpoint: testCollection.CollectionEndpoint,
RoleArn: pulumi.Any(firehoseRole.Arn),
IndexName: pulumi.String("test"),
S3Configuration: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: pulumi.Sprintf("%v:$LATEST", lambdaProcessor.Arn),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testCollection = new Aws.OpenSearch.ServerlessCollection("test_collection", new()
{
Name = "firehose-osserverless-test",
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("test_stream", new()
{
Name = "kinesis-firehose-test-stream",
Destination = "opensearchserverless",
OpensearchserverlessConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs
{
CollectionEndpoint = testCollection.CollectionEndpoint,
RoleArn = firehoseRole.Arn,
IndexName = "test",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
BufferingSize = 10,
BufferingInterval = 400,
CompressionFormat = "GZIP",
},
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = $"{lambdaProcessor.Arn}:$LATEST",
},
},
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.opensearch.ServerlessCollection;
import com.pulumi.aws.opensearch.ServerlessCollectionArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testCollection = new ServerlessCollection("testCollection", ServerlessCollectionArgs.builder()
.name("firehose-osserverless-test")
.build());
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-test-stream")
.destination("opensearchserverless")
.opensearchserverlessConfiguration(FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs.builder()
.collectionEndpoint(testCollection.collectionEndpoint())
.roleArn(firehoseRole.arn())
.indexName("test")
.s3Configuration(FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.bufferingSize(10)
.bufferingInterval(400)
.compressionFormat("GZIP")
.build())
.processingConfiguration(FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(String.format("%s:$LATEST", lambdaProcessor.arn()))
.build())
.build())
.build())
.build())
.build());
}
}
resources:
testCollection:
type: aws:opensearch:ServerlessCollection
name: test_collection
properties:
name: firehose-osserverless-test
testStream:
type: aws:kinesis:FirehoseDeliveryStream
name: test_stream
properties:
name: kinesis-firehose-test-stream
destination: opensearchserverless
opensearchserverlessConfiguration:
collectionEndpoint: ${testCollection.collectionEndpoint}
roleArn: ${firehoseRole.arn}
indexName: test
s3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
bufferingSize: 10
bufferingInterval: 400
compressionFormat: GZIP
processingConfiguration:
enabled: 'true'
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${lambdaProcessor.arn}:$LATEST
Iceberg Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const current = aws.getCallerIdentity({});
const currentGetPartition = aws.getPartition({});
const currentGetRegion = aws.getRegion({});
const bucket = new aws.s3.BucketV2("bucket", {
bucket: "test-bucket",
forceDestroy: true,
});
const test = new aws.glue.CatalogDatabase("test", {name: "test"});
const testCatalogTable = new aws.glue.CatalogTable("test", {
name: "test",
databaseName: test.name,
parameters: {
format: "parquet",
},
tableType: "EXTERNAL_TABLE",
openTableFormatInput: {
icebergInput: {
metadataOperation: "CREATE",
version: "2",
},
},
storageDescriptor: {
location: pulumi.interpolate`s3://${bucket.id}`,
columns: [{
name: "my_column_1",
type: "int",
}],
},
});
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
name: "kinesis-firehose-test-stream",
destination: "iceberg",
icebergConfiguration: {
roleArn: firehoseRole.arn,
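// ARN of the Glue Data Catalog that owns the Iceberg table, assembled from the
// current partition, region, and account.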
catalogArn: Promise.all([currentGetPartition, currentGetRegion, current]).then(([currentGetPartition, currentGetRegion, current]) => `arn:${currentGetPartition.partition}:glue:${currentGetRegion.name}:${current.accountId}:catalog`),
bufferingSize: 10,
bufferingInterval: 400,
s3Configuration: {
roleArn: firehoseRole.arn,
bucketArn: bucket.arn,
},
destinationTableConfigurations: [{
databaseName: test.name,
tableName: testCatalogTable.name,
}],
processingConfiguration: {
enabled: true,
processors: [{
type: "Lambda",
parameters: [{
parameterName: "LambdaArn",
parameterValue: `${lambdaProcessor.arn}:$LATEST`,
}],
}],
},
},
});
import pulumi
import pulumi_aws as aws
current = aws.get_caller_identity()
current_get_partition = aws.get_partition()
current_get_region = aws.get_region()
bucket = aws.s3.BucketV2("bucket",
bucket="test-bucket",
force_destroy=True)
test = aws.glue.CatalogDatabase("test", name="test")
test_catalog_table = aws.glue.CatalogTable("test",
name="test",
database_name=test.name,
parameters={
"format": "parquet",
},
table_type="EXTERNAL_TABLE",
open_table_format_input={
"iceberg_input": {
"metadata_operation": "CREATE",
"version": "2",
},
},
storage_descriptor={
"location": bucket.id.apply(lambda id: f"s3://{id}"),
"columns": [{
"name": "my_column_1",
"type": "int",
}],
})
test_stream = aws.kinesis.FirehoseDeliveryStream("test_stream",
name="kinesis-firehose-test-stream",
destination="iceberg",
iceberg_configuration={
"role_arn": firehose_role["arn"],
"catalog_arn": f"arn:{current_get_partition.partition}:glue:{current_get_region.name}:{current.account_id}:catalog",
"buffering_size": 10,
"buffering_interval": 400,
"s3_configuration": {
"role_arn": firehose_role["arn"],
"bucket_arn": bucket.arn,
},
"destination_table_configurations": [{
"database_name": test.name,
"table_name": test_catalog_table.name,
}],
"processing_configuration": {
"enabled": True,
"processors": [{
"type": "Lambda",
"parameters": [{
"parameter_name": "LambdaArn",
"parameter_value": f"{lambda_processor['arn']}:$LATEST",
}],
}],
},
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/glue"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
current, err := aws.GetCallerIdentity(ctx, &aws.GetCallerIdentityArgs{}, nil)
if err != nil {
return err
}
currentGetPartition, err := aws.GetPartition(ctx, &aws.GetPartitionArgs{}, nil)
if err != nil {
return err
}
currentGetRegion, err := aws.GetRegion(ctx, &aws.GetRegionArgs{}, nil)
if err != nil {
return err
}
bucket, err := s3.NewBucketV2(ctx, "bucket", &s3.BucketV2Args{
Bucket: pulumi.String("test-bucket"),
ForceDestroy: pulumi.Bool(true),
})
if err != nil {
return err
}
test, err := glue.NewCatalogDatabase(ctx, "test", &glue.CatalogDatabaseArgs{
Name: pulumi.String("test"),
})
if err != nil {
return err
}
testCatalogTable, err := glue.NewCatalogTable(ctx, "test", &glue.CatalogTableArgs{
Name: pulumi.String("test"),
DatabaseName: test.Name,
Parameters: pulumi.StringMap{
"format": pulumi.String("parquet"),
},
TableType: pulumi.String("EXTERNAL_TABLE"),
OpenTableFormatInput: &glue.CatalogTableOpenTableFormatInputArgs{
IcebergInput: &glue.CatalogTableOpenTableFormatInputIcebergInputArgs{
MetadataOperation: pulumi.String("CREATE"),
Version: pulumi.String("2"),
},
},
StorageDescriptor: &glue.CatalogTableStorageDescriptorArgs{
Location: bucket.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("s3://%v", id), nil
}).(pulumi.StringOutput),
Columns: glue.CatalogTableStorageDescriptorColumnArray{
&glue.CatalogTableStorageDescriptorColumnArgs{
Name: pulumi.String("my_column_1"),
Type: pulumi.String("int"),
},
},
},
})
if err != nil {
return err
}
_, err = kinesis.NewFirehoseDeliveryStream(ctx, "test_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-test-stream"),
Destination: pulumi.String("iceberg"),
IcebergConfiguration: &kinesis.FirehoseDeliveryStreamIcebergConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
CatalogArn: pulumi.Sprintf("arn:%v:glue:%v:%v:catalog", currentGetPartition.Partition, currentGetRegion.Name, current.AccountId),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
S3Configuration: &kinesis.FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehoseRole.Arn),
BucketArn: bucket.Arn,
},
DestinationTableConfigurations: kinesis.FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArray{
&kinesis.FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs{
DatabaseName: test.Name,
TableName: testCatalogTable.Name,
},
},
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(true),
Processors: kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("Lambda"),
Parameters: kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("LambdaArn"),
ParameterValue: pulumi.Sprintf("%v:$LATEST", lambdaProcessor.Arn),
},
},
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var current = Aws.GetCallerIdentity.Invoke();
var currentGetPartition = Aws.GetPartition.Invoke();
var currentGetRegion = Aws.GetRegion.Invoke();
var bucket = new Aws.S3.BucketV2("bucket", new()
{
Bucket = "test-bucket",
ForceDestroy = true,
});
var test = new Aws.Glue.CatalogDatabase("test", new()
{
Name = "test",
});
var testCatalogTable = new Aws.Glue.CatalogTable("test", new()
{
Name = "test",
DatabaseName = test.Name,
Parameters =
{
{ "format", "parquet" },
},
TableType = "EXTERNAL_TABLE",
OpenTableFormatInput = new Aws.Glue.Inputs.CatalogTableOpenTableFormatInputArgs
{
IcebergInput = new Aws.Glue.Inputs.CatalogTableOpenTableFormatInputIcebergInputArgs
{
MetadataOperation = "CREATE",
Version = "2",
},
},
StorageDescriptor = new Aws.Glue.Inputs.CatalogTableStorageDescriptorArgs
{
Location = bucket.Id.Apply(id => $"s3://{id}"),
Columns = new[]
{
new Aws.Glue.Inputs.CatalogTableStorageDescriptorColumnArgs
{
Name = "my_column_1",
Type = "int",
},
},
},
});
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("test_stream", new()
{
Name = "kinesis-firehose-test-stream",
Destination = "iceberg",
IcebergConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationArgs
{
RoleArn = firehoseRole.Arn,
CatalogArn = Output.Tuple(currentGetPartition, currentGetRegion, current).Apply(values =>
{
    var partition = values.Item1;
    var region = values.Item2;
    var identity = values.Item3;
    return $"arn:{partition.Partition}:glue:{region.Name}:{identity.AccountId}:catalog";
}),
BufferingSize = 10,
BufferingInterval = 400,
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs
{
RoleArn = firehoseRole.Arn,
BucketArn = bucket.Arn,
},
DestinationTableConfigurations = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs
{
DatabaseName = test.Name,
TableName = testCatalogTable.Name,
},
},
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs
{
Enabled = true,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs
{
Type = "Lambda",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "LambdaArn",
ParameterValue = $"{lambdaProcessor.Arn}:$LATEST",
},
},
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.AwsFunctions;
import com.pulumi.aws.inputs.GetCallerIdentityArgs;
import com.pulumi.aws.inputs.GetPartitionArgs;
import com.pulumi.aws.inputs.GetRegionArgs;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.s3.BucketV2Args;
import com.pulumi.aws.glue.CatalogDatabase;
import com.pulumi.aws.glue.CatalogDatabaseArgs;
import com.pulumi.aws.glue.CatalogTable;
import com.pulumi.aws.glue.CatalogTableArgs;
import com.pulumi.aws.glue.inputs.CatalogTableOpenTableFormatInputArgs;
import com.pulumi.aws.glue.inputs.CatalogTableOpenTableFormatInputIcebergInputArgs;
import com.pulumi.aws.glue.inputs.CatalogTableStorageDescriptorArgs;
import com.pulumi.aws.glue.inputs.CatalogTableStorageDescriptorColumnArgs;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamIcebergConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var current = AwsFunctions.getCallerIdentity();
final var currentGetPartition = AwsFunctions.getPartition();
final var currentGetRegion = AwsFunctions.getRegion();
var bucket = new BucketV2("bucket", BucketV2Args.builder()
.bucket("test-bucket")
.forceDestroy(true)
.build());
var test = new CatalogDatabase("test", CatalogDatabaseArgs.builder()
.name("test")
.build());
var testCatalogTable = new CatalogTable("testCatalogTable", CatalogTableArgs.builder()
.name("test")
.databaseName(test.name())
.parameters(Map.of("format", "parquet"))
.tableType("EXTERNAL_TABLE")
.openTableFormatInput(CatalogTableOpenTableFormatInputArgs.builder()
.icebergInput(CatalogTableOpenTableFormatInputIcebergInputArgs.builder()
.metadataOperation("CREATE")
.version("2")
.build())
.build())
.storageDescriptor(CatalogTableStorageDescriptorArgs.builder()
.location(bucket.id().applyValue(id -> String.format("s3://%s", id)))
.columns(CatalogTableStorageDescriptorColumnArgs.builder()
.name("my_column_1")
.type("int")
.build())
.build())
.build());
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-test-stream")
.destination("iceberg")
.icebergConfiguration(FirehoseDeliveryStreamIcebergConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.catalogArn(String.format("arn:%s:glue:%s:%s:catalog", currentGetPartition.applyValue(getPartitionResult -> getPartitionResult.partition()),currentGetRegion.applyValue(getRegionResult -> getRegionResult.name()),current.applyValue(getCallerIdentityResult -> getCallerIdentityResult.accountId())))
.bufferingSize(10)
.bufferingInterval(400)
.s3Configuration(FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs.builder()
.roleArn(firehoseRole.arn())
.bucketArn(bucket.arn())
.build())
.destinationTableConfigurations(FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs.builder()
.databaseName(test.name())
.tableName(testCatalogTable.name())
.build())
.processingConfiguration(FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs.builder()
.enabled("true")
.processors(FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs.builder()
.type("Lambda")
.parameters(FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("LambdaArn")
.parameterValue(String.format("%s:$LATEST", lambdaProcessor.arn()))
.build())
.build())
.build())
.build())
.build());
}
}
resources:
bucket:
type: aws:s3:BucketV2
properties:
bucket: test-bucket
forceDestroy: true
test:
type: aws:glue:CatalogDatabase
properties:
name: test
testCatalogTable:
type: aws:glue:CatalogTable
name: test
properties:
name: test
databaseName: ${test.name}
parameters:
format: parquet
tableType: EXTERNAL_TABLE
openTableFormatInput:
icebergInput:
metadataOperation: CREATE
version: 2
storageDescriptor:
location: s3://${bucket.id}
columns:
- name: my_column_1
type: int
testStream:
type: aws:kinesis:FirehoseDeliveryStream
name: test_stream
properties:
name: kinesis-firehose-test-stream
destination: iceberg
icebergConfiguration:
roleArn: ${firehoseRole.arn}
catalogArn: arn:${currentGetPartition.partition}:glue:${currentGetRegion.name}:${current.accountId}:catalog
bufferingSize: 10
bufferingInterval: 400
s3Configuration:
roleArn: ${firehoseRole.arn}
bucketArn: ${bucket.arn}
destinationTableConfigurations:
- databaseName: ${test.name}
tableName: ${testCatalogTable.name}
processingConfiguration:
enabled: 'true'
processors:
- type: Lambda
parameters:
- parameterName: LambdaArn
parameterValue: ${lambdaProcessor.arn}:$LATEST
variables:
current:
fn::invoke:
Function: aws:getCallerIdentity
Arguments: {}
currentGetPartition:
fn::invoke:
Function: aws:getPartition
Arguments: {}
currentGetRegion:
fn::invoke:
Function: aws:getRegion
Arguments: {}
Splunk Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
name: "kinesis-firehose-test-stream",
destination: "splunk",
splunkConfiguration: {
hecEndpoint: "https://http-inputs-mydomain.splunkcloud.com:443",
hecToken: "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
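// Seconds Firehose waits for an index acknowledgment from Splunk before retrying.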
hecAcknowledgmentTimeout: 600,
hecEndpointType: "Event",
s3BackupMode: "FailedEventsOnly",
s3Configuration: {
roleArn: firehose.arn,
bucketArn: bucket.arn,
bufferingSize: 10,
bufferingInterval: 400,
compressionFormat: "GZIP",
},
},
});
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.FirehoseDeliveryStream("test_stream",
name="kinesis-firehose-test-stream",
destination="splunk",
splunk_configuration={
"hec_endpoint": "https://http-inputs-mydomain.splunkcloud.com:443",
"hec_token": "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
"hec_acknowledgment_timeout": 600,
"hec_endpoint_type": "Event",
"s3_backup_mode": "FailedEventsOnly",
"s3_configuration": {
"role_arn": firehose["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 10,
"buffering_interval": 400,
"compression_format": "GZIP",
},
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "test_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-test-stream"),
Destination: pulumi.String("splunk"),
SplunkConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs{
HecEndpoint: pulumi.String("https://http-inputs-mydomain.splunkcloud.com:443"),
HecToken: pulumi.String("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A"),
HecAcknowledgmentTimeout: pulumi.Int(600),
HecEndpointType: pulumi.String("Event"),
S3BackupMode: pulumi.String("FailedEventsOnly"),
S3Configuration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehose.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("test_stream", new()
{
Name = "kinesis-firehose-test-stream",
Destination = "splunk",
SplunkConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationArgs
{
HecEndpoint = "https://http-inputs-mydomain.splunkcloud.com:443",
HecToken = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
HecAcknowledgmentTimeout = 600,
HecEndpointType = "Event",
S3BackupMode = "FailedEventsOnly",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs
{
RoleArn = firehose.Arn,
BucketArn = bucket.Arn,
BufferingSize = 10,
BufferingInterval = 400,
CompressionFormat = "GZIP",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSplunkConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-test-stream")
.destination("splunk")
.splunkConfiguration(FirehoseDeliveryStreamSplunkConfigurationArgs.builder()
.hecEndpoint("https://http-inputs-mydomain.splunkcloud.com:443")
.hecToken("51D4DA16-C61B-4F5F-8EC7-ED4301342A4A")
.hecAcknowledgmentTimeout(600)
.hecEndpointType("Event")
.s3BackupMode("FailedEventsOnly")
.s3Configuration(FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs.builder()
.roleArn(firehose.arn())
.bucketArn(bucket.arn())
.bufferingSize(10)
.bufferingInterval(400)
.compressionFormat("GZIP")
.build())
.build())
.build());
}
}
resources:
testStream:
type: aws:kinesis:FirehoseDeliveryStream
name: test_stream
properties:
name: kinesis-firehose-test-stream
destination: splunk
splunkConfiguration:
hecEndpoint: https://http-inputs-mydomain.splunkcloud.com:443
hecToken: 51D4DA16-C61B-4F5F-8EC7-ED4301342A4A
hecAcknowledgmentTimeout: 600
hecEndpointType: Event
s3BackupMode: FailedEventsOnly
s3Configuration:
roleArn: ${firehose.arn}
bucketArn: ${bucket.arn}
bufferingSize: 10
bufferingInterval: 400
compressionFormat: GZIP
HTTP Endpoint (e.g., New Relic) Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const testStream = new aws.kinesis.FirehoseDeliveryStream("test_stream", {
name: "kinesis-firehose-test-stream",
destination: "http_endpoint",
httpEndpointConfiguration: {
url: "https://aws-api.newrelic.com/firehose/v1",
name: "New Relic",
accessKey: "my-key",
bufferingSize: 15,
bufferingInterval: 600,
roleArn: firehose.arn,
s3BackupMode: "FailedDataOnly",
s3Configuration: {
roleArn: firehose.arn,
bucketArn: bucket.arn,
bufferingSize: 10,
bufferingInterval: 400,
compressionFormat: "GZIP",
},
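// Common attributes are sent as metadata with every request to the HTTP endpoint.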
requestConfiguration: {
contentEncoding: "GZIP",
commonAttributes: [
{
name: "testname",
value: "testvalue",
},
{
name: "testname2",
value: "testvalue2",
},
],
},
},
});
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.FirehoseDeliveryStream("test_stream",
name="kinesis-firehose-test-stream",
destination="http_endpoint",
http_endpoint_configuration={
"url": "https://aws-api.newrelic.com/firehose/v1",
"name": "New Relic",
"access_key": "my-key",
"buffering_size": 15,
"buffering_interval": 600,
"role_arn": firehose["arn"],
"s3_backup_mode": "FailedDataOnly",
"s3_configuration": {
"role_arn": firehose["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 10,
"buffering_interval": 400,
"compression_format": "GZIP",
},
"request_configuration": {
"content_encoding": "GZIP",
"common_attributes": [
{
"name": "testname",
"value": "testvalue",
},
{
"name": "testname2",
"value": "testvalue2",
},
],
},
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "test_stream", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("kinesis-firehose-test-stream"),
Destination: pulumi.String("http_endpoint"),
HttpEndpointConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs{
Url: pulumi.String("https://aws-api.newrelic.com/firehose/v1"),
Name: pulumi.String("New Relic"),
AccessKey: pulumi.String("my-key"),
BufferingSize: pulumi.Int(15),
BufferingInterval: pulumi.Int(600),
RoleArn: pulumi.Any(firehose.Arn),
S3BackupMode: pulumi.String("FailedDataOnly"),
S3Configuration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehose.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
RequestConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs{
ContentEncoding: pulumi.String("GZIP"),
CommonAttributes: kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArray{
&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
Name: pulumi.String("testname"),
Value: pulumi.String("testvalue"),
},
&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
Name: pulumi.String("testname2"),
Value: pulumi.String("testvalue2"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var testStream = new Aws.Kinesis.FirehoseDeliveryStream("test_stream", new()
{
Name = "kinesis-firehose-test-stream",
Destination = "http_endpoint",
HttpEndpointConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs
{
Url = "https://aws-api.newrelic.com/firehose/v1",
Name = "New Relic",
AccessKey = "my-key",
BufferingSize = 15,
BufferingInterval = 600,
RoleArn = firehose.Arn,
S3BackupMode = "FailedDataOnly",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs
{
RoleArn = firehose.Arn,
BucketArn = bucket.Arn,
BufferingSize = 10,
BufferingInterval = 400,
CompressionFormat = "GZIP",
},
RequestConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs
{
ContentEncoding = "GZIP",
CommonAttributes = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
{
Name = "testname",
Value = "testvalue",
},
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
{
Name = "testname2",
Value = "testvalue2",
},
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testStream = new FirehoseDeliveryStream("testStream", FirehoseDeliveryStreamArgs.builder()
.name("kinesis-firehose-test-stream")
.destination("http_endpoint")
.httpEndpointConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationArgs.builder()
.url("https://aws-api.newrelic.com/firehose/v1")
.name("New Relic")
.accessKey("my-key")
.bufferingSize(15)
.bufferingInterval(600)
.roleArn(firehose.arn())
.s3BackupMode("FailedDataOnly")
.s3Configuration(FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs.builder()
.roleArn(firehose.arn())
.bucketArn(bucket.arn())
.bufferingSize(10)
.bufferingInterval(400)
.compressionFormat("GZIP")
.build())
.requestConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs.builder()
.contentEncoding("GZIP")
.commonAttributes(
FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
.name("testname")
.value("testvalue")
.build(),
FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
.name("testname2")
.value("testvalue2")
.build())
.build())
.build())
.build());
}
}
resources:
testStream:
type: aws:kinesis:FirehoseDeliveryStream
name: test_stream
properties:
name: kinesis-firehose-test-stream
destination: http_endpoint
httpEndpointConfiguration:
url: https://aws-api.newrelic.com/firehose/v1
name: New Relic
accessKey: my-key
bufferingSize: 15
bufferingInterval: 600
roleArn: ${firehose.arn}
s3BackupMode: FailedDataOnly
s3Configuration:
roleArn: ${firehose.arn}
bucketArn: ${bucket.arn}
bufferingSize: 10
bufferingInterval: 400
compressionFormat: GZIP
requestConfiguration:
contentEncoding: GZIP
commonAttributes:
- name: testname
value: testvalue
- name: testname2
value: testvalue2
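The example above passes the endpoint key in-line via accessKey. The resource also accepts a secretsManagerConfiguration block (see the constructor reference below), which lets the delivery stream read the key from AWS Secrets Manager instead of keeping it in program source. A minimal TypeScript sketch, assuming a secret named new-relic-license-key already exists and that firehose and bucket are defined as above:
import * as aws from "@pulumi/aws";
// Hypothetical pre-existing secret holding the HTTP endpoint access key.
const endpointSecret = aws.secretsmanager.getSecretOutput({name: "new-relic-license-key"});
const secureStream = new aws.kinesis.FirehoseDeliveryStream("secure_stream", {
    name: "kinesis-firehose-secure-http-stream",
    destination: "http_endpoint",
    httpEndpointConfiguration: {
        url: "https://aws-api.newrelic.com/firehose/v1",
        name: "New Relic",
        roleArn: firehose.arn,
        s3BackupMode: "FailedDataOnly",
        s3Configuration: {
            roleArn: firehose.arn,
            bucketArn: bucket.arn,
        },
        // Resolve the access key from Secrets Manager rather than setting accessKey.
        secretsManagerConfiguration: {
            enabled: true,
            roleArn: firehose.arn,
            secretArn: endpointSecret.arn,
        },
    },
});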
Snowflake Destination
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const exampleSnowflakeDestination = new aws.kinesis.FirehoseDeliveryStream("example_snowflake_destination", {
name: "example-snowflake-destination",
destination: "snowflake",
snowflakeConfiguration: {
accountUrl: "https://example.snowflakecomputing.com",
bufferingSize: 15,
bufferingInterval: 600,
database: "example-db",
privateKey: "...",
roleArn: firehose.arn,
schema: "example-schema",
table: "example-table",
user: "example-usr",
s3Configuration: {
roleArn: firehose.arn,
bucketArn: bucket.arn,
bufferingSize: 10,
bufferingInterval: 400,
compressionFormat: "GZIP",
},
},
});
import pulumi
import pulumi_aws as aws
example_snowflake_destination = aws.kinesis.FirehoseDeliveryStream("example_snowflake_destination",
name="example-snowflake-destination",
destination="snowflake",
snowflake_configuration={
"account_url": "https://example.snowflakecomputing.com",
"buffering_size": 15,
"buffering_interval": 600,
"database": "example-db",
"private_key": "...",
"role_arn": firehose["arn"],
"schema": "example-schema",
"table": "example-table",
"user": "example-usr",
"s3_configuration": {
"role_arn": firehose["arn"],
"bucket_arn": bucket["arn"],
"buffering_size": 10,
"buffering_interval": 400,
"compression_format": "GZIP",
},
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kinesis"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := kinesis.NewFirehoseDeliveryStream(ctx, "example_snowflake_destination", &kinesis.FirehoseDeliveryStreamArgs{
Name: pulumi.String("example-snowflake-destination"),
Destination: pulumi.String("snowflake"),
SnowflakeConfiguration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationArgs{
AccountUrl: pulumi.String("https://example.snowflakecomputing.com"),
BufferingSize: pulumi.Int(15),
BufferingInterval: pulumi.Int(600),
Database: pulumi.String("example-db"),
PrivateKey: pulumi.String("..."),
RoleArn: pulumi.Any(firehose.Arn),
Schema: pulumi.String("example-schema"),
Table: pulumi.String("example-table"),
User: pulumi.String("example-usr"),
S3Configuration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs{
RoleArn: pulumi.Any(firehose.Arn),
BucketArn: pulumi.Any(bucket.Arn),
BufferingSize: pulumi.Int(10),
BufferingInterval: pulumi.Int(400),
CompressionFormat: pulumi.String("GZIP"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var exampleSnowflakeDestination = new Aws.Kinesis.FirehoseDeliveryStream("example_snowflake_destination", new()
{
Name = "example-snowflake-destination",
Destination = "snowflake",
SnowflakeConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationArgs
{
AccountUrl = "https://example.snowflakecomputing.com",
BufferingSize = 15,
BufferingInterval = 600,
Database = "example-db",
PrivateKey = "...",
RoleArn = firehose.Arn,
Schema = "example-schema",
Table = "example-table",
User = "example-usr",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs
{
RoleArn = firehose.Arn,
BucketArn = bucket.Arn,
BufferingSize = 10,
BufferingInterval = 400,
CompressionFormat = "GZIP",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.kinesis.FirehoseDeliveryStream;
import com.pulumi.aws.kinesis.FirehoseDeliveryStreamArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSnowflakeConfigurationArgs;
import com.pulumi.aws.kinesis.inputs.FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var exampleSnowflakeDestination = new FirehoseDeliveryStream("exampleSnowflakeDestination", FirehoseDeliveryStreamArgs.builder()
.name("example-snowflake-destination")
.destination("snowflake")
.snowflakeConfiguration(FirehoseDeliveryStreamSnowflakeConfigurationArgs.builder()
.accountUrl("https://example.snowflakecomputing.com")
.bufferingSize(15)
.bufferingInterval(600)
.database("example-db")
.privateKey("...")
.roleArn(firehose.arn())
.schema("example-schema")
.table("example-table")
.user("example-usr")
.s3Configuration(FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs.builder()
.roleArn(firehose.arn())
.bucketArn(bucket.arn())
.bufferingSize(10)
.bufferingInterval(400)
.compressionFormat("GZIP")
.build())
.build())
.build());
}
}
resources:
exampleSnowflakeDestination:
type: aws:kinesis:FirehoseDeliveryStream
name: example_snowflake_destination
properties:
name: example-snowflake-destination
destination: snowflake
snowflakeConfiguration:
accountUrl: https://example.snowflakecomputing.com
bufferingSize: 15
bufferingInterval: 600
database: example-db
privateKey: '...'
roleArn: ${firehose.arn}
schema: example-schema
table: example-table
user: example-usr
s3Configuration:
roleArn: ${firehose.arn}
bucketArn: ${bucket.arn}
bufferingSize: 10
bufferingInterval: 400
compressionFormat: GZIP
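The Snowflake example shows the key-pair privateKey as an elided in-line string. In practice the key is usually supplied as a Pulumi secret (or via secretsManagerConfiguration, see the constructor reference below) so it never appears in plain text in source or state. A minimal TypeScript sketch, assuming a stack configuration value has been set with pulumi config set --secret snowflakePrivateKey and that firehose and bucket are defined elsewhere:
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// Hypothetical secret config value, set with:
//   pulumi config set --secret snowflakePrivateKey "<private key body>"
const cfg = new pulumi.Config();
const snowflakePrivateKey = cfg.requireSecret("snowflakePrivateKey");
const snowflakeStream = new aws.kinesis.FirehoseDeliveryStream("snowflake_stream", {
    name: "example-snowflake-destination",
    destination: "snowflake",
    snowflakeConfiguration: {
        accountUrl: "https://example.snowflakecomputing.com",
        database: "example-db",
        schema: "example-schema",
        table: "example-table",
        user: "example-usr",
        privateKey: snowflakePrivateKey, // stored encrypted in state as a secret
        roleArn: firehose.arn,
        s3Configuration: {
            roleArn: firehose.arn,
            bucketArn: bucket.arn,
        },
    },
});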
Create FirehoseDeliveryStream Resource
Resources are created by calling constructor functions. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new FirehoseDeliveryStream(name: string, args: FirehoseDeliveryStreamArgs, opts?: CustomResourceOptions);
@overload
def FirehoseDeliveryStream(resource_name: str,
args: FirehoseDeliveryStreamArgs,
opts: Optional[ResourceOptions] = None)
@overload
def FirehoseDeliveryStream(resource_name: str,
opts: Optional[ResourceOptions] = None,
destination: Optional[str] = None,
msk_source_configuration: Optional[FirehoseDeliveryStreamMskSourceConfigurationArgs] = None,
http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
name: Optional[str] = None,
extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
iceberg_configuration: Optional[FirehoseDeliveryStreamIcebergConfigurationArgs] = None,
kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
opensearchserverless_configuration: Optional[FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs] = None,
elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
destination_id: Optional[str] = None,
arn: Optional[str] = None,
redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
snowflake_configuration: Optional[FirehoseDeliveryStreamSnowflakeConfigurationArgs] = None,
splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
tags: Optional[Mapping[str, str]] = None,
version_id: Optional[str] = None)
func NewFirehoseDeliveryStream(ctx *Context, name string, args FirehoseDeliveryStreamArgs, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public FirehoseDeliveryStream(string name, FirehoseDeliveryStreamArgs args, CustomResourceOptions? opts = null)
public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args)
public FirehoseDeliveryStream(String name, FirehoseDeliveryStreamArgs args, CustomResourceOptions options)
type: aws:kinesis:FirehoseDeliveryStream
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args FirehoseDeliveryStreamArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
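The placeholder listing below enumerates every accepted input. In practice, only destination and the configuration block matching that destination are typically required; for example, a minimal sketch of an extended_s3 stream (assuming the firehose role and bucket from the earlier examples exist):
import * as aws from "@pulumi/aws";
// Smallest useful configuration: destination plus its matching configuration block.
const minimalStream = new aws.kinesis.FirehoseDeliveryStream("minimal", {
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: firehose.arn,
        bucketArn: bucket.arn,
    },
});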
Constructor example
The following reference example uses placeholder values for all input properties.
var firehoseDeliveryStreamResource = new Aws.Kinesis.FirehoseDeliveryStream("firehoseDeliveryStreamResource", new()
{
Destination = "string",
MskSourceConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamMskSourceConfigurationArgs
{
AuthenticationConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfigurationArgs
{
Connectivity = "string",
RoleArn = "string",
},
MskClusterArn = "string",
TopicName = "string",
},
HttpEndpointConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationArgs
{
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
Url = "string",
RequestConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs
{
CommonAttributes = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
{
Name = "string",
Value = "string",
},
},
ContentEncoding = "string",
},
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
Name = "string",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
AccessKey = "string",
RetryDuration = 0,
RoleArn = "string",
S3BackupMode = "string",
BufferingSize = 0,
SecretsManagerConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamHttpEndpointConfigurationSecretsManagerConfigurationArgs
{
Enabled = false,
RoleArn = "string",
SecretArn = "string",
},
BufferingInterval = 0,
},
Name = "string",
ExtendedS3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
ErrorOutputPrefix = "string",
FileExtension = "string",
CompressionFormat = "string",
CustomTimeZone = "string",
DataFormatConversionConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs
{
InputFormatConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs
{
Deserializer = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs
{
HiveJsonSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs
{
TimestampFormats = new[]
{
"string",
},
},
OpenXJsonSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs
{
CaseInsensitive = false,
ColumnToJsonKeyMappings =
{
{ "string", "string" },
},
ConvertDotsInJsonKeysToUnderscores = false,
},
},
},
OutputFormatConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs
{
Serializer = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs
{
OrcSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs
{
BlockSizeBytes = 0,
BloomFilterColumns = new[]
{
"string",
},
BloomFilterFalsePositiveProbability = 0,
Compression = "string",
DictionaryKeyThreshold = 0,
EnablePadding = false,
FormatVersion = "string",
PaddingTolerance = 0,
RowIndexStride = 0,
StripeSizeBytes = 0,
},
ParquetSerDe = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs
{
BlockSizeBytes = 0,
Compression = "string",
EnableDictionaryCompression = false,
MaxPaddingBytes = 0,
PageSizeBytes = 0,
WriterVersion = "string",
},
},
},
SchemaConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs
{
DatabaseName = "string",
RoleArn = "string",
TableName = "string",
CatalogId = "string",
Region = "string",
VersionId = "string",
},
Enabled = false,
},
DynamicPartitioningConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
{
Enabled = false,
RetryDuration = 0,
},
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
KmsKeyArn = "string",
Prefix = "string",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
BufferingInterval = 0,
S3BackupConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
S3BackupMode = "string",
},
OpensearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationArgs
{
IndexName = "string",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
RoleArn = "string",
ClusterEndpoint = "string",
DocumentIdOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptionsArgs
{
DefaultDocumentIdFormat = "string",
},
DomainArn = "string",
BufferingInterval = 0,
IndexRotationPeriod = "string",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
RetryDuration = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
S3BackupMode = "string",
BufferingSize = 0,
TypeName = "string",
VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs
{
RoleArn = "string",
SecurityGroupIds = new[]
{
"string",
},
SubnetIds = new[]
{
"string",
},
VpcId = "string",
},
},
IcebergConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationArgs
{
CatalogArn = "string",
RoleArn = "string",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
DestinationTableConfigurations = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs
{
DatabaseName = "string",
TableName = "string",
S3ErrorOutputPrefix = "string",
UniqueKeys = new[]
{
"string",
},
},
},
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
RetryDuration = 0,
S3BackupMode = "string",
},
KinesisSourceConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamKinesisSourceConfigurationArgs
{
KinesisStreamArn = "string",
RoleArn = "string",
},
OpensearchserverlessConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs
{
CollectionEndpoint = "string",
IndexName = "string",
RoleArn = "string",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
RetryDuration = 0,
S3BackupMode = "string",
VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamOpensearchserverlessConfigurationVpcConfigArgs
{
RoleArn = "string",
SecurityGroupIds = new[]
{
"string",
},
SubnetIds = new[]
{
"string",
},
VpcId = "string",
},
},
ElasticsearchConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationArgs
{
IndexName = "string",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
RoleArn = "string",
ClusterEndpoint = "string",
DomainArn = "string",
BufferingInterval = 0,
IndexRotationPeriod = "string",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
RetryDuration = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
S3BackupMode = "string",
BufferingSize = 0,
TypeName = "string",
VpcConfig = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
{
RoleArn = "string",
SecurityGroupIds = new[]
{
"string",
},
SubnetIds = new[]
{
"string",
},
VpcId = "string",
},
},
DestinationId = "string",
Arn = "string",
RedshiftConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationArgs
{
DataTableName = "string",
ClusterJdbcurl = "string",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
RoleArn = "string",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
Password = "string",
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
RetryDuration = 0,
DataTableColumns = "string",
S3BackupConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
S3BackupMode = "string",
CopyOptions = "string",
SecretsManagerConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamRedshiftConfigurationSecretsManagerConfigurationArgs
{
Enabled = false,
RoleArn = "string",
SecretArn = "string",
},
Username = "string",
},
ServerSideEncryption = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamServerSideEncryptionArgs
{
Enabled = false,
KeyArn = "string",
KeyType = "string",
},
SnowflakeConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationArgs
{
Database = "string",
Table = "string",
Schema = "string",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
RoleArn = "string",
AccountUrl = "string",
DataLoadingOption = "string",
S3BackupMode = "string",
MetadataColumnName = "string",
PrivateKey = "string",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
RetryDuration = 0,
ContentColumnName = "string",
KeyPassphrase = "string",
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
BufferingSize = 0,
SecretsManagerConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationSecretsManagerConfigurationArgs
{
Enabled = false,
RoleArn = "string",
SecretArn = "string",
},
SnowflakeRoleConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeRoleConfigurationArgs
{
Enabled = false,
SnowflakeRole = "string",
},
SnowflakeVpcConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeVpcConfigurationArgs
{
PrivateLinkVpceId = "string",
},
BufferingInterval = 0,
User = "string",
},
SplunkConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationArgs
{
HecEndpoint = "string",
S3Configuration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs
{
BucketArn = "string",
RoleArn = "string",
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
CompressionFormat = "string",
ErrorOutputPrefix = "string",
KmsKeyArn = "string",
Prefix = "string",
},
BufferingInterval = 0,
BufferingSize = 0,
CloudwatchLoggingOptions = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptionsArgs
{
Enabled = false,
LogGroupName = "string",
LogStreamName = "string",
},
HecAcknowledgmentTimeout = 0,
HecEndpointType = "string",
HecToken = "string",
ProcessingConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationArgs
{
Enabled = false,
Processors = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorArgs
{
Type = "string",
Parameters = new[]
{
new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameterArgs
{
ParameterName = "string",
ParameterValue = "string",
},
},
},
},
},
RetryDuration = 0,
S3BackupMode = "string",
SecretsManagerConfiguration = new Aws.Kinesis.Inputs.FirehoseDeliveryStreamSplunkConfigurationSecretsManagerConfigurationArgs
{
Enabled = false,
RoleArn = "string",
SecretArn = "string",
},
},
Tags =
{
{ "string", "string" },
},
VersionId = "string",
});
example, err := kinesis.NewFirehoseDeliveryStream(ctx, "firehoseDeliveryStreamResource", &kinesis.FirehoseDeliveryStreamArgs{
Destination: pulumi.String("string"),
MskSourceConfiguration: &kinesis.FirehoseDeliveryStreamMskSourceConfigurationArgs{
AuthenticationConfiguration: &kinesis.FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfigurationArgs{
Connectivity: pulumi.String("string"),
RoleArn: pulumi.String("string"),
},
MskClusterArn: pulumi.String("string"),
TopicName: pulumi.String("string"),
},
HttpEndpointConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs{
S3Configuration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
Url: pulumi.String("string"),
RequestConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs{
CommonAttributes: kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArray{
&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs{
Name: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
ContentEncoding: pulumi.String("string"),
},
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
Name: pulumi.String("string"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
AccessKey: pulumi.String("string"),
RetryDuration: pulumi.Int(0),
RoleArn: pulumi.String("string"),
S3BackupMode: pulumi.String("string"),
BufferingSize: pulumi.Int(0),
SecretsManagerConfiguration: &kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationSecretsManagerConfigurationArgs{
Enabled: pulumi.Bool(false),
RoleArn: pulumi.String("string"),
SecretArn: pulumi.String("string"),
},
BufferingInterval: pulumi.Int(0),
},
Name: pulumi.String("string"),
ExtendedS3Configuration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
FileExtension: pulumi.String("string"),
CompressionFormat: pulumi.String("string"),
CustomTimeZone: pulumi.String("string"),
DataFormatConversionConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs{
InputFormatConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs{
Deserializer: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs{
HiveJsonSerDe: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs{
TimestampFormats: pulumi.StringArray{
pulumi.String("string"),
},
},
OpenXJsonSerDe: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs{
CaseInsensitive: pulumi.Bool(false),
ColumnToJsonKeyMappings: pulumi.StringMap{
"string": pulumi.String("string"),
},
ConvertDotsInJsonKeysToUnderscores: pulumi.Bool(false),
},
},
},
OutputFormatConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs{
Serializer: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs{
OrcSerDe: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs{
BlockSizeBytes: pulumi.Int(0),
BloomFilterColumns: pulumi.StringArray{
pulumi.String("string"),
},
BloomFilterFalsePositiveProbability: pulumi.Float64(0),
Compression: pulumi.String("string"),
DictionaryKeyThreshold: pulumi.Float64(0),
EnablePadding: pulumi.Bool(false),
FormatVersion: pulumi.String("string"),
PaddingTolerance: pulumi.Float64(0),
RowIndexStride: pulumi.Int(0),
StripeSizeBytes: pulumi.Int(0),
},
ParquetSerDe: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs{
BlockSizeBytes: pulumi.Int(0),
Compression: pulumi.String("string"),
EnableDictionaryCompression: pulumi.Bool(false),
MaxPaddingBytes: pulumi.Int(0),
PageSizeBytes: pulumi.Int(0),
WriterVersion: pulumi.String("string"),
},
},
},
SchemaConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs{
DatabaseName: pulumi.String("string"),
RoleArn: pulumi.String("string"),
TableName: pulumi.String("string"),
CatalogId: pulumi.String("string"),
Region: pulumi.String("string"),
VersionId: pulumi.String("string"),
},
Enabled: pulumi.Bool(false),
},
DynamicPartitioningConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs{
Enabled: pulumi.Bool(false),
RetryDuration: pulumi.Int(0),
},
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
BufferingInterval: pulumi.Int(0),
S3BackupConfiguration: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
S3BackupMode: pulumi.String("string"),
},
OpensearchConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationArgs{
IndexName: pulumi.String("string"),
S3Configuration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
RoleArn: pulumi.String("string"),
ClusterEndpoint: pulumi.String("string"),
DocumentIdOptions: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptionsArgs{
DefaultDocumentIdFormat: pulumi.String("string"),
},
DomainArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
IndexRotationPeriod: pulumi.String("string"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
RetryDuration: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
S3BackupMode: pulumi.String("string"),
BufferingSize: pulumi.Int(0),
TypeName: pulumi.String("string"),
VpcConfig: &kinesis.FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs{
RoleArn: pulumi.String("string"),
SecurityGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
SubnetIds: pulumi.StringArray{
pulumi.String("string"),
},
VpcId: pulumi.String("string"),
},
},
IcebergConfiguration: &kinesis.FirehoseDeliveryStreamIcebergConfigurationArgs{
CatalogArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
S3Configuration: &kinesis.FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamIcebergConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
DestinationTableConfigurations: kinesis.FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArray{
&kinesis.FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs{
DatabaseName: pulumi.String("string"),
TableName: pulumi.String("string"),
S3ErrorOutputPrefix: pulumi.String("string"),
UniqueKeys: pulumi.StringArray{
pulumi.String("string"),
},
},
},
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
RetryDuration: pulumi.Int(0),
S3BackupMode: pulumi.String("string"),
},
KinesisSourceConfiguration: &kinesis.FirehoseDeliveryStreamKinesisSourceConfigurationArgs{
KinesisStreamArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
},
OpensearchserverlessConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs{
CollectionEndpoint: pulumi.String("string"),
IndexName: pulumi.String("string"),
RoleArn: pulumi.String("string"),
S3Configuration: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
RetryDuration: pulumi.Int(0),
S3BackupMode: pulumi.String("string"),
VpcConfig: &kinesis.FirehoseDeliveryStreamOpensearchserverlessConfigurationVpcConfigArgs{
RoleArn: pulumi.String("string"),
SecurityGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
SubnetIds: pulumi.StringArray{
pulumi.String("string"),
},
VpcId: pulumi.String("string"),
},
},
ElasticsearchConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs{
IndexName: pulumi.String("string"),
S3Configuration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
RoleArn: pulumi.String("string"),
ClusterEndpoint: pulumi.String("string"),
DomainArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
IndexRotationPeriod: pulumi.String("string"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
RetryDuration: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
S3BackupMode: pulumi.String("string"),
BufferingSize: pulumi.Int(0),
TypeName: pulumi.String("string"),
VpcConfig: &kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs{
RoleArn: pulumi.String("string"),
SecurityGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
SubnetIds: pulumi.StringArray{
pulumi.String("string"),
},
VpcId: pulumi.String("string"),
},
},
DestinationId: pulumi.String("string"),
Arn: pulumi.String("string"),
RedshiftConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs{
DataTableName: pulumi.String("string"),
ClusterJdbcurl: pulumi.String("string"),
S3Configuration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
RoleArn: pulumi.String("string"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
Password: pulumi.String("string"),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
RetryDuration: pulumi.Int(0),
DataTableColumns: pulumi.String("string"),
S3BackupConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
S3BackupMode: pulumi.String("string"),
CopyOptions: pulumi.String("string"),
SecretsManagerConfiguration: &kinesis.FirehoseDeliveryStreamRedshiftConfigurationSecretsManagerConfigurationArgs{
Enabled: pulumi.Bool(false),
RoleArn: pulumi.String("string"),
SecretArn: pulumi.String("string"),
},
Username: pulumi.String("string"),
},
ServerSideEncryption: &kinesis.FirehoseDeliveryStreamServerSideEncryptionArgs{
Enabled: pulumi.Bool(false),
KeyArn: pulumi.String("string"),
KeyType: pulumi.String("string"),
},
SnowflakeConfiguration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationArgs{
Database: pulumi.String("string"),
Table: pulumi.String("string"),
Schema: pulumi.String("string"),
S3Configuration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
RoleArn: pulumi.String("string"),
AccountUrl: pulumi.String("string"),
DataLoadingOption: pulumi.String("string"),
S3BackupMode: pulumi.String("string"),
MetadataColumnName: pulumi.String("string"),
PrivateKey: pulumi.String("string"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
RetryDuration: pulumi.Int(0),
ContentColumnName: pulumi.String("string"),
KeyPassphrase: pulumi.String("string"),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
BufferingSize: pulumi.Int(0),
SecretsManagerConfiguration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationSecretsManagerConfigurationArgs{
Enabled: pulumi.Bool(false),
RoleArn: pulumi.String("string"),
SecretArn: pulumi.String("string"),
},
SnowflakeRoleConfiguration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeRoleConfigurationArgs{
Enabled: pulumi.Bool(false),
SnowflakeRole: pulumi.String("string"),
},
SnowflakeVpcConfiguration: &kinesis.FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeVpcConfigurationArgs{
PrivateLinkVpceId: pulumi.String("string"),
},
BufferingInterval: pulumi.Int(0),
User: pulumi.String("string"),
},
SplunkConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs{
HecEndpoint: pulumi.String("string"),
S3Configuration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs{
BucketArn: pulumi.String("string"),
RoleArn: pulumi.String("string"),
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
CompressionFormat: pulumi.String("string"),
ErrorOutputPrefix: pulumi.String("string"),
KmsKeyArn: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
BufferingInterval: pulumi.Int(0),
BufferingSize: pulumi.Int(0),
CloudwatchLoggingOptions: &kinesis.FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptionsArgs{
Enabled: pulumi.Bool(false),
LogGroupName: pulumi.String("string"),
LogStreamName: pulumi.String("string"),
},
HecAcknowledgmentTimeout: pulumi.Int(0),
HecEndpointType: pulumi.String("string"),
HecToken: pulumi.String("string"),
ProcessingConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationArgs{
Enabled: pulumi.Bool(false),
Processors: kinesis.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorArray{
&kinesis.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorArgs{
Type: pulumi.String("string"),
Parameters: kinesis.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameterArray{
&kinesis.FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameterArgs{
ParameterName: pulumi.String("string"),
ParameterValue: pulumi.String("string"),
},
},
},
},
},
RetryDuration: pulumi.Int(0),
S3BackupMode: pulumi.String("string"),
SecretsManagerConfiguration: &kinesis.FirehoseDeliveryStreamSplunkConfigurationSecretsManagerConfigurationArgs{
Enabled: pulumi.Bool(false),
RoleArn: pulumi.String("string"),
SecretArn: pulumi.String("string"),
},
},
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
VersionId: pulumi.String("string"),
})
var firehoseDeliveryStreamResource = new FirehoseDeliveryStream("firehoseDeliveryStreamResource", FirehoseDeliveryStreamArgs.builder()
.destination("string")
.mskSourceConfiguration(FirehoseDeliveryStreamMskSourceConfigurationArgs.builder()
.authenticationConfiguration(FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfigurationArgs.builder()
.connectivity("string")
.roleArn("string")
.build())
.mskClusterArn("string")
.topicName("string")
.build())
.httpEndpointConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationArgs.builder()
.s3Configuration(FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.url("string")
.requestConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs.builder()
.commonAttributes(FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs.builder()
.name("string")
.value("string")
.build())
.contentEncoding("string")
.build())
.cloudwatchLoggingOptions(FirehoseDeliveryStreamHttpEndpointConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.name("string")
.processingConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.accessKey("string")
.retryDuration(0)
.roleArn("string")
.s3BackupMode("string")
.bufferingSize(0)
.secretsManagerConfiguration(FirehoseDeliveryStreamHttpEndpointConfigurationSecretsManagerConfigurationArgs.builder()
.enabled(false)
.roleArn("string")
.secretArn("string")
.build())
.bufferingInterval(0)
.build())
.name("string")
.extendedS3Configuration(FirehoseDeliveryStreamExtendedS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.errorOutputPrefix("string")
.fileExtension("string")
.compressionFormat("string")
.customTimeZone("string")
.dataFormatConversionConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs.builder()
.inputFormatConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs.builder()
.deserializer(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs.builder()
.hiveJsonSerDe(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs.builder()
.timestampFormats("string")
.build())
.openXJsonSerDe(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs.builder()
.caseInsensitive(false)
.columnToJsonKeyMappings(Map.of("string", "string"))
.convertDotsInJsonKeysToUnderscores(false)
.build())
.build())
.build())
.outputFormatConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs.builder()
.serializer(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs.builder()
.orcSerDe(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs.builder()
.blockSizeBytes(0)
.bloomFilterColumns("string")
.bloomFilterFalsePositiveProbability(0)
.compression("string")
.dictionaryKeyThreshold(0)
.enablePadding(false)
.formatVersion("string")
.paddingTolerance(0)
.rowIndexStride(0)
.stripeSizeBytes(0)
.build())
.parquetSerDe(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs.builder()
.blockSizeBytes(0)
.compression("string")
.enableDictionaryCompression(false)
.maxPaddingBytes(0)
.pageSizeBytes(0)
.writerVersion("string")
.build())
.build())
.build())
.schemaConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs.builder()
.databaseName("string")
.roleArn("string")
.tableName("string")
.catalogId("string")
.region("string")
.versionId("string")
.build())
.enabled(false)
.build())
.dynamicPartitioningConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs.builder()
.enabled(false)
.retryDuration(0)
.build())
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.kmsKeyArn("string")
.prefix("string")
.processingConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.bufferingInterval(0)
.s3BackupConfiguration(FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.s3BackupMode("string")
.build())
.opensearchConfiguration(FirehoseDeliveryStreamOpensearchConfigurationArgs.builder()
.indexName("string")
.s3Configuration(FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.roleArn("string")
.clusterEndpoint("string")
.documentIdOptions(FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptionsArgs.builder()
.defaultDocumentIdFormat("string")
.build())
.domainArn("string")
.bufferingInterval(0)
.indexRotationPeriod("string")
.processingConfiguration(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.retryDuration(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.s3BackupMode("string")
.bufferingSize(0)
.typeName("string")
.vpcConfig(FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs.builder()
.roleArn("string")
.securityGroupIds("string")
.subnetIds("string")
.vpcId("string")
.build())
.build())
.icebergConfiguration(FirehoseDeliveryStreamIcebergConfigurationArgs.builder()
.catalogArn("string")
.roleArn("string")
.s3Configuration(FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamIcebergConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.destinationTableConfigurations(FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs.builder()
.databaseName("string")
.tableName("string")
.s3ErrorOutputPrefix("string")
.uniqueKeys("string")
.build())
.processingConfiguration(FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.retryDuration(0)
.s3BackupMode("string")
.build())
.kinesisSourceConfiguration(FirehoseDeliveryStreamKinesisSourceConfigurationArgs.builder()
.kinesisStreamArn("string")
.roleArn("string")
.build())
.opensearchserverlessConfiguration(FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs.builder()
.collectionEndpoint("string")
.indexName("string")
.roleArn("string")
.s3Configuration(FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamOpensearchserverlessConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.processingConfiguration(FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.retryDuration(0)
.s3BackupMode("string")
.vpcConfig(FirehoseDeliveryStreamOpensearchserverlessConfigurationVpcConfigArgs.builder()
.roleArn("string")
.securityGroupIds("string")
.subnetIds("string")
.vpcId("string")
.build())
.build())
.elasticsearchConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationArgs.builder()
.indexName("string")
.s3Configuration(FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.roleArn("string")
.clusterEndpoint("string")
.domainArn("string")
.bufferingInterval(0)
.indexRotationPeriod("string")
.processingConfiguration(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.retryDuration(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.s3BackupMode("string")
.bufferingSize(0)
.typeName("string")
.vpcConfig(FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs.builder()
.roleArn("string")
.securityGroupIds("string")
.subnetIds("string")
.vpcId("string")
.build())
.build())
.destinationId("string")
.arn("string")
.redshiftConfiguration(FirehoseDeliveryStreamRedshiftConfigurationArgs.builder()
.dataTableName("string")
.clusterJdbcurl("string")
.s3Configuration(FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.roleArn("string")
.processingConfiguration(FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.password("string")
.cloudwatchLoggingOptions(FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.retryDuration(0)
.dataTableColumns("string")
.s3BackupConfiguration(FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.s3BackupMode("string")
.copyOptions("string")
.secretsManagerConfiguration(FirehoseDeliveryStreamRedshiftConfigurationSecretsManagerConfigurationArgs.builder()
.enabled(false)
.roleArn("string")
.secretArn("string")
.build())
.username("string")
.build())
.serverSideEncryption(FirehoseDeliveryStreamServerSideEncryptionArgs.builder()
.enabled(false)
.keyArn("string")
.keyType("string")
.build())
.snowflakeConfiguration(FirehoseDeliveryStreamSnowflakeConfigurationArgs.builder()
.database("string")
.table("string")
.schema("string")
.s3Configuration(FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.roleArn("string")
.accountUrl("string")
.dataLoadingOption("string")
.s3BackupMode("string")
.metadataColumnName("string")
.privateKey("string")
.processingConfiguration(FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.retryDuration(0)
.contentColumnName("string")
.keyPassphrase("string")
.cloudwatchLoggingOptions(FirehoseDeliveryStreamSnowflakeConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.bufferingSize(0)
.secretsManagerConfiguration(FirehoseDeliveryStreamSnowflakeConfigurationSecretsManagerConfigurationArgs.builder()
.enabled(false)
.roleArn("string")
.secretArn("string")
.build())
.snowflakeRoleConfiguration(FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeRoleConfigurationArgs.builder()
.enabled(false)
.snowflakeRole("string")
.build())
.snowflakeVpcConfiguration(FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeVpcConfigurationArgs.builder()
.privateLinkVpceId("string")
.build())
.bufferingInterval(0)
.user("string")
.build())
.splunkConfiguration(FirehoseDeliveryStreamSplunkConfigurationArgs.builder()
.hecEndpoint("string")
.s3Configuration(FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs.builder()
.bucketArn("string")
.roleArn("string")
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.compressionFormat("string")
.errorOutputPrefix("string")
.kmsKeyArn("string")
.prefix("string")
.build())
.bufferingInterval(0)
.bufferingSize(0)
.cloudwatchLoggingOptions(FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptionsArgs.builder()
.enabled(false)
.logGroupName("string")
.logStreamName("string")
.build())
.hecAcknowledgmentTimeout(0)
.hecEndpointType("string")
.hecToken("string")
.processingConfiguration(FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationArgs.builder()
.enabled(false)
.processors(FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorArgs.builder()
.type("string")
.parameters(FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameterArgs.builder()
.parameterName("string")
.parameterValue("string")
.build())
.build())
.build())
.retryDuration(0)
.s3BackupMode("string")
.secretsManagerConfiguration(FirehoseDeliveryStreamSplunkConfigurationSecretsManagerConfigurationArgs.builder()
.enabled(false)
.roleArn("string")
.secretArn("string")
.build())
.build())
.tags(Map.of("string", "string"))
.versionId("string")
.build());
firehose_delivery_stream_resource = aws.kinesis.FirehoseDeliveryStream("firehoseDeliveryStreamResource",
destination="string",
msk_source_configuration={
"authentication_configuration": {
"connectivity": "string",
"role_arn": "string",
},
"msk_cluster_arn": "string",
"topic_name": "string",
},
http_endpoint_configuration={
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"url": "string",
"request_configuration": {
"common_attributes": [{
"name": "string",
"value": "string",
}],
"content_encoding": "string",
},
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"name": "string",
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"access_key": "string",
"retry_duration": 0,
"role_arn": "string",
"s3_backup_mode": "string",
"buffering_size": 0,
"secrets_manager_configuration": {
"enabled": False,
"role_arn": "string",
"secret_arn": "string",
},
"buffering_interval": 0,
},
name="string",
extended_s3_configuration={
"bucket_arn": "string",
"role_arn": "string",
"error_output_prefix": "string",
"file_extension": "string",
"compression_format": "string",
"custom_time_zone": "string",
"data_format_conversion_configuration": {
"input_format_configuration": {
"deserializer": {
"hive_json_ser_de": {
"timestamp_formats": ["string"],
},
"open_x_json_ser_de": {
"case_insensitive": False,
"column_to_json_key_mappings": {
"string": "string",
},
"convert_dots_in_json_keys_to_underscores": False,
},
},
},
"output_format_configuration": {
"serializer": {
"orc_ser_de": {
"block_size_bytes": 0,
"bloom_filter_columns": ["string"],
"bloom_filter_false_positive_probability": 0,
"compression": "string",
"dictionary_key_threshold": 0,
"enable_padding": False,
"format_version": "string",
"padding_tolerance": 0,
"row_index_stride": 0,
"stripe_size_bytes": 0,
},
"parquet_ser_de": {
"block_size_bytes": 0,
"compression": "string",
"enable_dictionary_compression": False,
"max_padding_bytes": 0,
"page_size_bytes": 0,
"writer_version": "string",
},
},
},
"schema_configuration": {
"database_name": "string",
"role_arn": "string",
"table_name": "string",
"catalog_id": "string",
"region": "string",
"version_id": "string",
},
"enabled": False,
},
"dynamic_partitioning_configuration": {
"enabled": False,
"retry_duration": 0,
},
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"kms_key_arn": "string",
"prefix": "string",
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"buffering_interval": 0,
"s3_backup_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"s3_backup_mode": "string",
},
opensearch_configuration={
"index_name": "string",
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"role_arn": "string",
"cluster_endpoint": "string",
"document_id_options": {
"default_document_id_format": "string",
},
"domain_arn": "string",
"buffering_interval": 0,
"index_rotation_period": "string",
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"retry_duration": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"s3_backup_mode": "string",
"buffering_size": 0,
"type_name": "string",
"vpc_config": {
"role_arn": "string",
"security_group_ids": ["string"],
"subnet_ids": ["string"],
"vpc_id": "string",
},
},
iceberg_configuration={
"catalog_arn": "string",
"role_arn": "string",
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"destination_table_configurations": [{
"database_name": "string",
"table_name": "string",
"s3_error_output_prefix": "string",
"unique_keys": ["string"],
}],
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"retry_duration": 0,
"s3_backup_mode": "string",
},
kinesis_source_configuration={
"kinesis_stream_arn": "string",
"role_arn": "string",
},
opensearchserverless_configuration={
"collection_endpoint": "string",
"index_name": "string",
"role_arn": "string",
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"retry_duration": 0,
"s3_backup_mode": "string",
"vpc_config": {
"role_arn": "string",
"security_group_ids": ["string"],
"subnet_ids": ["string"],
"vpc_id": "string",
},
},
elasticsearch_configuration={
"index_name": "string",
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"role_arn": "string",
"cluster_endpoint": "string",
"domain_arn": "string",
"buffering_interval": 0,
"index_rotation_period": "string",
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"retry_duration": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"s3_backup_mode": "string",
"buffering_size": 0,
"type_name": "string",
"vpc_config": {
"role_arn": "string",
"security_group_ids": ["string"],
"subnet_ids": ["string"],
"vpc_id": "string",
},
},
destination_id="string",
arn="string",
redshift_configuration={
"data_table_name": "string",
"cluster_jdbcurl": "string",
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"role_arn": "string",
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"password": "string",
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"retry_duration": 0,
"data_table_columns": "string",
"s3_backup_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"s3_backup_mode": "string",
"copy_options": "string",
"secrets_manager_configuration": {
"enabled": False,
"role_arn": "string",
"secret_arn": "string",
},
"username": "string",
},
server_side_encryption={
"enabled": False,
"key_arn": "string",
"key_type": "string",
},
snowflake_configuration={
"database": "string",
"table": "string",
"schema": "string",
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"role_arn": "string",
"account_url": "string",
"data_loading_option": "string",
"s3_backup_mode": "string",
"metadata_column_name": "string",
"private_key": "string",
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"retry_duration": 0,
"content_column_name": "string",
"key_passphrase": "string",
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"buffering_size": 0,
"secrets_manager_configuration": {
"enabled": False,
"role_arn": "string",
"secret_arn": "string",
},
"snowflake_role_configuration": {
"enabled": False,
"snowflake_role": "string",
},
"snowflake_vpc_configuration": {
"private_link_vpce_id": "string",
},
"buffering_interval": 0,
"user": "string",
},
splunk_configuration={
"hec_endpoint": "string",
"s3_configuration": {
"bucket_arn": "string",
"role_arn": "string",
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"compression_format": "string",
"error_output_prefix": "string",
"kms_key_arn": "string",
"prefix": "string",
},
"buffering_interval": 0,
"buffering_size": 0,
"cloudwatch_logging_options": {
"enabled": False,
"log_group_name": "string",
"log_stream_name": "string",
},
"hec_acknowledgment_timeout": 0,
"hec_endpoint_type": "string",
"hec_token": "string",
"processing_configuration": {
"enabled": False,
"processors": [{
"type": "string",
"parameters": [{
"parameter_name": "string",
"parameter_value": "string",
}],
}],
},
"retry_duration": 0,
"s3_backup_mode": "string",
"secrets_manager_configuration": {
"enabled": False,
"role_arn": "string",
"secret_arn": "string",
},
},
tags={
"string": "string",
},
version_id="string")
const firehoseDeliveryStreamResource = new aws.kinesis.FirehoseDeliveryStream("firehoseDeliveryStreamResource", {
destination: "string",
mskSourceConfiguration: {
authenticationConfiguration: {
connectivity: "string",
roleArn: "string",
},
mskClusterArn: "string",
topicName: "string",
},
httpEndpointConfiguration: {
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
url: "string",
requestConfiguration: {
commonAttributes: [{
name: "string",
value: "string",
}],
contentEncoding: "string",
},
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
name: "string",
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
accessKey: "string",
retryDuration: 0,
roleArn: "string",
s3BackupMode: "string",
bufferingSize: 0,
secretsManagerConfiguration: {
enabled: false,
roleArn: "string",
secretArn: "string",
},
bufferingInterval: 0,
},
name: "string",
extendedS3Configuration: {
bucketArn: "string",
roleArn: "string",
errorOutputPrefix: "string",
fileExtension: "string",
compressionFormat: "string",
customTimeZone: "string",
dataFormatConversionConfiguration: {
inputFormatConfiguration: {
deserializer: {
hiveJsonSerDe: {
timestampFormats: ["string"],
},
openXJsonSerDe: {
caseInsensitive: false,
columnToJsonKeyMappings: {
string: "string",
},
convertDotsInJsonKeysToUnderscores: false,
},
},
},
outputFormatConfiguration: {
serializer: {
orcSerDe: {
blockSizeBytes: 0,
bloomFilterColumns: ["string"],
bloomFilterFalsePositiveProbability: 0,
compression: "string",
dictionaryKeyThreshold: 0,
enablePadding: false,
formatVersion: "string",
paddingTolerance: 0,
rowIndexStride: 0,
stripeSizeBytes: 0,
},
parquetSerDe: {
blockSizeBytes: 0,
compression: "string",
enableDictionaryCompression: false,
maxPaddingBytes: 0,
pageSizeBytes: 0,
writerVersion: "string",
},
},
},
schemaConfiguration: {
databaseName: "string",
roleArn: "string",
tableName: "string",
catalogId: "string",
region: "string",
versionId: "string",
},
enabled: false,
},
dynamicPartitioningConfiguration: {
enabled: false,
retryDuration: 0,
},
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
kmsKeyArn: "string",
prefix: "string",
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
bufferingInterval: 0,
s3BackupConfiguration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
s3BackupMode: "string",
},
opensearchConfiguration: {
indexName: "string",
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
roleArn: "string",
clusterEndpoint: "string",
documentIdOptions: {
defaultDocumentIdFormat: "string",
},
domainArn: "string",
bufferingInterval: 0,
indexRotationPeriod: "string",
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
retryDuration: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
s3BackupMode: "string",
bufferingSize: 0,
typeName: "string",
vpcConfig: {
roleArn: "string",
securityGroupIds: ["string"],
subnetIds: ["string"],
vpcId: "string",
},
},
icebergConfiguration: {
catalogArn: "string",
roleArn: "string",
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
destinationTableConfigurations: [{
databaseName: "string",
tableName: "string",
s3ErrorOutputPrefix: "string",
uniqueKeys: ["string"],
}],
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
retryDuration: 0,
s3BackupMode: "string",
},
kinesisSourceConfiguration: {
kinesisStreamArn: "string",
roleArn: "string",
},
opensearchserverlessConfiguration: {
collectionEndpoint: "string",
indexName: "string",
roleArn: "string",
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
retryDuration: 0,
s3BackupMode: "string",
vpcConfig: {
roleArn: "string",
securityGroupIds: ["string"],
subnetIds: ["string"],
vpcId: "string",
},
},
elasticsearchConfiguration: {
indexName: "string",
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
roleArn: "string",
clusterEndpoint: "string",
domainArn: "string",
bufferingInterval: 0,
indexRotationPeriod: "string",
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
retryDuration: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
s3BackupMode: "string",
bufferingSize: 0,
typeName: "string",
vpcConfig: {
roleArn: "string",
securityGroupIds: ["string"],
subnetIds: ["string"],
vpcId: "string",
},
},
destinationId: "string",
arn: "string",
redshiftConfiguration: {
dataTableName: "string",
clusterJdbcurl: "string",
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
roleArn: "string",
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
password: "string",
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
retryDuration: 0,
dataTableColumns: "string",
s3BackupConfiguration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
s3BackupMode: "string",
copyOptions: "string",
secretsManagerConfiguration: {
enabled: false,
roleArn: "string",
secretArn: "string",
},
username: "string",
},
serverSideEncryption: {
enabled: false,
keyArn: "string",
keyType: "string",
},
snowflakeConfiguration: {
database: "string",
table: "string",
schema: "string",
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
roleArn: "string",
accountUrl: "string",
dataLoadingOption: "string",
s3BackupMode: "string",
metadataColumnName: "string",
privateKey: "string",
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
retryDuration: 0,
contentColumnName: "string",
keyPassphrase: "string",
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
bufferingSize: 0,
secretsManagerConfiguration: {
enabled: false,
roleArn: "string",
secretArn: "string",
},
snowflakeRoleConfiguration: {
enabled: false,
snowflakeRole: "string",
},
snowflakeVpcConfiguration: {
privateLinkVpceId: "string",
},
bufferingInterval: 0,
user: "string",
},
splunkConfiguration: {
hecEndpoint: "string",
s3Configuration: {
bucketArn: "string",
roleArn: "string",
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
compressionFormat: "string",
errorOutputPrefix: "string",
kmsKeyArn: "string",
prefix: "string",
},
bufferingInterval: 0,
bufferingSize: 0,
cloudwatchLoggingOptions: {
enabled: false,
logGroupName: "string",
logStreamName: "string",
},
hecAcknowledgmentTimeout: 0,
hecEndpointType: "string",
hecToken: "string",
processingConfiguration: {
enabled: false,
processors: [{
type: "string",
parameters: [{
parameterName: "string",
parameterValue: "string",
}],
}],
},
retryDuration: 0,
s3BackupMode: "string",
secretsManagerConfiguration: {
enabled: false,
roleArn: "string",
secretArn: "string",
},
},
tags: {
string: "string",
},
versionId: "string",
});
type: aws:kinesis:FirehoseDeliveryStream
properties:
arn: string
destination: string
destinationId: string
elasticsearchConfiguration:
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
clusterEndpoint: string
domainArn: string
indexName: string
indexRotationPeriod: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
retryDuration: 0
roleArn: string
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
typeName: string
vpcConfig:
roleArn: string
securityGroupIds:
- string
subnetIds:
- string
vpcId: string
extendedS3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
customTimeZone: string
dataFormatConversionConfiguration:
enabled: false
inputFormatConfiguration:
deserializer:
hiveJsonSerDe:
timestampFormats:
- string
openXJsonSerDe:
caseInsensitive: false
columnToJsonKeyMappings:
string: string
convertDotsInJsonKeysToUnderscores: false
outputFormatConfiguration:
serializer:
orcSerDe:
blockSizeBytes: 0
bloomFilterColumns:
- string
bloomFilterFalsePositiveProbability: 0
compression: string
dictionaryKeyThreshold: 0
enablePadding: false
formatVersion: string
paddingTolerance: 0
rowIndexStride: 0
stripeSizeBytes: 0
parquetSerDe:
blockSizeBytes: 0
compression: string
enableDictionaryCompression: false
maxPaddingBytes: 0
pageSizeBytes: 0
writerVersion: string
schemaConfiguration:
catalogId: string
databaseName: string
region: string
roleArn: string
tableName: string
versionId: string
dynamicPartitioningConfiguration:
enabled: false
retryDuration: 0
errorOutputPrefix: string
fileExtension: string
kmsKeyArn: string
prefix: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
roleArn: string
s3BackupConfiguration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
s3BackupMode: string
httpEndpointConfiguration:
accessKey: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
name: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
requestConfiguration:
commonAttributes:
- name: string
value: string
contentEncoding: string
retryDuration: 0
roleArn: string
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
secretsManagerConfiguration:
enabled: false
roleArn: string
secretArn: string
url: string
icebergConfiguration:
bufferingInterval: 0
bufferingSize: 0
catalogArn: string
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
destinationTableConfigurations:
- databaseName: string
s3ErrorOutputPrefix: string
tableName: string
uniqueKeys:
- string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
retryDuration: 0
roleArn: string
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
kinesisSourceConfiguration:
kinesisStreamArn: string
roleArn: string
mskSourceConfiguration:
authenticationConfiguration:
connectivity: string
roleArn: string
mskClusterArn: string
topicName: string
name: string
opensearchConfiguration:
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
clusterEndpoint: string
documentIdOptions:
defaultDocumentIdFormat: string
domainArn: string
indexName: string
indexRotationPeriod: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
retryDuration: 0
roleArn: string
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
typeName: string
vpcConfig:
roleArn: string
securityGroupIds:
- string
subnetIds:
- string
vpcId: string
opensearchserverlessConfiguration:
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
collectionEndpoint: string
indexName: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
retryDuration: 0
roleArn: string
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
vpcConfig:
roleArn: string
securityGroupIds:
- string
subnetIds:
- string
vpcId: string
redshiftConfiguration:
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
clusterJdbcurl: string
copyOptions: string
dataTableColumns: string
dataTableName: string
password: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
retryDuration: 0
roleArn: string
s3BackupConfiguration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
secretsManagerConfiguration:
enabled: false
roleArn: string
secretArn: string
username: string
serverSideEncryption:
enabled: false
keyArn: string
keyType: string
snowflakeConfiguration:
accountUrl: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
contentColumnName: string
dataLoadingOption: string
database: string
keyPassphrase: string
metadataColumnName: string
privateKey: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
retryDuration: 0
roleArn: string
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
schema: string
secretsManagerConfiguration:
enabled: false
roleArn: string
secretArn: string
snowflakeRoleConfiguration:
enabled: false
snowflakeRole: string
snowflakeVpcConfiguration:
privateLinkVpceId: string
table: string
user: string
splunkConfiguration:
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
hecAcknowledgmentTimeout: 0
hecEndpoint: string
hecEndpointType: string
hecToken: string
processingConfiguration:
enabled: false
processors:
- parameters:
- parameterName: string
parameterValue: string
type: string
retryDuration: 0
s3BackupMode: string
s3Configuration:
bucketArn: string
bufferingInterval: 0
bufferingSize: 0
cloudwatchLoggingOptions:
enabled: false
logGroupName: string
logStreamName: string
compressionFormat: string
errorOutputPrefix: string
kmsKeyArn: string
prefix: string
roleArn: string
secretsManagerConfiguration:
enabled: false
roleArn: string
secretArn: string
tags:
string: string
versionId: string
FirehoseDeliveryStream Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
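For example, the following minimal sketch passes the same extended_s3_configuration input first as a dictionary literal and then as a generated argument class. The role and bucket ARNs are placeholders, and the class name follows the generated ...Args naming used throughout this page:
import pulumi_aws as aws

# Placeholder ARNs for illustration only.
role_arn = "arn:aws:iam::123456789012:role/firehose-role"
bucket_arn = "arn:aws:s3:::example-bucket"

# Inputs as dictionary literals.
stream_from_dicts = aws.kinesis.FirehoseDeliveryStream("stream-from-dicts",
    destination="extended_s3",
    extended_s3_configuration={
        "role_arn": role_arn,
        "bucket_arn": bucket_arn,
    })

# The same inputs as generated argument classes.
stream_from_args = aws.kinesis.FirehoseDeliveryStream("stream-from-args",
    destination="extended_s3",
    extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
        role_arn=role_arn,
        bucket_arn=bucket_arn,
    ))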
The FirehoseDeliveryStream resource accepts the following input properties:
- Destination string
- This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, opensearch, opensearchserverless and snowflake.
- Arn string
- The Amazon Resource Name (ARN) specifying the Stream
- DestinationId string
- ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration
- Configuration options when destination is elasticsearch. See elasticsearch_configuration block below for details.
- ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration
- Enhanced configuration options for the s3 destination. See extended_s3_configuration block below for details.
- HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration
- Configuration options when destination is http_endpoint. Requires the user to also specify an s3_configuration block. See http_endpoint_configuration block below for details.
- IcebergConfiguration FirehoseDeliveryStreamIcebergConfiguration
- Configuration options when destination is iceberg. See iceberg_configuration block below for details.
- KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration
- The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See kinesis_source_configuration block below for details.
- MskSourceConfiguration FirehoseDeliveryStreamMskSourceConfiguration
- The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See msk_source_configuration block below for details.
- Name string
- A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS Documentation for more details.
- OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration
- Configuration options when destination is opensearch. See opensearch_configuration block below for details.
- OpensearchserverlessConfiguration FirehoseDeliveryStreamOpensearchserverlessConfiguration
- Configuration options when destination is opensearchserverless. See opensearchserverless_configuration block below for details.
- RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration
- Configuration options when destination is redshift. Requires the user to also specify an s3_configuration block. See redshift_configuration block below for details.
- ServerSideEncryption FirehoseDeliveryStreamServerSideEncryption
- Encrypt at rest options. See server_side_encryption block below for details. NOTE: Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream (a minimal sketch follows this list).
- SnowflakeConfiguration FirehoseDeliveryStreamSnowflakeConfiguration
- Configuration options when destination is snowflake. See snowflake_configuration block below for details.
- SplunkConfiguration FirehoseDeliveryStreamSplunkConfiguration
- Configuration options when destination is splunk. See splunk_configuration block below for details.
- Tags Dictionary<string, string>
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.
- VersionId string
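As a minimal sketch of the server_side_encryption note above, the snippet below enables encryption at rest for an extended_s3 delivery stream. The ARNs are placeholders, and the key_type value shown follows the Firehose API's AWS_OWNED_CMK / CUSTOMER_MANAGED_CMK options:
import pulumi_aws as aws

# Encrypt the delivery stream at rest with an AWS-owned key.
# Do not combine this with kinesis_source_configuration: server-side
# encryption is not supported when a Kinesis stream is the source.
encrypted_stream = aws.kinesis.FirehoseDeliveryStream("encrypted-stream",
    destination="extended_s3",
    extended_s3_configuration={
        "role_arn": "arn:aws:iam::123456789012:role/firehose-role",  # placeholder
        "bucket_arn": "arn:aws:s3:::example-bucket",  # placeholder
    },
    server_side_encryption={
        "enabled": True,
        "key_type": "AWS_OWNED_CMK",
    })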
- Destination string
- This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, opensearch, opensearchserverless and snowflake.
- Arn string
- The Amazon Resource Name (ARN) specifying the Stream
- DestinationId string
- ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfigurationArgs
- Configuration options when destination is elasticsearch. See elasticsearch_configuration block below for details.
- ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3ConfigurationArgs
- Enhanced configuration options for the s3 destination. See extended_s3_configuration block below for details.
- HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationArgs
- Configuration options when destination is http_endpoint. Requires the user to also specify an s3_configuration block (see the sketch after this list). See http_endpoint_configuration block below for details.
- IcebergConfiguration FirehoseDeliveryStreamIcebergConfigurationArgs
- Configuration options when destination is iceberg. See iceberg_configuration block below for details.
- KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfigurationArgs
- The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See kinesis_source_configuration block below for details.
- MskSourceConfiguration FirehoseDeliveryStreamMskSourceConfigurationArgs
- The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See msk_source_configuration block below for details.
- Name string
- A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When used for WAF logging, the name must be prefixed with aws-waf-logs-. See the AWS Documentation for more details.
- OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfigurationArgs
- Configuration options when destination is opensearch. See opensearch_configuration block below for details.
- OpensearchserverlessConfiguration FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs
- Configuration options when destination is opensearchserverless. See opensearchserverless_configuration block below for details.
- RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfigurationArgs
- Configuration options when destination is redshift. Requires the user to also specify an s3_configuration block. See redshift_configuration block below for details.
- ServerSideEncryption FirehoseDeliveryStreamServerSideEncryptionArgs
- Encrypt at rest options. See server_side_encryption block below for details. NOTE: Server-side encryption should not be enabled when a Kinesis stream is configured as the source of the Firehose delivery stream.
- SnowflakeConfiguration FirehoseDeliveryStreamSnowflakeConfigurationArgs
- Configuration options when destination is snowflake. See snowflake_configuration block below for details.
- SplunkConfiguration FirehoseDeliveryStreamSplunkConfigurationArgs
- Configuration options when destination is splunk. See splunk_configuration block below for details.
- Tags map[string]string
- A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.
- VersionId string
- destination String
- This is the destination to where the data is delivered. The only options are
s3
(Deprecated, useextended_s3
instead),extended_s3
,redshift
,elasticsearch
,splunk
,http_endpoint
,opensearch
,opensearchserverless
andsnowflake
. - arn String
- The Amazon Resource Name (ARN) specifying the Stream
- destination
Id String - elasticsearch
Configuration FirehoseDelivery Stream Elasticsearch Configuration - Configuration options when
destination
iselasticsearch
. Seeelasticsearch_configuration
block below for details. - extended
S3Configuration FirehoseDelivery Stream Extended S3Configuration - Enhanced configuration options for the s3 destination. See
extended_s3_configuration
block below for details. - http
Endpoint FirehoseConfiguration Delivery Stream Http Endpoint Configuration - Configuration options when
destination
ishttp_endpoint
. Requires the user to also specify ans3_configuration
block. Seehttp_endpoint_configuration
block below for details. - iceberg
Configuration FirehoseDelivery Stream Iceberg Configuration - Configuration options when
destination
isiceberg
. Seeiceberg_configuration
block below for details. - kinesis
Source FirehoseConfiguration Delivery Stream Kinesis Source Configuration - The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See
kinesis_source_configuration
block below for details. - msk
Source FirehoseConfiguration Delivery Stream Msk Source Configuration - The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See
msk_source_configuration
block below for details. - name String
- A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with
aws-waf-logs-
. See AWS Documentation for more details. - opensearch
Configuration FirehoseDelivery Stream Opensearch Configuration - Configuration options when
destination
isopensearch
. Seeopensearch_configuration
block below for details. - opensearchserverless
Configuration FirehoseDelivery Stream Opensearchserverless Configuration - Configuration options when
destination
isopensearchserverless
. Seeopensearchserverless_configuration
block below for details. - redshift
Configuration FirehoseDelivery Stream Redshift Configuration - Configuration options when
destination
isredshift
. Requires the user to also specify ans3_configuration
block. Seeredshift_configuration
block below for details. - server
Side FirehoseEncryption Delivery Stream Server Side Encryption Encrypt at rest options. See
server_side_encryption
block below for details.NOTE: Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
- snowflake
Configuration FirehoseDelivery Stream Snowflake Configuration - Configuration options when
destination
issnowflake
. Seesnowflake_configuration
block below for details. - splunk
Configuration FirehoseDelivery Stream Splunk Configuration - Configuration options when
destination
issplunk
. Seesplunk_configuration
block below for details. - Map<String,String>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - version
Id String
- destination string
- This is the destination to where the data is delivered. The only options are
s3
(Deprecated, useextended_s3
instead),extended_s3
,redshift
,elasticsearch
,splunk
,http_endpoint
,opensearch
,opensearchserverless
andsnowflake
. - arn string
- The Amazon Resource Name (ARN) specifying the Stream
- destination
Id string - elasticsearch
Configuration FirehoseDelivery Stream Elasticsearch Configuration - Configuration options when
destination
iselasticsearch
. Seeelasticsearch_configuration
block below for details. - extended
S3Configuration FirehoseDelivery Stream Extended S3Configuration - Enhanced configuration options for the s3 destination. See
extended_s3_configuration
block below for details. - http
Endpoint FirehoseConfiguration Delivery Stream Http Endpoint Configuration - Configuration options when
destination
ishttp_endpoint
. Requires the user to also specify ans3_configuration
block. Seehttp_endpoint_configuration
block below for details. - iceberg
Configuration FirehoseDelivery Stream Iceberg Configuration - Configuration options when
destination
isiceberg
. Seeiceberg_configuration
block below for details. - kinesis
Source FirehoseConfiguration Delivery Stream Kinesis Source Configuration - The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See
kinesis_source_configuration
block below for details. - msk
Source FirehoseConfiguration Delivery Stream Msk Source Configuration - The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See
msk_source_configuration
block below for details. - name string
- A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with
aws-waf-logs-
. See AWS Documentation for more details. - opensearch
Configuration FirehoseDelivery Stream Opensearch Configuration - Configuration options when
destination
isopensearch
. Seeopensearch_configuration
block below for details. - opensearchserverless
Configuration FirehoseDelivery Stream Opensearchserverless Configuration - Configuration options when
destination
isopensearchserverless
. Seeopensearchserverless_configuration
block below for details. - redshift
Configuration FirehoseDelivery Stream Redshift Configuration - Configuration options when
destination
isredshift
. Requires the user to also specify ans3_configuration
block. Seeredshift_configuration
block below for details. - server
Side FirehoseEncryption Delivery Stream Server Side Encryption Encrypt at rest options. See
server_side_encryption
block below for details.NOTE: Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
- snowflake
Configuration FirehoseDelivery Stream Snowflake Configuration - Configuration options when
destination
issnowflake
. Seesnowflake_configuration
block below for details. - splunk
Configuration FirehoseDelivery Stream Splunk Configuration - Configuration options when
destination
issplunk
. Seesplunk_configuration
block below for details. - {[key: string]: string}
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - version
Id string
- destination str
- This is the destination to where the data is delivered. The only options are
s3
(Deprecated, useextended_s3
instead),extended_s3
,redshift
,elasticsearch
,splunk
,http_endpoint
,opensearch
,opensearchserverless
andsnowflake
. - arn str
- The Amazon Resource Name (ARN) specifying the Stream
- destination_
id str - elasticsearch_
configuration FirehoseDelivery Stream Elasticsearch Configuration Args - Configuration options when
destination
iselasticsearch
. Seeelasticsearch_configuration
block below for details. - extended_
s3_ Firehoseconfiguration Delivery Stream Extended S3Configuration Args - Enhanced configuration options for the s3 destination. See
extended_s3_configuration
block below for details. - http_
endpoint_ Firehoseconfiguration Delivery Stream Http Endpoint Configuration Args - Configuration options when
destination
ishttp_endpoint
. Requires the user to also specify ans3_configuration
block. Seehttp_endpoint_configuration
block below for details. - iceberg_
configuration FirehoseDelivery Stream Iceberg Configuration Args - Configuration options when
destination
isiceberg
. Seeiceberg_configuration
block below for details. - kinesis_
source_ Firehoseconfiguration Delivery Stream Kinesis Source Configuration Args - The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See
kinesis_source_configuration
block below for details. - msk_
source_ Firehoseconfiguration Delivery Stream Msk Source Configuration Args - The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See
msk_source_configuration
block below for details. - name str
- A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with
aws-waf-logs-
. See AWS Documentation for more details. - opensearch_
configuration FirehoseDelivery Stream Opensearch Configuration Args - Configuration options when
destination
isopensearch
. Seeopensearch_configuration
block below for details. - opensearchserverless_
configuration FirehoseDelivery Stream Opensearchserverless Configuration Args - Configuration options when
destination
isopensearchserverless
. Seeopensearchserverless_configuration
block below for details. - redshift_
configuration FirehoseDelivery Stream Redshift Configuration Args - Configuration options when
destination
isredshift
. Requires the user to also specify ans3_configuration
block. Seeredshift_configuration
block below for details. - server_
side_ Firehoseencryption Delivery Stream Server Side Encryption Args Encrypt at rest options. See
server_side_encryption
block below for details.NOTE: Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
- snowflake_
configuration FirehoseDelivery Stream Snowflake Configuration Args - Configuration options when
destination
issnowflake
. Seesnowflake_configuration
block below for details. - splunk_
configuration FirehoseDelivery Stream Splunk Configuration Args - Configuration options when
destination
issplunk
. Seesplunk_configuration
block below for details. - Mapping[str, str]
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - version_
id str
- destination String
- This is the destination to where the data is delivered. The only options are
s3
(Deprecated, useextended_s3
instead),extended_s3
,redshift
,elasticsearch
,splunk
,http_endpoint
,opensearch
,opensearchserverless
andsnowflake
. - arn String
- The Amazon Resource Name (ARN) specifying the Stream
- destination
Id String - elasticsearch
Configuration Property Map - Configuration options when
destination
iselasticsearch
. Seeelasticsearch_configuration
block below for details. - extended
S3Configuration Property Map - Enhanced configuration options for the s3 destination. See
extended_s3_configuration
block below for details. - http
Endpoint Property MapConfiguration - Configuration options when
destination
ishttp_endpoint
. Requires the user to also specify ans3_configuration
block. Seehttp_endpoint_configuration
block below for details. - iceberg
Configuration Property Map - Configuration options when
destination
isiceberg
. Seeiceberg_configuration
block below for details. - kinesis
Source Property MapConfiguration - The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See
kinesis_source_configuration
block below for details. - msk
Source Property MapConfiguration - The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See
msk_source_configuration
block below for details. - name String
- A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with
aws-waf-logs-
. See AWS Documentation for more details. - opensearch
Configuration Property Map - Configuration options when
destination
isopensearch
. Seeopensearch_configuration
block below for details. - opensearchserverless
Configuration Property Map - Configuration options when
destination
isopensearchserverless
. Seeopensearchserverless_configuration
block below for details. - redshift
Configuration Property Map - Configuration options when
destination
isredshift
. Requires the user to also specify ans3_configuration
block. Seeredshift_configuration
block below for details. - server
Side Property MapEncryption Encrypt at rest options. See
server_side_encryption
block below for details.NOTE: Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
- snowflake
Configuration Property Map - Configuration options when
destination
issnowflake
. Seesnowflake_configuration
block below for details. - splunk
Configuration Property Map - Configuration options when
destination
issplunk
. Seesplunk_configuration
block below for details. - Map<String>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - version
Id String
Outputs
All input properties are implicitly available as output properties. Additionally, the FirehoseDeliveryStream resource produces the following output properties:
- Id string - The provider-assigned unique ID for this managed resource.
- TagsAll Dictionary<string, string> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
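Continuing the sketch above, the provider-assigned id and the stream ARN can be exported as stack outputs (encryptedStream refers to the illustrative delivery stream defined earlier):
export const streamId = encryptedStream.id;    // provider-assigned ID
export const streamArn = encryptedStream.arn;  // Amazon Resource Name of the stream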
Look up Existing FirehoseDeliveryStream Resource
Get an existing FirehoseDeliveryStream resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: FirehoseDeliveryStreamState, opts?: CustomResourceOptions): FirehoseDeliveryStream
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
destination: Optional[str] = None,
destination_id: Optional[str] = None,
elasticsearch_configuration: Optional[FirehoseDeliveryStreamElasticsearchConfigurationArgs] = None,
extended_s3_configuration: Optional[FirehoseDeliveryStreamExtendedS3ConfigurationArgs] = None,
http_endpoint_configuration: Optional[FirehoseDeliveryStreamHttpEndpointConfigurationArgs] = None,
iceberg_configuration: Optional[FirehoseDeliveryStreamIcebergConfigurationArgs] = None,
kinesis_source_configuration: Optional[FirehoseDeliveryStreamKinesisSourceConfigurationArgs] = None,
msk_source_configuration: Optional[FirehoseDeliveryStreamMskSourceConfigurationArgs] = None,
name: Optional[str] = None,
opensearch_configuration: Optional[FirehoseDeliveryStreamOpensearchConfigurationArgs] = None,
opensearchserverless_configuration: Optional[FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs] = None,
redshift_configuration: Optional[FirehoseDeliveryStreamRedshiftConfigurationArgs] = None,
server_side_encryption: Optional[FirehoseDeliveryStreamServerSideEncryptionArgs] = None,
snowflake_configuration: Optional[FirehoseDeliveryStreamSnowflakeConfigurationArgs] = None,
splunk_configuration: Optional[FirehoseDeliveryStreamSplunkConfigurationArgs] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None,
version_id: Optional[str] = None) -> FirehoseDeliveryStream
func GetFirehoseDeliveryStream(ctx *Context, name string, id IDInput, state *FirehoseDeliveryStreamState, opts ...ResourceOption) (*FirehoseDeliveryStream, error)
public static FirehoseDeliveryStream Get(string name, Input<string> id, FirehoseDeliveryStreamState? state, CustomResourceOptions? opts = null)
public static FirehoseDeliveryStream get(String name, Output<String> id, FirehoseDeliveryStreamState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
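As a minimal sketch of the lookup call: the ARN below is a placeholder, and this assumes the delivery stream's ARN serves as its provider ID (the same identifier used when importing the resource).
import * as aws from "@pulumi/aws";
// Reference a delivery stream that already exists in the target account
// without managing or recreating it.
const existing = aws.kinesis.FirehoseDeliveryStream.get(
    "existing-stream",
    "arn:aws:firehose:us-east-1:123456789012:deliverystream/example-stream",
);
export const existingDestination = existing.destination;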
- Arn string - The Amazon Resource Name (ARN) specifying the Stream
- Destination string - This is the destination where the data is delivered. The only options are s3 (Deprecated, use extended_s3 instead), extended_s3, redshift, elasticsearch, splunk, http_endpoint, opensearch, opensearchserverless and snowflake.
- DestinationId string
- ElasticsearchConfiguration FirehoseDeliveryStreamElasticsearchConfiguration - Configuration options when destination is elasticsearch. See elasticsearch_configuration block below for details.
- ExtendedS3Configuration FirehoseDeliveryStreamExtendedS3Configuration - Enhanced configuration options for the s3 destination. See extended_s3_configuration block below for details.
- HttpEndpointConfiguration FirehoseDeliveryStreamHttpEndpointConfiguration - Configuration options when destination is http_endpoint. Requires the user to also specify an s3_configuration block. See http_endpoint_configuration block below for details.
- IcebergConfiguration FirehoseDeliveryStreamIcebergConfiguration - Configuration options when destination is iceberg. See iceberg_configuration block below for details.
- KinesisSourceConfiguration FirehoseDeliveryStreamKinesisSourceConfiguration - The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See kinesis_source_configuration block below for details.
- MskSourceConfiguration FirehoseDeliveryStreamMskSourceConfiguration - The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See msk_source_configuration block below for details.
- Name string - A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with aws-waf-logs-. See AWS Documentation for more details.
- OpensearchConfiguration FirehoseDeliveryStreamOpensearchConfiguration - Configuration options when destination is opensearch. See opensearch_configuration block below for details.
- OpensearchserverlessConfiguration FirehoseDeliveryStreamOpensearchserverlessConfiguration - Configuration options when destination is opensearchserverless. See opensearchserverless_configuration block below for details.
- RedshiftConfiguration FirehoseDeliveryStreamRedshiftConfiguration - Configuration options when destination is redshift. Requires the user to also specify an s3_configuration block. See redshift_configuration block below for details.
- ServerSideEncryption FirehoseDeliveryStreamServerSideEncryption - Encrypt at rest options. See server_side_encryption block below for details. NOTE: Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
- SnowflakeConfiguration FirehoseDeliveryStreamSnowflakeConfiguration - Configuration options when destination is snowflake. See snowflake_configuration block below for details.
- SplunkConfiguration FirehoseDeliveryStreamSplunkConfiguration - Configuration options when destination is splunk. See splunk_configuration block below for details.
- Tags Dictionary<string, string> - A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- TagsAll Dictionary<string, string> - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- VersionId string
Supporting Types
FirehoseDeliveryStreamElasticsearchConfiguration, FirehoseDeliveryStreamElasticsearchConfigurationArgs
- IndexName string - The Elasticsearch index name.
- RoleArn string - The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig. The pattern needs to be arn:.*.
- S3Configuration FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration - The S3 Configuration. See s3_configuration block below for details.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 300s.
- BufferingSize int - Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB.
- CloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- ClusterEndpoint string - The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- DomainArn string - The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- IndexRotationPeriod string - The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- ProcessingConfiguration FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration - The data processing configuration. See processing_configuration block below for details.
- RetryDuration int - After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- S3BackupMode string - Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- TypeName string - The Elasticsearch type name with maximum length of 100 characters.
- VpcConfig FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig - The VPC configuration for the delivery stream to connect to Elasticsearch associated with the VPC. See vpc_config block below for details.
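A minimal sketch of this block in use, assuming an existing Amazon ES domain, IAM role, and backup bucket (all ARNs below are placeholders):
import * as aws from "@pulumi/aws";
const esStream = new aws.kinesis.FirehoseDeliveryStream("es_stream", {
    name: "kinesis-firehose-es-example",
    destination: "elasticsearch",
    elasticsearchConfiguration: {
        domainArn: "arn:aws:es:us-east-1:123456789012:domain/example",        // placeholder domain
        roleArn: "arn:aws:iam::123456789012:role/firehose_delivery_role",     // placeholder role
        indexName: "logs",
        indexRotationPeriod: "OneDay",        // default, shown for clarity
        s3BackupMode: "FailedDocumentsOnly",  // default: only failed documents go to S3
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose_delivery_role", // placeholder role
            bucketArn: "arn:aws:s3:::my-firehose-backup-bucket",              // placeholder bucket
        },
    },
});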
FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- Enabled bool
- Enables or disables the logging. Defaults to
false
. - Log
Group stringName - The CloudWatch group name for logging. This value is required if
enabled
is true. - Log
Stream stringName - The CloudWatch log stream name for logging. This value is required if
enabled
is true.
- enabled Boolean
- Enables or disables the logging. Defaults to
false
. - log
Group StringName - The CloudWatch group name for logging. This value is required if
enabled
is true. - log
Stream StringName - The CloudWatch log stream name for logging. This value is required if
enabled
is true.
- enabled boolean
- Enables or disables the logging. Defaults to
false
. - log
Group stringName - The CloudWatch group name for logging. This value is required if
enabled
is true. - log
Stream stringName - The CloudWatch log stream name for logging. This value is required if
enabled
is true.
- enabled bool
- Enables or disables the logging. Defaults to
false
. - log_
group_ strname - The CloudWatch group name for logging. This value is required if
enabled
is true. - log_
stream_ strname - The CloudWatch log stream name for logging. This value is required if
enabled
is true.
- enabled Boolean
- Enables or disables the logging. Defaults to
false
. - log
Group StringName - The CloudWatch group name for logging. This value is required if
enabled
is true. - log
Stream StringName - The CloudWatch log stream name for logging. This value is required if
enabled
is true.
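Firehose only writes delivery logs if the log group and log stream already exist. The following TypeScript sketch creates them and builds an options object that could be referenced from a destination block such as elasticsearchConfiguration; the names are illustrative.
import * as aws from "@pulumi/aws";
// The log group and stream must exist before Firehose can write to them.
const logGroup = new aws.cloudwatch.LogGroup("firehose_log_group", {
    name: "/aws/kinesisfirehose/example-stream",
});
const logStream = new aws.cloudwatch.LogStream("firehose_log_stream", {
    name: "DestinationDelivery",
    logGroupName: logGroup.name,
});
// Referenced as cloudwatchLoggingOptions inside a destination configuration.
const cloudwatchLoggingOptions = {
    enabled: true,
    logGroupName: logGroup.name,
    logStreamName: logStream.name,
};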
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfiguration, FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs
- enabled Boolean - Enables or disables data processing.
- processors List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor> - Specifies the data processors as multiple blocks. See processors block below for details.

Each SDK exposes the same fields with language-appropriate casing and collection types (for example, Sequence[...] in Python, slices in Go).
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs
- type String - The type of processor. Valid values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters List<FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter> - Specifies the processor parameters as multiple blocks. See parameters block below for details.

Each SDK exposes the same fields with language-appropriate casing and collection types.
FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs
- parameterName String - Parameter name. Valid values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue String - Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values, as in the sketch below.

Each SDK exposes the same fields with language-appropriate casing (for example, parameter_name and parameter_value in Python).
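As a hedged illustration of that note, the following TypeScript fragment builds a processing configuration for a Lambda processor and lists only non-default parameter values, so Pulumi state matches what AWS stores. The function ARN is a placeholder.
// Placeholder ARN: substitute your own processing function.
const processorLambdaArn = "arn:aws:lambda:us-east-1:123456789012:function:firehose_processor";
// Referenced as processingConfiguration inside a destination configuration.
const processingConfiguration = {
    enabled: true,
    processors: [{
        type: "Lambda",
        parameters: [
            // Lambda ARNs should include the version or alias qualifier.
            { parameterName: "LambdaArn", parameterValue: `${processorLambdaArn}:$LATEST` },
            // Non-default processor buffering (defaults are 1 MB and 60 seconds).
            { parameterName: "BufferSizeInMBs", parameterValue: "3" },
            { parameterName: "BufferIntervalInSeconds", parameterValue: "90" },
        ],
    }],
};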
FirehoseDeliveryStreamElasticsearchConfigurationS3Configuration, FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationArgs
- bucketArn String - The ARN of the S3 bucket.
- roleArn String - The ARN of the IAM role that provides the AWS credentials.
- bufferingInterval Number - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize Number - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatchLoggingOptions FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- compressionFormat String - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- errorOutputPrefix String - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn String - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.

Each SDK exposes the same fields with language-appropriate casing and types (for example, bucket_arn str and buffering_interval int in Python).
FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamElasticsearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.

The same three fields apply in every SDK, with language-appropriate casing and types.
FirehoseDeliveryStreamElasticsearchConfigurationVpcConfig, FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs
- roleArn String - The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.
- securityGroupIds List<String> - A list of security group IDs to associate with Kinesis Firehose.
- subnetIds List<String> - A list of subnet IDs to associate with Kinesis Firehose.
- vpcId String

Each SDK exposes the same fields with language-appropriate casing and collection types (for example, subnet_ids Sequence[str] in Python).
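Delivering into a domain that sits inside a VPC requires a vpc_config block and a role with the EC2 network-interface permissions described in the AWS documentation. The following TypeScript fragment is a minimal sketch; the role ARN, subnet IDs, and security group IDs are placeholders.
// Placeholder network IDs and role ARN: substitute values from your own VPC.
const vpcConfig = {
    roleArn: "arn:aws:iam::123456789012:role/firehose_example_role",
    subnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"],
    securityGroupIds: ["sg-0123456789abcdef0"],
};
// Referenced as elasticsearchConfiguration.vpcConfig.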
FirehoseDeliveryStreamExtendedS3Configuration, FirehoseDeliveryStreamExtendedS3ConfigurationArgs
- bucketArn String - The ARN of the S3 bucket.
- roleArn String
- bufferingInterval Number
- bufferingSize Number
- cloudwatchLoggingOptions FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions
- compressionFormat String - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- customTimeZone String - The time zone you prefer. Valid values are UTC or a non-3-letter IANA time zone (for example, America/Los_Angeles). Default value is UTC.
- dataFormatConversionConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration - Nested argument for the serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. See data_format_conversion_configuration block below for details.
- dynamicPartitioningConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration - The configuration for dynamic partitioning. Required when using dynamic partitioning. See dynamic_partitioning_configuration block below for details.
- errorOutputPrefix String - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- fileExtension String - The file extension to override the default file extension (for example, .json).
- kmsKeyArn String - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
- processingConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration - The data processing configuration. See processing_configuration block below for details.
- s3BackupConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration - The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.
- s3BackupMode String - The Amazon S3 backup mode. Valid values are Disabled and Enabled. Default value is Disabled.

Each SDK exposes the same fields with language-appropriate casing and types (for example, bucket_arn str and data_format_conversion_configuration in Python).
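The backup and file-naming fields above can be exercised with a sketch like the following. It is a hedged TypeScript illustration, assuming placeholder bucket and role ARNs, and it deliberately omits the processing and format-conversion blocks covered elsewhere.
import * as aws from "@pulumi/aws";
// Placeholder ARNs: substitute resources from your own stack.
const deliveryBucketArn = "arn:aws:s3:::example-firehose-delivery";
const sourceBackupBucketArn = "arn:aws:s3:::example-firehose-source-backup";
const extS3RoleArn = "arn:aws:iam::123456789012:role/firehose_example_role";
const backupStream = new aws.kinesis.FirehoseDeliveryStream("extended_s3_backup_stream", {
    name: "kinesis-firehose-extended-s3-backup-stream",
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: extS3RoleArn,
        bucketArn: deliveryBucketArn,
        customTimeZone: "America/Los_Angeles", // used when evaluating the time-format prefix
        fileExtension: ".json",                // overrides the default file extension
        s3BackupMode: "Enabled",               // source records are also copied to the backup bucket
        s3BackupConfiguration: {               // same shape as s3_configuration
            roleArn: extS3RoleArn,
            bucketArn: sourceBackupBucketArn,
            compressionFormat: "GZIP",
        },
    },
});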
FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.

As with the other cloudwatch_logging_options blocks, each SDK exposes these fields with language-appropriate casing and types.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs
- inputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration - Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. See input_format_configuration block below for details.
- outputFormatConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration - Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. See output_format_configuration block below for details.
- schemaConfiguration FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration - Specifies the AWS Glue Data Catalog table that contains the column information. See schema_configuration block below for details.
- enabled Boolean - Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

Each SDK exposes the same fields with language-appropriate casing.
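Put together, a JSON-to-Parquet conversion might look like the following TypeScript fragment. It is a sketch under stated assumptions: the Glue database, table, and role names are hypothetical placeholders, and the serializer and schema fields it uses are documented in their own blocks further below.
// Assumed: a Glue catalog table describing the column layout of the incoming JSON.
const glueDatabaseName = "example_database";
const glueTableName = "example_table";
const conversionRoleArn = "arn:aws:iam::123456789012:role/firehose_example_role";
// Referenced as extendedS3Configuration.dataFormatConversionConfiguration.
const dataFormatConversionConfiguration = {
    enabled: true,
    inputFormatConfiguration: {
        deserializer: {
            hiveJsonSerDe: {},               // or openXJsonSerDe; see the deserializer block below
        },
    },
    outputFormatConfiguration: {
        serializer: {
            parquetSerDe: {},                // or orcSerDe for ORC output
        },
    },
    schemaConfiguration: {
        databaseName: glueDatabaseName,
        tableName: glueTableName,
        roleArn: conversionRoleArn,          // role with read access to the Glue table
    },
};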
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs
- deserializer FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer - Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. See deserializer block below for details.

Each SDK exposes the same field with language-appropriate casing.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializer, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs
- hiveJsonSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe - Specifies the native Hive / HCatalog JsonSerDe. See hive_json_ser_de block below for details.
- openXJsonSerDe FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe - Specifies the OpenX SerDe. See open_x_json_ser_de block below for details.

Each SDK exposes the same fields with language-appropriate casing.
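The two deserializer choices look like the following TypeScript fragments; only one of them may be set at a time. The timestamp formats and key mappings are illustrative values, and the fields they use are described in the blocks that follow.
// Option 1: the native Hive / HCatalog JsonSerDe, with explicit timestamp parsing.
const hiveDeserializer = {
    hiveJsonSerDe: {
        // Joda-Time patterns, or the special value "millis" for epoch milliseconds.
        timestampFormats: ["yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", "millis"],
    },
};
// Option 2: the OpenX JSON SerDe, which can remap keys that clash with Hive keywords.
const openXDeserializer = {
    openXJsonSerDe: {
        caseInsensitive: true,
        convertDotsInJsonKeysToUnderscores: true,
        columnToJsonKeyMappings: { ts: "timestamp" },
    },
};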
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs
- timestampFormats List<String> - A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

Each SDK exposes the same field with language-appropriate casing and collection types (for example, timestamp_formats Sequence[str] in Python).
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs
- caseInsensitive Boolean - When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- columnToJsonKeyMappings Map<String,String> - A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to { ts = "timestamp" } to map this key to a column named ts.
- convertDotsInJsonKeysToUnderscores Boolean - When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to false.

In the Python SDK the same fields use snake_case names, for example:
- case_insensitive bool - When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- column_
to_ Mapping[str, str]json_ key_ mappings - A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to
{ ts = "timestamp" }
to map this key to a column named ts. - convert_
dots_ boolin_ json_ keys_ to_ underscores - When set to
true
, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults tofalse
.
- case
Insensitive Boolean - When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
- column
To Map<String>Json Key Mappings - A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to
{ ts = "timestamp" }
to map this key to a column named ts. - convert
Dots BooleanIn Json Keys To Underscores - When set to
true
, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults tofalse
.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs
- Serializer
Firehose
Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer - Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See
serializer
block below for details.
- Serializer
Firehose
Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer - Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See
serializer
block below for details.
- serializer
Firehose
Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer - Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See
serializer
block below for details.
- serializer
Firehose
Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer - Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See
serializer
block below for details.
- serializer
Firehose
Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer - Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See
serializer
block below for details.
- serializer Property Map
- Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. See
serializer
block below for details.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializer, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs
- Orc
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Orc Ser De - Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See
orc_ser_de
block below for details. - Parquet
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Parquet Ser De - Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
- Orc
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Orc Ser De - Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See
orc_ser_de
block below for details. - Parquet
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Parquet Ser De - Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
- orc
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Orc Ser De - Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See
orc_ser_de
block below for details. - parquet
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Parquet Ser De - Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
- orc
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Orc Ser De - Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See
orc_ser_de
block below for details. - parquet
Ser FirehoseDe Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Parquet Ser De - Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
- orc_
ser_ Firehosede Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Orc Ser De - Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See
orc_ser_de
block below for details. - parquet_
ser_ Firehosede Delivery Stream Extended S3Configuration Data Format Conversion Configuration Output Format Configuration Serializer Parquet Ser De - Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
- orc
Ser Property MapDe - Specifies converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. See
orc_ser_de
block below for details. - parquet
Ser Property MapDe - Specifies converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet. More details below.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs
- Block
Size intBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- Bloom
Filter List<string>Columns - A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- Bloom
Filter doubleFalse Positive Probability - The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is
0.05
, the minimum is0
, and the maximum is1
. - Compression string
- The compression code to use over data blocks. The default is
SNAPPY
. - Dictionary
Key doubleThreshold - A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to
1
. - Enable
Padding bool - Set this to
true
to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default isfalse
. - Format
Version string - The version of the file to write. The possible values are
V0_11
andV0_12
. The default isV0_12
. - Padding
Tolerance double - A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is
0.05
, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter whenenable_padding
isfalse
. - Row
Index intStride - The number of rows between index entries. The default is
10000
and the minimum is1000
. - Stripe
Size intBytes - The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
- Block
Size intBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- Bloom
Filter []stringColumns - A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- Bloom
Filter float64False Positive Probability - The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is
0.05
, the minimum is0
, and the maximum is1
. - Compression string
- The compression code to use over data blocks. The default is
SNAPPY
. - Dictionary
Key float64Threshold - A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to
1
. - Enable
Padding bool - Set this to
true
to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default isfalse
. - Format
Version string - The version of the file to write. The possible values are
V0_11
andV0_12
. The default isV0_12
. - Padding
Tolerance float64 - A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is
0.05
, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter whenenable_padding
isfalse
. - Row
Index intStride - The number of rows between index entries. The default is
10000
and the minimum is1000
. - Stripe
Size intBytes - The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
- block
Size IntegerBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- bloom
Filter List<String>Columns - A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- bloom
Filter DoubleFalse Positive Probability - The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is
0.05
, the minimum is0
, and the maximum is1
. - compression String
- The compression code to use over data blocks. The default is
SNAPPY
. - dictionary
Key DoubleThreshold - A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to
1
. - enable
Padding Boolean - Set this to
true
to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default isfalse
. - format
Version String - The version of the file to write. The possible values are
V0_11
andV0_12
. The default isV0_12
. - padding
Tolerance Double - A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is
0.05
, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter whenenable_padding
isfalse
. - row
Index IntegerStride - The number of rows between index entries. The default is
10000
and the minimum is1000
. - stripe
Size IntegerBytes - The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
- block
Size numberBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- bloom
Filter string[]Columns - A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- bloom
Filter numberFalse Positive Probability - The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is
0.05
, the minimum is0
, and the maximum is1
. - compression string
- The compression code to use over data blocks. The default is
SNAPPY
. - dictionary
Key numberThreshold - A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to
1
. - enable
Padding boolean - Set this to
true
to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default isfalse
. - format
Version string - The version of the file to write. The possible values are
V0_11
andV0_12
. The default isV0_12
. - padding
Tolerance number - A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is
0.05
, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter whenenable_padding
isfalse
. - row
Index numberStride - The number of rows between index entries. The default is
10000
and the minimum is1000
. - stripe
Size numberBytes - The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
- block_
size_ intbytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- bloom_
filter_ Sequence[str]columns - A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- bloom_
filter_ floatfalse_ positive_ probability - The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is
0.05
, the minimum is0
, and the maximum is1
. - compression str
- The compression code to use over data blocks. The default is
SNAPPY
. - dictionary_
key_ floatthreshold - A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to
1
. - enable_
padding bool - Set this to
true
to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default isfalse
. - format_
version str - The version of the file to write. The possible values are
V0_11
andV0_12
. The default isV0_12
. - padding_
tolerance float - A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is
0.05
, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter whenenable_padding
isfalse
. - row_
index_ intstride - The number of rows between index entries. The default is
10000
and the minimum is1000
. - stripe_
size_ intbytes - The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
- block
Size NumberBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- bloom
Filter List<String>Columns - A list of column names for which you want Kinesis Data Firehose to create bloom filters.
- bloom
Filter NumberFalse Positive Probability - The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is
0.05
, the minimum is0
, and the maximum is1
. - compression String
- The compression code to use over data blocks. The default is
SNAPPY
. - dictionary
Key NumberThreshold - A float that represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to
1
. - enable
Padding Boolean - Set this to
true
to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default isfalse
. - format
Version String - The version of the file to write. The possible values are
V0_11
andV0_12
. The default isV0_12
. - padding
Tolerance Number - A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is
0.05
, which means 5 percent of stripe size. For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task. Kinesis Data Firehose ignores this parameter whenenable_padding
isfalse
. - row
Index NumberStride - The number of rows between index entries. The default is
10000
and the minimum is1000
. - stripe
Size NumberBytes - The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDe, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs
- Block
Size intBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- Compression string
- The compression code to use over data blocks. The possible values are
UNCOMPRESSED
,SNAPPY
, andGZIP
, with the default beingSNAPPY
. UseSNAPPY
for higher decompression speed. UseGZIP
if the compression ratio is more important than speed. - Enable
Dictionary boolCompression - Indicates whether to enable dictionary compression.
- Max
Padding intBytes - The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is
0
. - Page
Size intBytes - The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- Writer
Version string - Indicates the version of row format to output. The possible values are
V1
andV2
. The default isV1
.
- Block
Size intBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- Compression string
- The compression code to use over data blocks. The possible values are
UNCOMPRESSED
,SNAPPY
, andGZIP
, with the default beingSNAPPY
. UseSNAPPY
for higher decompression speed. UseGZIP
if the compression ratio is more important than speed. - Enable
Dictionary boolCompression - Indicates whether to enable dictionary compression.
- Max
Padding intBytes - The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is
0
. - Page
Size intBytes - The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- Writer
Version string - Indicates the version of row format to output. The possible values are
V1
andV2
. The default isV1
.
- block
Size IntegerBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- compression String
- The compression code to use over data blocks. The possible values are
UNCOMPRESSED
,SNAPPY
, andGZIP
, with the default beingSNAPPY
. UseSNAPPY
for higher decompression speed. UseGZIP
if the compression ratio is more important than speed. - enable
Dictionary BooleanCompression - Indicates whether to enable dictionary compression.
- max
Padding IntegerBytes - The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is
0
. - page
Size IntegerBytes - The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- writer
Version String - Indicates the version of row format to output. The possible values are
V1
andV2
. The default isV1
.
- block
Size numberBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- compression string
- The compression code to use over data blocks. The possible values are
UNCOMPRESSED
,SNAPPY
, andGZIP
, with the default beingSNAPPY
. UseSNAPPY
for higher decompression speed. UseGZIP
if the compression ratio is more important than speed. - enable
Dictionary booleanCompression - Indicates whether to enable dictionary compression.
- max
Padding numberBytes - The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is
0
. - page
Size numberBytes - The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- writer
Version string - Indicates the version of row format to output. The possible values are
V1
andV2
. The default isV1
.
- block_
size_ intbytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- compression str
- The compression code to use over data blocks. The possible values are
UNCOMPRESSED
,SNAPPY
, andGZIP
, with the default beingSNAPPY
. UseSNAPPY
for higher decompression speed. UseGZIP
if the compression ratio is more important than speed. - enable_
dictionary_ boolcompression - Indicates whether to enable dictionary compression.
- max_
padding_ intbytes - The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is
0
. - page_
size_ intbytes - The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- writer_
version str - Indicates the version of row format to output. The possible values are
V1
andV2
. The default isV1
.
- block
Size NumberBytes - The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
- compression String
- The compression code to use over data blocks. The possible values are
UNCOMPRESSED
,SNAPPY
, andGZIP
, with the default beingSNAPPY
. UseSNAPPY
for higher decompression speed. UseGZIP
if the compression ratio is more important than speed. - enable
Dictionary BooleanCompression - Indicates whether to enable dictionary compression.
- max
Padding NumberBytes - The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is
0
. - page
Size NumberBytes - The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
- writer
Version String - Indicates the version of row format to output. The possible values are
V1
andV2
. The default isV1
.
FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs
- Database
Name string - Specifies the name of the AWS Glue database that contains the schema for the output data.
- Role
Arn string - The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- Table
Name string - Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- Catalog
Id string - The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
- Region string
- If you don't specify an AWS Region, the default is the current region.
- Version
Id string - Specifies the table version for the output data schema. Defaults to
LATEST
.
- Database
Name string - Specifies the name of the AWS Glue database that contains the schema for the output data.
- Role
Arn string - The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- Table
Name string - Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- Catalog
Id string - The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
- Region string
- If you don't specify an AWS Region, the default is the current region.
- Version
Id string - Specifies the table version for the output data schema. Defaults to
LATEST
.
- database
Name String - Specifies the name of the AWS Glue database that contains the schema for the output data.
- role
Arn String - The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- table
Name String - Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- catalog
Id String - The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
- region String
- If you don't specify an AWS Region, the default is the current region.
- version
Id String - Specifies the table version for the output data schema. Defaults to
LATEST
.
- database
Name string - Specifies the name of the AWS Glue database that contains the schema for the output data.
- role
Arn string - The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- table
Name string - Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- catalog
Id string - The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
- region string
- If you don't specify an AWS Region, the default is the current region.
- version
Id string - Specifies the table version for the output data schema. Defaults to
LATEST
.
- database_
name str - Specifies the name of the AWS Glue database that contains the schema for the output data.
- role_
arn str - The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- table_
name str - Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- catalog_
id str - The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
- region str
- If you don't specify an AWS Region, the default is the current region.
- version_
id str - Specifies the table version for the output data schema. Defaults to
LATEST
.
- database
Name String - Specifies the name of the AWS Glue database that contains the schema for the output data.
- role
Arn String - The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
- table
Name String - Specifies the AWS Glue table that contains the column information that constitutes your data schema.
- catalog
Id String - The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
- region String
- If you don't specify an AWS Region, the default is the current region.
- version
Id String - Specifies the table version for the output data schema. Defaults to
LATEST
.
FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs
- Enabled bool
- Enables or disables dynamic partitioning. Defaults to
false
. - Retry
Duration int Total amount of seconds Firehose spends on retries. Valid values between 0 and 7200. Default is 300.
NOTE: You can enable dynamic partitioning only when you create a new delivery stream. Once you enable dynamic partitioning on a delivery stream, it cannot be disabled on this delivery stream. Therefore, the provider will recreate the resource whenever dynamic partitioning is enabled or disabled.
- Enabled bool
- Enables or disables dynamic partitioning. Defaults to
false
. - Retry
Duration int Total amount of seconds Firehose spends on retries. Valid values between 0 and 7200. Default is 300.
NOTE: You can enable dynamic partitioning only when you create a new delivery stream. Once you enable dynamic partitioning on a delivery stream, it cannot be disabled on this delivery stream. Therefore, the provider will recreate the resource whenever dynamic partitioning is enabled or disabled.
- enabled Boolean
- Enables or disables dynamic partitioning. Defaults to
false
. - retry
Duration Integer Total amount of seconds Firehose spends on retries. Valid values between 0 and 7200. Default is 300.
NOTE: You can enable dynamic partitioning only when you create a new delivery stream. Once you enable dynamic partitioning on a delivery stream, it cannot be disabled on this delivery stream. Therefore, the provider will recreate the resource whenever dynamic partitioning is enabled or disabled.
- enabled boolean
- Enables or disables dynamic partitioning. Defaults to
false
. - retry
Duration number Total amount of seconds Firehose spends on retries. Valid values between 0 and 7200. Default is 300.
NOTE: You can enable dynamic partitioning only when you create a new delivery stream. Once you enable dynamic partitioning on a delivery stream, it cannot be disabled on this delivery stream. Therefore, the provider will recreate the resource whenever dynamic partitioning is enabled or disabled.
- enabled bool
- Enables or disables dynamic partitioning. Defaults to
false
. - retry_
duration int Total amount of seconds Firehose spends on retries. Valid values between 0 and 7200. Default is 300.
NOTE: You can enable dynamic partitioning only when you create a new delivery stream. Once you enable dynamic partitioning on a delivery stream, it cannot be disabled on this delivery stream. Therefore, the provider will recreate the resource whenever dynamic partitioning is enabled or disabled.
- enabled Boolean
- Enables or disables dynamic partitioning. Defaults to
false
. - retry
Duration Number Total amount of seconds Firehose spends on retries. Valid values between 0 and 7200. Default is 300.
NOTE: You can enable dynamic partitioning only when you create a new delivery stream. Once you enable dynamic partitioning on a delivery stream, it cannot be disabled on this delivery stream. Therefore, the provider will recreate the resource whenever dynamic partitioning is enabled or disabled.
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs
- Enabled bool
- Enables or disables data processing.
- Processors
List<Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor> - Specifies the data processors as multiple blocks. See
processors
block below for details.
- Enabled bool
- Enables or disables data processing.
- Processors
[]Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor - Specifies the data processors as multiple blocks. See
processors
block below for details.
- enabled Boolean
- Enables or disables data processing.
- processors
List<Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor> - Specifies the data processors as multiple blocks. See
processors
block below for details.
- enabled boolean
- Enables or disables data processing.
- processors
Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor[] - Specifies the data processors as multiple blocks. See
processors
block below for details.
- enabled bool
- Enables or disables data processing.
- processors
Sequence[Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor] - Specifies the data processors as multiple blocks. See
processors
block below for details.
- enabled Boolean
- Enables or disables data processing.
- processors List<Property Map>
- Specifies the data processors as multiple blocks. See
processors
block below for details.
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs
- Type string
- The type of processor. Valid Values:
RecordDeAggregation
,Lambda
,MetadataExtraction
,AppendDelimiterToRecord
,Decompression
,CloudWatchLogProcessing
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - Parameters
List<Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor Parameter> - Specifies the processor parameters as multiple blocks. See
parameters
block below for details.
- Type string
- The type of processor. Valid Values:
RecordDeAggregation
,Lambda
,MetadataExtraction
,AppendDelimiterToRecord
,Decompression
,CloudWatchLogProcessing
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - Parameters
[]Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor Parameter - Specifies the processor parameters as multiple blocks. See
parameters
block below for details.
- type String
- The type of processor. Valid Values:
RecordDeAggregation
,Lambda
,MetadataExtraction
,AppendDelimiterToRecord
,Decompression
,CloudWatchLogProcessing
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameters
List<Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor Parameter> - Specifies the processor parameters as multiple blocks. See
parameters
block below for details.
- type string
- The type of processor. Valid Values:
RecordDeAggregation
,Lambda
,MetadataExtraction
,AppendDelimiterToRecord
,Decompression
,CloudWatchLogProcessing
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameters
Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor Parameter[] - Specifies the processor parameters as multiple blocks. See
parameters
block below for details.
- type str
- The type of processor. Valid Values:
RecordDeAggregation
,Lambda
,MetadataExtraction
,AppendDelimiterToRecord
,Decompression
,CloudWatchLogProcessing
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameters
Sequence[Firehose
Delivery Stream Extended S3Configuration Processing Configuration Processor Parameter] - Specifies the processor parameters as multiple blocks. See
parameters
block below for details.
- type String
- The type of processor. Valid Values:
RecordDeAggregation
,Lambda
,MetadataExtraction
,AppendDelimiterToRecord
,Decompression
,CloudWatchLogProcessing
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameters List<Property Map>
- Specifies the processor parameters as multiple blocks. See
parameters
block below for details.
FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs
- Parameter
Name string - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - Parameter
Value string Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- Parameter
Name string - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - Parameter
Value string Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter
Name String - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter
Value String Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter
Name string - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter
Value string Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter_
name str - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter_
value str Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter
Name String - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter
Value String Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfiguration, FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs
- Bucket
Arn string - The ARN of the S3 bucket
- Role
Arn string - Buffering
Interval int - Buffering
Size int - Cloudwatch
Logging FirehoseOptions Delivery Stream Extended S3Configuration S3Backup Configuration Cloudwatch Logging Options - Compression
Format string - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - Error
Output stringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - Kms
Key stringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- Bucket
Arn string - The ARN of the S3 bucket
- Role
Arn string - Buffering
Interval int - Buffering
Size int - Cloudwatch
Logging FirehoseOptions Delivery Stream Extended S3Configuration S3Backup Configuration Cloudwatch Logging Options - Compression
Format string - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - Error
Output stringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - Kms
Key stringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucket
Arn String - The ARN of the S3 bucket
- role
Arn String - buffering
Interval Integer - buffering
Size Integer - cloudwatch
Logging FirehoseOptions Delivery Stream Extended S3Configuration S3Backup Configuration Cloudwatch Logging Options - compression
Format String - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - error
Output StringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - kms
Key StringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucket
Arn string - The ARN of the S3 bucket
- role
Arn string - buffering
Interval number - buffering
Size number - cloudwatch
Logging FirehoseOptions Delivery Stream Extended S3Configuration S3Backup Configuration Cloudwatch Logging Options - compression
Format string - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - error
Output stringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - kms
Key stringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix string
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucket_
arn str - The ARN of the S3 bucket
- role_
arn str - buffering_
interval int - buffering_
size int - cloudwatch_
logging_ Firehoseoptions Delivery Stream Extended S3Configuration S3Backup Configuration Cloudwatch Logging Options - compression_
format str - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - error_
output_ strprefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - kms_
key_ strarn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix str
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucket
Arn String - The ARN of the S3 bucket
- role
Arn String - buffering
Interval Number - buffering
Size Number - cloudwatch
Logging Property MapOptions - compression
Format String - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - error
Output StringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - kms
Key StringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool
- Enables or disables the logging. Defaults to
false
. - Log
Group stringName - The CloudWatch group name for logging. This value is required if
enabled
is true. - Log
Stream stringName - The CloudWatch log stream name for logging. This value is required if
enabled
is true.
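This cloudwatch_logging_options shape recurs under every destination and backup block in this resource. A minimal sketch of enabling it in TypeScript follows; the log group and stream names are placeholders and must refer to a log group and stream managed elsewhere in the program.
import * as aws from "@pulumi/aws";
// Hypothetical log group and stream for delivery logging; names are placeholders.
const firehoseLogGroup = new aws.cloudwatch.LogGroup("firehose_log_group", {name: "/aws/kinesisfirehose/example-stream"});
const firehoseLogStream = new aws.cloudwatch.LogStream("firehose_log_stream", {
    name: "DestinationDelivery",
    logGroupName: firehoseLogGroup.name,
});
// This object can then be passed as cloudwatchLoggingOptions on a destination
// or backup configuration block.
const loggingOptions = {
    enabled: true,
    logGroupName: firehoseLogGroup.name,
    logStreamName: firehoseLogStream.name,
};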
FirehoseDeliveryStreamHttpEndpointConfiguration, FirehoseDeliveryStreamHttpEndpointConfigurationArgs
- S3Configuration FirehoseDeliveryStreamHttpEndpointConfigurationS3Configuration - The S3 Configuration. See s3_configuration block below for details.
- Url string - The HTTP endpoint URL to which Kinesis Firehose sends your data.
- AccessKey string - The access key required for Kinesis Firehose to authenticate with the HTTP endpoint selected as the destination.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).
- BufferingSize int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.
- CloudwatchLoggingOptions FirehoseDeliveryStreamHttpEndpointConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- Name string - The HTTP endpoint name.
- ProcessingConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfiguration - The data processing configuration. See processing_configuration block below for details.
- RequestConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfiguration - The request configuration. See request_configuration block below for details.
- RetryDuration int - Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails; it does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt. Valid values are between 0 and 7200. Default is 300.
- RoleArn string - Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs. The pattern needs to be arn:.*.
- S3BackupMode string - Defines how documents should be delivered to Amazon S3. Valid values are FailedDataOnly and AllData. Default value is FailedDataOnly.
- SecretsManagerConfiguration FirehoseDeliveryStreamHttpEndpointConfigurationSecretsManagerConfiguration - The Secrets Manager Configuration. See secrets_manager_configuration block below for details.
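Putting this block together, a minimal sketch of an http_endpoint destination follows. The endpoint URL, access key, role ARN, and bucket ARN are placeholders for illustration only; the IAM role and S3 backup bucket are assumed to be managed elsewhere in the program.
import * as aws from "@pulumi/aws";
// Sketch of an HTTP endpoint destination; all credentials and ARNs are placeholders.
const httpEndpointStream = new aws.kinesis.FirehoseDeliveryStream("http_endpoint_stream", {
    name: "kinesis-firehose-http-endpoint-stream",
    destination: "http_endpoint",
    httpEndpointConfiguration: {
        url: "https://example.com/ingest",
        name: "example-endpoint",
        accessKey: "my-access-key",
        bufferingSize: 5,
        bufferingInterval: 300,
        roleArn: "arn:aws:iam::123456789012:role/firehose-role",
        s3BackupMode: "FailedDataOnly",
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose-role",
            bucketArn: "arn:aws:s3:::my-backup-bucket",
            compressionFormat: "GZIP",
        },
        requestConfiguration: {
            contentEncoding: "GZIP",
        },
    },
});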
FirehoseDeliveryStreamHttpEndpointConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamHttpEndpointConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfiguration, FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationArgs
- Enabled bool - Enables or disables data processing.
- Processors List&lt;FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessor&gt; - Specifies the data processors as multiple blocks. See processors block below for details.
FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorArgs
- Type string - The type of processor. Valid values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters List&lt;FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameter&gt; - Specifies the processor parameters as multiple blocks. See parameters block below for details.
FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameterArgs
- ParameterName string - Parameter name. Valid values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- ParameterValue string - Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
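Following that note, a sketch of a processors block that sets only non-default parameters might look like the following. The MetadataExtraction query is a placeholder, and JQ-1.6 is assumed here to be the JSON parsing engine identifier Firehose expects.
// Sketch: only non-default parameters are listed, per the note above.
const processingConfiguration = {
    enabled: true,
    processors: [{
        type: "MetadataExtraction",
        parameters: [
            {
                parameterName: "MetadataExtractionQuery",
                parameterValue: "{customer_id: .customer_id}", // placeholder JQ expression
            },
            {
                parameterName: "JsonParsingEngine",
                parameterValue: "JQ-1.6", // assumed engine identifier
            },
        ],
    }],
};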
FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfiguration, FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs
- CommonAttributes List&lt;FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttribute&gt; - Describes the metadata sent to the HTTP endpoint destination. See common_attributes block below for details.
- ContentEncoding string - Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. Valid values are NONE and GZIP. Default value is NONE.
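As a rough sketch, a request configuration that gzips request bodies and attaches static metadata could look like the following. The attribute entries assume the common_attributes block takes name/value pairs, which this section does not spell out, and the attribute names and values shown are placeholders.
const requestConfiguration = {
    contentEncoding: "GZIP",
    commonAttributes: [
        // Hypothetical metadata attributes; keys and values are placeholders.
        { name: "environment", value: "production" },
        { name: "team", value: "analytics" },
    ],
};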
FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttribute, FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs
FirehoseDeliveryStreamHttpEndpointConfigurationS3Configuration, FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationArgs
- BucketArn string - The ARN of the S3 bucket.
- RoleArn string - The ARN of the AWS credentials.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferingSize int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- CompressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- ErrorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- KmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamHttpEndpointConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamHttpEndpointConfigurationSecretsManagerConfiguration, FirehoseDeliveryStreamHttpEndpointConfigurationSecretsManagerConfigurationArgs
- enabled bool - Enables or disables the Secrets Manager configuration.
- role_arn str - The ARN of the role the stream assumes.
- secret_arn str - The ARN of the Secrets Manager secret. This value is required if enabled is true.
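Where the HTTP endpoint credentials live in AWS Secrets Manager rather than being passed inline as access_key, the block documented above can be wired up roughly as follows; the secret and role ARNs are placeholders.
// Sketch only: ARNs are placeholders. When enabled, Firehose reads the
// destination credentials from the referenced secret instead of an inline key.
const secretsManagerConfiguration = {
    enabled: true,
    secretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:firehose-endpoint-credentials",
    roleArn: "arn:aws:iam::123456789012:role/firehose-secrets-role",
};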
FirehoseDeliveryStreamIcebergConfiguration, FirehoseDeliveryStreamIcebergConfigurationArgs
- CatalogArn string - Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog.
- RoleArn string - The ARN of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.
- S3Configuration FirehoseDeliveryStreamIcebergConfigurationS3Configuration - The S3 Configuration. See s3_configuration block below for details.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds between 0 and 900, before delivering it to the destination. The default value is 300.
- BufferingSize int - Buffer incoming data to the specified size, in MBs between 1 and 128, before delivering it to the destination. The default value is 5.
- CloudwatchLoggingOptions FirehoseDeliveryStreamIcebergConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- DestinationTableConfigurations List&lt;FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfiguration&gt; - Destination table configurations which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if a table-specific configuration is not provided. See destination_table_configuration block below for details.
- ProcessingConfiguration FirehoseDeliveryStreamIcebergConfigurationProcessingConfiguration - The data processing configuration. See processing_configuration block below for details.
- RetryDuration int - The period of time, in seconds between 0 and 7200, during which Firehose retries to deliver data to the specified destination.
- S3BackupMode string
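Putting the Iceberg block together, a minimal sketch of an iceberg destination follows. The Glue catalog, IAM role, and bucket ARNs, as well as the database, table, and key names, are placeholders, and the referenced Glue database and Iceberg table are assumed to exist already.
import * as aws from "@pulumi/aws";
// Sketch of an Apache Iceberg destination; all ARNs and names are placeholders.
const icebergStream = new aws.kinesis.FirehoseDeliveryStream("iceberg_stream", {
    name: "kinesis-firehose-iceberg-stream",
    destination: "iceberg",
    icebergConfiguration: {
        catalogArn: "arn:aws:glue:us-east-1:123456789012:catalog",
        roleArn: "arn:aws:iam::123456789012:role/firehose-iceberg-role",
        bufferingSize: 10,
        bufferingInterval: 400,
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose-iceberg-role",
            bucketArn: "arn:aws:s3:::my-iceberg-staging-bucket",
        },
        destinationTableConfigurations: [{
            databaseName: "example_db",
            tableName: "example_table",
            uniqueKeys: ["event_id"],
        }],
    },
});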
FirehoseDeliveryStreamIcebergConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamIcebergConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfiguration, FirehoseDeliveryStreamIcebergConfigurationDestinationTableConfigurationArgs
- DatabaseName string - The name of the Apache Iceberg database.
- TableName string - The name of the Apache Iceberg Table.
- S3ErrorOutputPrefix string - The table-specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in the S3 destination.
- UniqueKeys List&lt;string&gt; - A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table.
FirehoseDeliveryStreamIcebergConfigurationProcessingConfiguration, FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationArgs
- Enabled bool - Enables or disables data processing.
- Processors List&lt;FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessor&gt; - Specifies the data processors as multiple blocks. See processors block below for details.
FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorArgs
- Type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters List<FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameter> - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- Type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters []FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameter - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type String - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters List<FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameter> - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameter[] - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type str - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters Sequence[FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameter] - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type String - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters List<Property Map> - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
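As a concrete illustration of the processors block, the TypeScript fragment below builds a processing_configuration value that decompresses gzip-compressed CloudWatch Logs records and keeps only the log message. The processor types and parameter names are taken from the valid-value lists above; whether this particular combination fits a given pipeline depends on the data source, so treat it as a sketch.

```typescript
// Fragment to assign to the processingConfiguration argument of a destination block.
const processingConfiguration = {
    enabled: true,
    processors: [
        {
            // Decompress incoming gzip payloads (e.g. CloudWatch Logs subscription data).
            type: "Decompression",
            parameters: [{ parameterName: "CompressionFormat", parameterValue: "GZIP" }],
        },
        {
            // Strip the CloudWatch Logs envelope and forward only the message field.
            type: "CloudWatchLogProcessing",
            parameters: [{ parameterName: "DataMessageExtraction", parameterValue: "true" }],
        },
    ],
};
```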
FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamIcebergConfigurationProcessingConfigurationProcessorParameterArgs
- ParameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- ParameterValue string - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- ParameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- ParameterValue string - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameterName String - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue String - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue string - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter_name str - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameter_value str - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameterName String - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue String - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
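The NOTE above matters in practice: a parameter declared with its default value is not stored in state, so every subsequent update shows a diff. A hedged TypeScript sketch of a processor block that only declares non-default values (the Lambda ARN is a placeholder and, as the description recommends, carries an explicit version):

```typescript
// Only non-default parameter values are declared; defaults such as BufferSizeInMBs (1)
// and BufferIntervalInSeconds (60) are omitted so they do not cause perpetual diffs.
const lambdaProcessor = {
    type: "Lambda",
    parameters: [
        {
            parameterName: "LambdaArn",
            parameterValue: "arn:aws:lambda:us-east-1:123456789012:function:firehose-transformer:3", // placeholder, versioned
        },
        { parameterName: "NumberOfRetries", parameterValue: "5" }, // non-default (default is 3)
    ],
};
```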
FirehoseDeliveryStreamIcebergConfigurationS3Configuration, FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationArgs
- BucketArn string - The ARN of the S3 bucket
- RoleArn string - The ARN of the AWS credentials.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferingSize int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- CompressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- ErrorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- KmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- BucketArn string - The ARN of the S3 bucket
- RoleArn string - The ARN of the AWS credentials.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferingSize int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- CompressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- ErrorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- KmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucketArn String - The ARN of the S3 bucket
- roleArn String - The ARN of the AWS credentials.
- bufferingInterval Integer - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize Integer - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- cloudwatchLoggingOptions FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compressionFormat String - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- errorOutputPrefix String - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn String - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucketArn string - The ARN of the S3 bucket
- roleArn string - The ARN of the AWS credentials.
- bufferingInterval number - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize number - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- cloudwatchLoggingOptions FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- errorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucket_arn str - The ARN of the S3 bucket
- role_arn str - The ARN of the AWS credentials.
- buffering_interval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- buffering_size int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- cloudwatch_logging_options FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compression_format str - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- error_output_prefix str - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kms_key_arn str - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix str - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucketArn String - The ARN of the S3 bucket
- roleArn String - The ARN of the AWS credentials.
- bufferingInterval Number - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize Number - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- cloudwatchLoggingOptions Property Map - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compressionFormat String - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- errorOutputPrefix String - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn String - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamIcebergConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled boolean - Enables or disables the logging. Defaults to false.
- logGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled bool - Enables or disables the logging. Defaults to false.
- log_group_name str - The CloudWatch group name for logging. This value is required if enabled is true.
- log_stream_name str - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.
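Delivery failures are far easier to debug with CloudWatch logging switched on. A small TypeScript sketch that creates a log group and stream and wires them into the cloudwatch_logging_options block; the group and stream names are arbitrary examples.

```typescript
import * as aws from "@pulumi/aws";

const firehoseLogGroup = new aws.cloudwatch.LogGroup("firehose_log_group", {
    name: "/aws/kinesisfirehose/iceberg-stream",
    retentionInDays: 14,
});
const firehoseLogStream = new aws.cloudwatch.LogStream("firehose_log_stream", {
    name: "DestinationDelivery",
    logGroupName: firehoseLogGroup.name,
});

// Fragment to place inside the destination's s3_configuration (or other) block.
const cloudwatchLoggingOptions = {
    enabled: true,                          // both names below are required when enabled is true
    logGroupName: firehoseLogGroup.name,
    logStreamName: firehoseLogStream.name,
};
```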
FirehoseDeliveryStreamKinesisSourceConfiguration, FirehoseDeliveryStreamKinesisSourceConfigurationArgs
- KinesisStreamArn string - The Kinesis stream used as the source of the Firehose delivery stream.
- RoleArn string - The ARN of the role that provides access to the source Kinesis stream.
- KinesisStreamArn string - The Kinesis stream used as the source of the Firehose delivery stream.
- RoleArn string - The ARN of the role that provides access to the source Kinesis stream.
- kinesisStreamArn String - The Kinesis stream used as the source of the Firehose delivery stream.
- roleArn String - The ARN of the role that provides access to the source Kinesis stream.
- kinesisStreamArn string - The Kinesis stream used as the source of the Firehose delivery stream.
- roleArn string - The ARN of the role that provides access to the source Kinesis stream.
- kinesis_stream_arn str - The Kinesis stream used as the source of the Firehose delivery stream.
- role_arn str - The ARN of the role that provides access to the source Kinesis stream.
- kinesisStreamArn String - The Kinesis stream used as the source of the Firehose delivery stream.
- roleArn String - The ARN of the role that provides access to the source Kinesis stream.
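When the delivery stream reads from an existing Kinesis data stream instead of direct PUTs, only the two fields above are needed. A hedged TypeScript sketch; the IAM role and bucket ARNs are placeholders.

```typescript
import * as aws from "@pulumi/aws";

const source = new aws.kinesis.Stream("source", { name: "firehose-source", shardCount: 1 });

const fromKinesis = new aws.kinesis.FirehoseDeliveryStream("from_kinesis", {
    name: "kinesis-firehose-from-kinesis",
    destination: "extended_s3",
    kinesisSourceConfiguration: {
        kinesisStreamArn: source.arn,
        roleArn: "arn:aws:iam::123456789012:role/firehose-read-kinesis", // must allow reading the stream
    },
    extendedS3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-write-s3",     // placeholder
        bucketArn: "arn:aws:s3:::my-delivery-bucket",                    // placeholder
    },
});
```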
FirehoseDeliveryStreamMskSourceConfiguration, FirehoseDeliveryStreamMskSourceConfigurationArgs
- AuthenticationConfiguration FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfiguration - The authentication configuration of the Amazon MSK cluster. See the authentication_configuration block below for details.
- MskClusterArn string - The ARN of the Amazon MSK cluster.
- TopicName string - The topic name within the Amazon MSK cluster.
- AuthenticationConfiguration FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfiguration - The authentication configuration of the Amazon MSK cluster. See the authentication_configuration block below for details.
- MskClusterArn string - The ARN of the Amazon MSK cluster.
- TopicName string - The topic name within the Amazon MSK cluster.
- authenticationConfiguration FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfiguration - The authentication configuration of the Amazon MSK cluster. See the authentication_configuration block below for details.
- mskClusterArn String - The ARN of the Amazon MSK cluster.
- topicName String - The topic name within the Amazon MSK cluster.
- authenticationConfiguration FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfiguration - The authentication configuration of the Amazon MSK cluster. See the authentication_configuration block below for details.
- mskClusterArn string - The ARN of the Amazon MSK cluster.
- topicName string - The topic name within the Amazon MSK cluster.
- authentication_configuration FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfiguration - The authentication configuration of the Amazon MSK cluster. See the authentication_configuration block below for details.
- msk_cluster_arn str - The ARN of the Amazon MSK cluster.
- topic_name str - The topic name within the Amazon MSK cluster.
- authenticationConfiguration Property Map - The authentication configuration of the Amazon MSK cluster. See the authentication_configuration block below for details.
- mskClusterArn String - The ARN of the Amazon MSK cluster.
- topicName String - The topic name within the Amazon MSK cluster.
FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfiguration, FirehoseDeliveryStreamMskSourceConfigurationAuthenticationConfigurationArgs
- Connectivity string - The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.
- RoleArn string - The ARN of the role used to access the Amazon MSK cluster.
- Connectivity string - The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.
- RoleArn string - The ARN of the role used to access the Amazon MSK cluster.
- connectivity String - The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.
- roleArn String - The ARN of the role used to access the Amazon MSK cluster.
- connectivity string - The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.
- roleArn string - The ARN of the role used to access the Amazon MSK cluster.
- connectivity str - The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.
- role_arn str - The ARN of the role used to access the Amazon MSK cluster.
- connectivity String - The type of connectivity used to access the Amazon MSK cluster. Valid values: PUBLIC, PRIVATE.
- roleArn String - The ARN of the role used to access the Amazon MSK cluster.
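An MSK-sourced stream combines the msk_source_configuration block with the nested authentication_configuration block shown above. A hedged TypeScript sketch; the cluster, role, and bucket ARNs are placeholders.

```typescript
import * as aws from "@pulumi/aws";

const fromMsk = new aws.kinesis.FirehoseDeliveryStream("from_msk", {
    name: "kinesis-firehose-from-msk",
    destination: "extended_s3",
    mskSourceConfiguration: {
        mskClusterArn: "arn:aws:kafka:us-east-1:123456789012:cluster/example/11111111-1111-1111-1111-111111111111-1", // placeholder
        topicName: "clickstream",
        authenticationConfiguration: {
            connectivity: "PRIVATE",                                      // or PUBLIC
            roleArn: "arn:aws:iam::123456789012:role/firehose-read-msk",  // placeholder
        },
    },
    extendedS3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-write-s3",      // placeholder
        bucketArn: "arn:aws:s3:::my-delivery-bucket",                     // placeholder
    },
});
```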
FirehoseDeliveryStreamOpensearchConfiguration, FirehoseDeliveryStreamOpensearchConfigurationArgs
- IndexName string - The OpenSearch index name.
- RoleArn string - The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*.
- S3Configuration FirehoseDeliveryStreamOpensearchConfigurationS3Configuration - The S3 Configuration. See the s3_configuration block below for details.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds (0 to 900), before delivering it to the destination. The default value is 300s.
- BufferingSize int - Buffer incoming data to the specified size, in MBs (1 to 100), before delivering it to the destination. The default value is 5MB.
- CloudwatchLoggingOptions FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- ClusterEndpoint string - The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- DocumentIdOptions FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptions - The method for setting up document ID. See the document_id_options block below for details.
- DomainArn string - The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- IndexRotationPeriod string - The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- ProcessingConfiguration FirehoseDeliveryStreamOpensearchConfigurationProcessingConfiguration - The data processing configuration. See the processing_configuration block below for details.
- RetryDuration int - After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds (0 to 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- S3BackupMode string - Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- TypeName string - The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty.
- VpcConfig FirehoseDeliveryStreamOpensearchConfigurationVpcConfig - The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See the vpc_config block below for details.
- IndexName string - The OpenSearch index name.
- RoleArn string - The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*.
- S3Configuration FirehoseDeliveryStreamOpensearchConfigurationS3Configuration - The S3 Configuration. See the s3_configuration block below for details.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds (0 to 900), before delivering it to the destination. The default value is 300s.
- BufferingSize int - Buffer incoming data to the specified size, in MBs (1 to 100), before delivering it to the destination. The default value is 5MB.
- CloudwatchLoggingOptions FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- ClusterEndpoint string - The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- DocumentIdOptions FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptions - The method for setting up document ID. See the document_id_options block below for details.
- DomainArn string - The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- IndexRotationPeriod string - The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- ProcessingConfiguration FirehoseDeliveryStreamOpensearchConfigurationProcessingConfiguration - The data processing configuration. See the processing_configuration block below for details.
- RetryDuration int - After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds (0 to 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- S3BackupMode string - Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- TypeName string - The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty.
- VpcConfig FirehoseDeliveryStreamOpensearchConfigurationVpcConfig - The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See the vpc_config block below for details.
- indexName String - The OpenSearch index name.
- roleArn String - The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*.
- s3Configuration FirehoseDeliveryStreamOpensearchConfigurationS3Configuration - The S3 Configuration. See the s3_configuration block below for details.
- bufferingInterval Integer - Buffer incoming data for the specified period of time, in seconds (0 to 900), before delivering it to the destination. The default value is 300s.
- bufferingSize Integer - Buffer incoming data to the specified size, in MBs (1 to 100), before delivering it to the destination. The default value is 5MB.
- cloudwatchLoggingOptions FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- clusterEndpoint String - The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- documentIdOptions FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptions - The method for setting up document ID. See the document_id_options block below for details.
- domainArn String - The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- indexRotationPeriod String - The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- processingConfiguration FirehoseDeliveryStreamOpensearchConfigurationProcessingConfiguration - The data processing configuration. See the processing_configuration block below for details.
- retryDuration Integer - After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds (0 to 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- s3BackupMode String - Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- typeName String - The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty.
- vpcConfig FirehoseDeliveryStreamOpensearchConfigurationVpcConfig - The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See the vpc_config block below for details.
- indexName string - The OpenSearch index name.
- roleArn string - The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*.
- s3Configuration FirehoseDeliveryStreamOpensearchConfigurationS3Configuration - The S3 Configuration. See the s3_configuration block below for details.
- bufferingInterval number - Buffer incoming data for the specified period of time, in seconds (0 to 900), before delivering it to the destination. The default value is 300s.
- bufferingSize number - Buffer incoming data to the specified size, in MBs (1 to 100), before delivering it to the destination. The default value is 5MB.
- cloudwatchLoggingOptions FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- clusterEndpoint string - The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- documentIdOptions FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptions - The method for setting up document ID. See the document_id_options block below for details.
- domainArn string - The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- indexRotationPeriod string - The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- processingConfiguration FirehoseDeliveryStreamOpensearchConfigurationProcessingConfiguration - The data processing configuration. See the processing_configuration block below for details.
- retryDuration number - After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds (0 to 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- s3BackupMode string - Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- typeName string - The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty.
- vpcConfig FirehoseDeliveryStreamOpensearchConfigurationVpcConfig - The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See the vpc_config block below for details.
- index_name str - The OpenSearch index name.
- role_arn str - The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*.
- s3_configuration FirehoseDeliveryStreamOpensearchConfigurationS3Configuration - The S3 Configuration. See the s3_configuration block below for details.
- buffering_interval int - Buffer incoming data for the specified period of time, in seconds (0 to 900), before delivering it to the destination. The default value is 300s.
- buffering_size int - Buffer incoming data to the specified size, in MBs (1 to 100), before delivering it to the destination. The default value is 5MB.
- cloudwatch_logging_options FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- cluster_endpoint str - The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- document_id_options FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptions - The method for setting up document ID. See the document_id_options block below for details.
- domain_arn str - The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- index_rotation_period str - The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- processing_configuration FirehoseDeliveryStreamOpensearchConfigurationProcessingConfiguration - The data processing configuration. See the processing_configuration block below for details.
- retry_duration int - After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds (0 to 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- s3_backup_mode str - Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- type_name str - The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty.
- vpc_config FirehoseDeliveryStreamOpensearchConfigurationVpcConfig - The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See the vpc_config block below for details.
- indexName String - The OpenSearch index name.
- roleArn String - The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The IAM role must have permission for DescribeDomain, DescribeDomains, and DescribeDomainConfig. The pattern needs to be arn:.*.
- s3Configuration Property Map - The S3 Configuration. See the s3_configuration block below for details.
- bufferingInterval Number - Buffer incoming data for the specified period of time, in seconds (0 to 900), before delivering it to the destination. The default value is 300s.
- bufferingSize Number - Buffer incoming data to the specified size, in MBs (1 to 100), before delivering it to the destination. The default value is 5MB.
- cloudwatchLoggingOptions Property Map - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- clusterEndpoint String - The endpoint to use when communicating with the cluster. Conflicts with domain_arn.
- documentIdOptions Property Map - The method for setting up document ID. See the document_id_options block below for details.
- domainArn String - The ARN of the Amazon ES domain. The pattern needs to be arn:.*. Conflicts with cluster_endpoint.
- indexRotationPeriod String - The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are NoRotation, OneHour, OneDay, OneWeek, and OneMonth. The default value is OneDay.
- processingConfiguration Property Map - The data processing configuration. See the processing_configuration block below for details.
- retryDuration Number - After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds (0 to 7200), during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- s3BackupMode String - Defines how documents should be delivered to Amazon S3. Valid values are FailedDocumentsOnly and AllDocuments. Default value is FailedDocumentsOnly.
- typeName String - The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty.
- vpcConfig Property Map - The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. See the vpc_config block below for details.
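A hedged TypeScript sketch tying the main opensearch_configuration fields together; the domain, role, and bucket ARNs are placeholders, and either domainArn or clusterEndpoint is set, never both.

```typescript
import * as aws from "@pulumi/aws";

const toOpensearch = new aws.kinesis.FirehoseDeliveryStream("to_opensearch", {
    name: "kinesis-firehose-to-opensearch",
    destination: "opensearch",
    opensearchConfiguration: {
        domainArn: "arn:aws:es:us-east-1:123456789012:domain/example",  // conflicts with clusterEndpoint
        roleArn: "arn:aws:iam::123456789012:role/firehose-opensearch",  // needs DescribeDomain* permissions
        indexName: "web-logs",
        indexRotationPeriod: "OneDay",       // default; appends a timestamp to the index name
        bufferingInterval: 60,               // seconds, 0-900 (default 300)
        bufferingSize: 10,                   // MB, 1-100 (default 5)
        retryDuration: 300,                  // seconds, 0-7200
        s3BackupMode: "AllDocuments",        // default is FailedDocumentsOnly
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose-opensearch",
            bucketArn: "arn:aws:s3:::my-opensearch-backup-bucket",
        },
    },
});
```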
FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamOpensearchConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled boolean - Enables or disables the logging. Defaults to false.
- logGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled bool - Enables or disables the logging. Defaults to false.
- log_group_name str - The CloudWatch group name for logging. This value is required if enabled is true.
- log_stream_name str - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptions, FirehoseDeliveryStreamOpensearchConfigurationDocumentIdOptionsArgs
- DefaultDocumentIdFormat string - The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.
- DefaultDocumentIdFormat string - The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.
- defaultDocumentIdFormat String - The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.
- defaultDocumentIdFormat string - The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.
- default_document_id_format str - The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.
- defaultDocumentIdFormat String - The method for setting up document ID. Valid values: FIREHOSE_DEFAULT, NO_DOCUMENT_ID.
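As a small addition to the OpenSearch fragment shown earlier, the document_id_options block has a single field. In the sketch below, NO_DOCUMENT_ID lets the OpenSearch domain auto-generate document IDs, while FIREHOSE_DEFAULT would have Firehose supply a unique ID per record; which behavior is preferable depends on the workload.

```typescript
// Fragment to place inside opensearchConfiguration.
const documentIdOptions = {
    defaultDocumentIdFormat: "NO_DOCUMENT_ID",
};
```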
FirehoseDeliveryStreamOpensearchConfigurationProcessingConfiguration, FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationArgs
- Enabled bool - Enables or disables data processing.
- Processors List<FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessor> - Specifies the data processors as multiple blocks. See the processors block below for details.
- Enabled bool - Enables or disables data processing.
- Processors []FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessor - Specifies the data processors as multiple blocks. See the processors block below for details.
- enabled Boolean - Enables or disables data processing.
- processors List<FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessor> - Specifies the data processors as multiple blocks. See the processors block below for details.
- enabled boolean - Enables or disables data processing.
- processors FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessor[] - Specifies the data processors as multiple blocks. See the processors block below for details.
- enabled bool - Enables or disables data processing.
- processors Sequence[FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessor] - Specifies the data processors as multiple blocks. See the processors block below for details.
- enabled Boolean - Enables or disables data processing.
- processors List<Property Map> - Specifies the data processors as multiple blocks. See the processors block below for details.
FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorArgs
- Type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters List<FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameter> - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- Type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters []FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameter - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type String - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters List<FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameter> - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameter[] - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type str - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters Sequence[FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameter] - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
- type String - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters List<Property Map> - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamOpensearchConfigurationProcessingConfigurationProcessorParameterArgs
- ParameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- ParameterValue string - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- ParameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- ParameterValue string - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameterName String - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue String - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue string - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter_name str - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameter_value str - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameterName String - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue String - Parameter value. Must be between 1 and 512 characters (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
FirehoseDeliveryStreamOpensearchConfigurationS3Configuration, FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationArgs
- Bucket
Arn string - The ARN of the S3 bucket
- Role
Arn string - The ARN of the AWS credentials.
- Buffering
Interval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- Buffering
Size int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- Cloudwatch
Logging FirehoseOptions Delivery Stream Opensearch Configuration S3Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - Compression
Format string - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - Error
Output stringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - Kms
Key stringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamOpensearchConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
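For example, a sketch that creates the log group and stream first and then references their names; both names are supplied because enabled is true:
import * as aws from "@pulumi/aws";
// Log group and stream that Firehose can write delivery errors to.
const firehoseLogGroup = new aws.cloudwatch.LogGroup("firehose_log_group", {retentionInDays: 14});
const firehoseLogStream = new aws.cloudwatch.LogStream("firehose_log_stream", {logGroupName: firehoseLogGroup.name});
// Reusable cloudwatch_logging_options value for an s3_configuration block.
const s3CloudwatchLoggingOptions = {
    enabled: true,
    logGroupName: firehoseLogGroup.name,
    logStreamName: firehoseLogStream.name,
};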
FirehoseDeliveryStreamOpensearchConfigurationVpcConfig, FirehoseDeliveryStreamOpensearchConfigurationVpcConfigArgs
- RoleArn string - The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.
- SecurityGroupIds List<string> - A list of security group IDs to associate with Kinesis Firehose.
- SubnetIds List<string> - A list of subnet IDs to associate with Kinesis Firehose.
- VpcId string
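A short sketch of a vpc_config value; the subnet IDs, security group ID, and role ARN are placeholders, and the role must carry the EC2 network-interface permissions mentioned above:
// Placeholder network identifiers; in practice these usually come from your VPC stack.
const firehoseVpcConfig = {
    roleArn: "arn:aws:iam::123456789012:role/firehose_vpc_role",
    subnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"],
    securityGroupIds: ["sg-0123456789abcdef0"],
};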
FirehoseDeliveryStreamOpensearchserverlessConfiguration, FirehoseDeliveryStreamOpensearchserverlessConfigurationArgs
- CollectionEndpoint string - The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service.
- IndexName string - The Serverless offering for Amazon OpenSearch Service index name.
- RoleArn string - The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents. The pattern needs to be arn:.*.
- S3Configuration FirehoseDeliveryStreamOpensearchserverlessConfigurationS3Configuration - The S3 Configuration. See the s3_configuration block below for details.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds between 0 and 900, before delivering it to the destination. The default value is 300 seconds.
- BufferingSize int - Buffer incoming data to the specified size, in MBs between 1 and 100, before delivering it to the destination. The default value is 5 MB.
- CloudwatchLoggingOptions FirehoseDeliveryStreamOpensearchserverlessConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- ProcessingConfiguration FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfiguration - The data processing configuration. See the processing_configuration block below for details.
- RetryDuration int - After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time, in seconds between 0 and 7200, during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300 seconds. There will be no retry if the value is 0.
- S3BackupMode string - Defines how documents should be delivered to Amazon S3. Valid Values: FailedDocumentsOnly and AllDocuments. The default value is FailedDocumentsOnly.
- VpcConfig FirehoseDeliveryStreamOpensearchserverlessConfigurationVpcConfig - The VPC configuration for the delivery stream to connect to OpenSearch Serverless associated with the VPC. See the vpc_config block below for details.
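Putting the required arguments together, a minimal sketch of an OpenSearch Serverless destination; the collection endpoint, role ARN, and bucket ARN are placeholders for resources assumed to exist:
import * as aws from "@pulumi/aws";
const serverlessExample = new aws.kinesis.FirehoseDeliveryStream("opensearchserverless_example", {
    name: "kinesis-firehose-opensearchserverless-example",
    destination: "opensearchserverless",
    opensearchserverlessConfiguration: {
        collectionEndpoint: "https://example123.us-east-1.aoss.amazonaws.com",
        indexName: "application-logs",
        roleArn: "arn:aws:iam::123456789012:role/firehose_role",
        bufferingInterval: 120,
        bufferingSize: 10,
        s3BackupMode: "FailedDocumentsOnly",
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose_role",
            bucketArn: "arn:aws:s3:::example-backup-bucket",
        },
    },
});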
FirehoseDeliveryStreamOpensearchserverlessConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamOpensearchserverlessConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfiguration, FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationArgs
- Enabled bool - Enables or disables data processing.
- Processors List<FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessor> - Specifies the data processors as multiple blocks. See the processors block below for details.
FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorArgs
- Type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters List<FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameter> - Specifies the processor parameters as multiple blocks. See the parameters block below for details.
FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamOpensearchserverlessConfigurationProcessingConfigurationProcessorParameterArgs
- ParameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- ParameterValue string - Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: the Firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is recommended to only include parameters with non-default values.
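As a hedged sketch of a non-Lambda processor chain, the following processing_configuration value pairs the Decompression and CloudWatchLogProcessing processor types with the CompressionFormat and DataMessageExtraction parameters listed above; the exact pairing your source requires should be confirmed against the AWS Firehose documentation:
// Decompress gzipped CloudWatch Logs records and extract the message payload (assumed pairing).
const decompressionProcessing = {
    enabled: true,
    processors: [
        {
            type: "Decompression",
            parameters: [{parameterName: "CompressionFormat", parameterValue: "GZIP"}],
        },
        {
            type: "CloudWatchLogProcessing",
            parameters: [{parameterName: "DataMessageExtraction", parameterValue: "true"}],
        },
    ],
};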
FirehoseDeliveryStreamOpensearchserverlessConfigurationS3Configuration, FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationArgs
- BucketArn string - The ARN of the S3 bucket.
- RoleArn string - The ARN of the AWS credentials.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferingSize int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- CompressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- ErrorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- KmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamOpensearchserverlessConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamOpensearchserverlessConfigurationVpcConfig, FirehoseDeliveryStreamOpensearchserverlessConfigurationVpcConfigArgs
- RoleArn string - The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Make sure the role has the necessary IAM permissions.
- SecurityGroupIds List<string> - A list of security group IDs to associate with Kinesis Firehose.
- SubnetIds List<string> - A list of subnet IDs to associate with Kinesis Firehose.
- VpcId string
FirehoseDeliveryStreamRedshiftConfiguration, FirehoseDeliveryStreamRedshiftConfigurationArgs
- ClusterJdbcurl string - The JDBC URL of the Redshift cluster.
- DataTableName string - The name of the table in the Redshift cluster that the S3 bucket will copy to.
- RoleArn string - The ARN of the role the stream assumes.
- S3Configuration FirehoseDeliveryStreamRedshiftConfigurationS3Configuration - The S3 Configuration. See the s3_configuration block below for details.
- CloudwatchLoggingOptions FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- CopyOptions string - Copy options for copying the data from the S3 intermediate bucket into Redshift, for example to change the default delimiter. For valid values, see the AWS documentation.
- DataTableColumns string - The data table columns that will be targeted by the copy command.
- Password string - The password for the username above. This value is required if secrets_manager_configuration is not provided.
- ProcessingConfiguration FirehoseDeliveryStreamRedshiftConfigurationProcessingConfiguration - The data processing configuration. See the processing_configuration block below for details.
- RetryDuration int - The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- S3BackupConfiguration FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfiguration - The configuration for backup in Amazon S3. Required if s3_backup_mode is Enabled. Supports the same fields as the s3_configuration object.
- S3BackupMode string - The Amazon S3 backup mode. Valid Values: Disabled and Enabled. The default value is Disabled.
- SecretsManagerConfiguration FirehoseDeliveryStreamRedshiftConfigurationSecretsManagerConfiguration - (Optional) The Secrets Manager configuration. See the secrets_manager_configuration block below for details. This value is required if username and password are not provided.
- Username string - The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided are used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted to Amazon Redshift INSERT permissions. This value is required if secrets_manager_configuration is not provided.
- cluster
Jdbcurl string - The jdbcurl of the redshift cluster.
- data
Table stringName - The name of the table in the redshift cluster that the s3 bucket will copy to.
- role
Arn string - The arn of the role the stream assumes.
- s3Configuration
Firehose
Delivery Stream Redshift Configuration S3Configuration - The S3 Configuration. See s3_configuration below for details.
- cloudwatch
Logging FirehoseOptions Delivery Stream Redshift Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - copy
Options string - Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the AWS documentation
- data
Table stringColumns - The data table columns that will be targeted by the copy command.
- password string
- The password for the username above. This value is required if
secrets_manager_configuration
is not provided. - processing
Configuration FirehoseDelivery Stream Redshift Configuration Processing Configuration - The data processing configuration. See
processing_configuration
block below for details. - retry
Duration number - The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- s3Backup
Configuration FirehoseDelivery Stream Redshift Configuration S3Backup Configuration - The configuration for backup in Amazon S3. Required if
s3_backup_mode
isEnabled
. Supports the same fields ass3_configuration
object.secrets_manager_configuration
- (Optional) The Secrets Manager configuration. Seesecrets_manager_configuration
block below for details. This value is required ifusername
andpassword
are not provided. - s3Backup
Mode string - The Amazon S3 backup mode. Valid values are
Disabled
andEnabled
. Default value isDisabled
. - secrets
Manager FirehoseConfiguration Delivery Stream Redshift Configuration Secrets Manager Configuration - username string
- The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. This value is required if
secrets_manager_configuration
is not provided.
- cluster_
jdbcurl str - The jdbcurl of the redshift cluster.
- data_
table_ strname - The name of the table in the redshift cluster that the s3 bucket will copy to.
- role_
arn str - The arn of the role the stream assumes.
- s3_
configuration FirehoseDelivery Stream Redshift Configuration S3Configuration - The S3 Configuration. See s3_configuration below for details.
- cloudwatch_
logging_ Firehoseoptions Delivery Stream Redshift Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - copy_
options str - Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the AWS documentation
- data_
table_ strcolumns - The data table columns that will be targeted by the copy command.
- password str
- The password for the username above. This value is required if
secrets_manager_configuration
is not provided. - processing_
configuration FirehoseDelivery Stream Redshift Configuration Processing Configuration - The data processing configuration. See
processing_configuration
block below for details. - retry_
duration int - The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- s3_
backup_ Firehoseconfiguration Delivery Stream Redshift Configuration S3Backup Configuration - The configuration for backup in Amazon S3. Required if
s3_backup_mode
isEnabled
. Supports the same fields ass3_configuration
object.secrets_manager_configuration
- (Optional) The Secrets Manager configuration. Seesecrets_manager_configuration
block below for details. This value is required ifusername
andpassword
are not provided. - s3_
backup_ strmode - The Amazon S3 backup mode. Valid values are
Disabled
andEnabled
. Default value isDisabled
. - secrets_
manager_ Firehoseconfiguration Delivery Stream Redshift Configuration Secrets Manager Configuration - username str
- The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. This value is required if
secrets_manager_configuration
is not provided.
- cluster
Jdbcurl String - The jdbcurl of the redshift cluster.
- data
Table StringName - The name of the table in the redshift cluster that the s3 bucket will copy to.
- role
Arn String - The arn of the role the stream assumes.
- s3Configuration Property Map
- The S3 Configuration. See s3_configuration below for details.
- cloudwatch
Logging Property MapOptions - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - copy
Options String - Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the AWS documentation
- data
Table StringColumns - The data table columns that will be targeted by the copy command.
- password String
- The password for the username above. This value is required if
secrets_manager_configuration
is not provided. - processing
Configuration Property Map - The data processing configuration. See
processing_configuration
block below for details. - retry
Duration Number - The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
- s3Backup
Configuration Property Map - The configuration for backup in Amazon S3. Required if
s3_backup_mode
isEnabled
. Supports the same fields ass3_configuration
object.secrets_manager_configuration
- (Optional) The Secrets Manager configuration. Seesecrets_manager_configuration
block below for details. This value is required ifusername
andpassword
are not provided. - s3Backup
Mode String - The Amazon S3 backup mode. Valid values are
Disabled
andEnabled
. Default value isDisabled
. - secrets
Manager Property MapConfiguration - username String
- The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. This value is required if
secrets_manager_configuration
is not provided.
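As a minimal sketch of how these properties fit together (the JDBC URL, credentials, and table name are placeholders; firehoseRole and bucket are assumed to be an IAM role and S3 bucket defined elsewhere, as in the Extended S3 example):
import * as aws from "@pulumi/aws";
const redshiftStream = new aws.kinesis.FirehoseDeliveryStream("redshift_stream", {
    name: "kinesis-firehose-redshift-stream",
    destination: "redshift",
    redshiftConfiguration: {
        roleArn: firehoseRole.arn,
        clusterJdbcurl: "jdbc:redshift://example-cluster.abc123.us-east-1.redshift.amazonaws.com:5439/exampledb",
        username: "exampleuser",
        password: "Examplepassword1",
        dataTableName: "example_table",
        copyOptions: "delimiter '|'",
        s3Configuration: {
            roleArn: firehoseRole.arn,
            bucketArn: bucket.arn,
            bufferingSize: 10,
            bufferingInterval: 400,
            compressionFormat: "GZIP",
        },
    },
});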
FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
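The logging block has the same shape wherever it appears. A sketch of wiring it to an explicitly created log group and stream (both names are placeholders):
import * as aws from "@pulumi/aws";
const firehoseLogGroup = new aws.cloudwatch.LogGroup("firehose_log_group", {name: "/aws/kinesisfirehose/example-stream"});
const firehoseLogStream = new aws.cloudwatch.LogStream("firehose_log_stream", {
    name: "DestinationDelivery",
    logGroupName: firehoseLogGroup.name,
});
// Referenced from a destination configuration block:
//   cloudwatchLoggingOptions: {
//       enabled: true,
//       logGroupName: firehoseLogGroup.name,
//       logStreamName: firehoseLogStream.name,
//   },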
FirehoseDeliveryStreamRedshiftConfigurationProcessingConfiguration, FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationArgs
- Enabled bool - Enables or disables data processing.
- Processors List<FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessor> - Specifies the data processors as multiple blocks. See processors block below for details.
FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorArgs
- Type string - The type of processor. Valid Values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- Parameters List<FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameter> - Specifies the processor parameters as multiple blocks. See parameters block below for details.
FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameterArgs
- ParameterName string - Parameter name. Valid Values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- ParameterValue string - Parameter value. Must be between 1 and 512 characters in length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
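To illustrate the note above, a sketch of a Lambda processor that only lists non-default parameter values, so the defaults never enter the configuration and cannot produce a perpetual diff. lambdaProcessor is assumed to be an aws.lambda.Function defined elsewhere, and the value is typed with the input type named in the heading above:
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const processing: aws.types.input.kinesis.FirehoseDeliveryStreamRedshiftConfigurationProcessingConfiguration = {
    enabled: true,
    processors: [{
        type: "Lambda",
        parameters: [
            {
                parameterName: "LambdaArn",
                parameterValue: pulumi.interpolate`${lambdaProcessor.arn}:$LATEST`,
            },
            {
                // Deliberately non-default (the default is 1 MB); omit this entry entirely to keep the default.
                parameterName: "BufferSizeInMBs",
                parameterValue: "3",
            },
        ],
    }],
};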
FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfiguration, FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs
- BucketArn string - The ARN of the S3 bucket.
- RoleArn string
- BufferingInterval int
- BufferingSize int
- CloudwatchLoggingOptions FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptions
- CompressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- ErrorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- KmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamRedshiftConfigurationS3Configuration, FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationArgs
- BucketArn string - The ARN of the S3 bucket.
- RoleArn string - The ARN of the AWS credentials.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- BufferingSize int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to be 10 MB or higher.
- CloudwatchLoggingOptions FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- CompressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, & HADOOP_SNAPPY.
- ErrorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- KmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamRedshiftConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamRedshiftConfigurationSecretsManagerConfiguration, FirehoseDeliveryStreamRedshiftConfigurationSecretsManagerConfigurationArgs
- enabled bool - Enables or disables the Secrets Manager configuration.
- role_arn str - The ARN of the role the stream assumes.
- secret_arn str - The ARN of the Secrets Manager secret. This value is required if enabled is true.
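A sketch of supplying the Redshift credentials through Secrets Manager instead of username and password. The secret name is a placeholder, the secret is assumed to already hold the credentials in the format Firehose expects, and firehoseRole is assumed to be defined elsewhere:
import * as aws from "@pulumi/aws";
const redshiftSecret = new aws.secretsmanager.Secret("redshift_credentials", {name: "firehose-redshift-credentials"});
// Inside redshiftConfiguration, in place of username/password:
//   secretsManagerConfiguration: {
//       enabled: true,
//       secretArn: redshiftSecret.arn,
//       roleArn: firehoseRole.arn,
//   },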
FirehoseDeliveryStreamServerSideEncryption, FirehoseDeliveryStreamServerSideEncryptionArgs
FirehoseDeliveryStreamSnowflakeConfiguration, FirehoseDeliveryStreamSnowflakeConfigurationArgs
- AccountUrl string - The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.
- Database string - The Snowflake database name.
- RoleArn string - The ARN of the IAM role.
- S3Configuration FirehoseDeliveryStreamSnowflakeConfigurationS3Configuration - The S3 configuration. See s3_configuration block below for details.
- Schema string - The Snowflake schema name.
- Table string - The Snowflake table name.
- BufferingInterval int - Buffer incoming data for the specified period of time, in seconds between 0 and 900, before delivering it to the destination. The default value is 0s.
- BufferingSize int - Buffer incoming data to the specified size, in MBs between 1 and 128, before delivering it to the destination. The default value is 1MB.
- CloudwatchLoggingOptions FirehoseDeliveryStreamSnowflakeConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- ContentColumnName string - The name of the content column.
- DataLoadingOption string - The data loading option.
- KeyPassphrase string - The passphrase for the private key.
- MetadataColumnName string - The name of the metadata column.
- PrivateKey string - The private key for authentication. This value is required if secrets_manager_configuration is not provided.
- ProcessingConfiguration FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfiguration - The processing configuration. See processing_configuration block below for details.
- RetryDuration int - After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0.
- S3BackupMode string - The S3 backup mode.
- SecretsManagerConfiguration FirehoseDeliveryStreamSnowflakeConfigurationSecretsManagerConfiguration - The Secrets Manager configuration. See secrets_manager_configuration block below for details. This value is required if user and private_key are not provided.
- SnowflakeRoleConfiguration FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeRoleConfiguration - The configuration for the Snowflake role.
- SnowflakeVpcConfiguration FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeVpcConfiguration - The VPC configuration for Snowflake.
- User string - The user for authentication. This value is required if secrets_manager_configuration is not provided.
- Account
Url string - The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.
- Database string
- The Snowflake database name.
- Role
Arn string - The ARN of the IAM role.
- S3Configuration
Firehose
Delivery Stream Snowflake Configuration S3Configuration - The S3 configuration. See
s3_configuration
block below for details. - Schema string
- The Snowflake schema name.
- Table string
- The Snowflake table name.
- Buffering
Interval int - Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 0s.
- Buffering
Size int - Buffer incoming data to the specified size, in MBs between 1 to 128, before delivering it to the destination. The default value is 1MB.
- Cloudwatch
Logging FirehoseOptions Delivery Stream Snowflake Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - Content
Column stringName - The name of the content column.
- Data
Loading stringOption - The data loading option.
- Key
Passphrase string - The passphrase for the private key.
- Metadata
Column stringName - The name of the metadata column.
- Private
Key string - The private key for authentication. This value is required if
secrets_manager_configuration
is not provided. - Processing
Configuration FirehoseDelivery Stream Snowflake Configuration Processing Configuration - The processing configuration. See
processing_configuration
block below for details. - Retry
Duration int - After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0.
- S3Backup
Mode string - The S3 backup mode.
- Secrets
Manager FirehoseConfiguration Delivery Stream Snowflake Configuration Secrets Manager Configuration - The Secrets Manager configuration. See
secrets_manager_configuration
block below for details. This value is required ifuser
andprivate_key
are not provided. - Snowflake
Role FirehoseConfiguration Delivery Stream Snowflake Configuration Snowflake Role Configuration - The configuration for Snowflake role.
- Snowflake
Vpc FirehoseConfiguration Delivery Stream Snowflake Configuration Snowflake Vpc Configuration - The VPC configuration for Snowflake.
- User string
- The user for authentication. This value is required if
secrets_manager_configuration
is not provided.
- account
Url String - The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.
- database String
- The Snowflake database name.
- role
Arn String - The ARN of the IAM role.
- s3Configuration
Firehose
Delivery Stream Snowflake Configuration S3Configuration - The S3 configuration. See
s3_configuration
block below for details. - schema String
- The Snowflake schema name.
- table String
- The Snowflake table name.
- buffering
Interval Integer - Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 0s.
- buffering
Size Integer - Buffer incoming data to the specified size, in MBs between 1 to 128, before delivering it to the destination. The default value is 1MB.
- cloudwatch
Logging FirehoseOptions Delivery Stream Snowflake Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - content
Column StringName - The name of the content column.
- data
Loading StringOption - The data loading option.
- key
Passphrase String - The passphrase for the private key.
- metadata
Column StringName - The name of the metadata column.
- private
Key String - The private key for authentication. This value is required if
secrets_manager_configuration
is not provided. - processing
Configuration FirehoseDelivery Stream Snowflake Configuration Processing Configuration - The processing configuration. See
processing_configuration
block below for details. - retry
Duration Integer - After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0.
- s3Backup
Mode String - The S3 backup mode.
- secrets
Manager FirehoseConfiguration Delivery Stream Snowflake Configuration Secrets Manager Configuration - The Secrets Manager configuration. See
secrets_manager_configuration
block below for details. This value is required ifuser
andprivate_key
are not provided. - snowflake
Role FirehoseConfiguration Delivery Stream Snowflake Configuration Snowflake Role Configuration - The configuration for Snowflake role.
- snowflake
Vpc FirehoseConfiguration Delivery Stream Snowflake Configuration Snowflake Vpc Configuration - The VPC configuration for Snowflake.
- user String
- The user for authentication. This value is required if
secrets_manager_configuration
is not provided.
- account
Url string - The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.
- database string
- The Snowflake database name.
- role
Arn string - The ARN of the IAM role.
- s3Configuration
Firehose
Delivery Stream Snowflake Configuration S3Configuration - The S3 configuration. See
s3_configuration
block below for details. - schema string
- The Snowflake schema name.
- table string
- The Snowflake table name.
- buffering
Interval number - Buffer incoming data for the specified period of time, in seconds between 0 to 900, before delivering it to the destination. The default value is 0s.
- buffering
Size number - Buffer incoming data to the specified size, in MBs between 1 to 128, before delivering it to the destination. The default value is 1MB.
- cloudwatch
Logging FirehoseOptions Delivery Stream Snowflake Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - content
Column stringName - The name of the content column.
- data
Loading stringOption - The data loading option.
- key
Passphrase string - The passphrase for the private key.
- metadata
Column stringName - The name of the metadata column.
- private
Key string - The private key for authentication. This value is required if
secrets_manager_configuration
is not provided. - processing
Configuration FirehoseDelivery Stream Snowflake Configuration Processing Configuration - The processing configuration. See
processing_configuration
block below for details. - retry
Duration number - After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0.
- s3Backup
Mode string - The S3 backup mode.
- secrets
Manager FirehoseConfiguration Delivery Stream Snowflake Configuration Secrets Manager Configuration - The Secrets Manager configuration. See
secrets_manager_configuration
block below for details. This value is required ifuser
andprivate_key
are not provided. - snowflake
Role FirehoseConfiguration Delivery Stream Snowflake Configuration Snowflake Role Configuration - The configuration for Snowflake role.
- snowflake
Vpc FirehoseConfiguration Delivery Stream Snowflake Configuration Snowflake Vpc Configuration - The VPC configuration for Snowflake.
- user string
- The user for authentication. This value is required if
secrets_manager_configuration
is not provided.
- account_url (str) - The URL of the Snowflake account. Format: https://[account_identifier].snowflakecomputing.com.
- database (str) - The Snowflake database name.
- role_arn (str) - The ARN of the IAM role.
- s3_configuration (FirehoseDeliveryStreamSnowflakeConfigurationS3Configuration) - The S3 configuration. See s3_configuration block below for details.
- schema (str) - The Snowflake schema name.
- table (str) - The Snowflake table name.
- buffering_interval (int) - Buffer incoming data for the specified period of time, in seconds between 0 and 900, before delivering it to the destination. The default value is 0s.
- buffering_size (int) - Buffer incoming data to the specified size, in MBs between 1 and 128, before delivering it to the destination. The default value is 1MB.
- cloudwatch_logging_options (FirehoseDeliveryStreamSnowflakeConfigurationCloudwatchLoggingOptions) - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- content_column_name (str) - The name of the content column.
- data_loading_option (str) - The data loading option.
- key_passphrase (str) - The passphrase for the private key.
- metadata_column_name (str) - The name of the metadata column.
- private_key (str) - The private key for authentication. This value is required if secrets_manager_configuration is not provided.
- processing_configuration (FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfiguration) - The processing configuration. See processing_configuration block below for details.
- retry_duration (int) - After an initial failure to deliver to Snowflake, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 60s. There will be no retry if the value is 0.
- s3_backup_mode (str) - The S3 backup mode.
- secrets_manager_configuration (FirehoseDeliveryStreamSnowflakeConfigurationSecretsManagerConfiguration) - The Secrets Manager configuration. See secrets_manager_configuration block below for details. This value is required if user and private_key are not provided.
- snowflake_role_configuration (FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeRoleConfiguration) - The configuration for the Snowflake role.
- snowflake_vpc_configuration (FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeVpcConfiguration) - The VPC configuration for Snowflake.
- user (str) - The user for authentication. This value is required if secrets_manager_configuration is not provided.
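For orientation, the following is a minimal sketch of a Snowflake destination wired up from the properties above. All identifiers (account URL, database, schema, table, ARNs, secret) are hypothetical placeholders, and the IAM role, backup bucket, and Secrets Manager secret are assumed to already exist; credentials are delegated to Secrets Manager, so user and private_key are omitted.
import * as aws from "@pulumi/aws";
const snowflakeStream = new aws.kinesis.FirehoseDeliveryStream("snowflake_stream", {
    name: "kinesis-firehose-snowflake-stream",
    destination: "snowflake",
    snowflakeConfiguration: {
        // Hypothetical Snowflake account identifier and objects.
        accountUrl: "https://example-account.snowflakecomputing.com",
        database: "EXAMPLE_DB",
        schema: "PUBLIC",
        table: "FIREHOSE_EVENTS",
        // IAM role the stream assumes (assumed to exist).
        roleArn: "arn:aws:iam::123456789012:role/firehose_snowflake_role",
        // Authentication via a pre-existing Secrets Manager secret.
        secretsManagerConfiguration: {
            enabled: true,
            roleArn: "arn:aws:iam::123456789012:role/firehose_snowflake_role",
            secretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:snowflake-credentials",
        },
        // Failed records are backed up to an existing bucket.
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose_snowflake_role",
            bucketArn: "arn:aws:s3:::firehose-backup-bucket",
        },
    },
});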
FirehoseDeliveryStreamSnowflakeConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamSnowflakeConfigurationCloudwatchLoggingOptionsArgs
- enabled (boolean) - Enables or disables the logging. Defaults to false.
- logGroupName (string) - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName (string) - The CloudWatch log stream name for logging. This value is required if enabled is true.
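A sketch of enabling delivery logging by nesting this block inside the snowflakeConfiguration shown above; the log group and stream names are hypothetical and are assumed to have been created separately.
cloudwatchLoggingOptions: {
    enabled: true,
    // Hypothetical, pre-created log group and stream.
    logGroupName: "/aws/kinesisfirehose/snowflake-stream",
    logStreamName: "DestinationDelivery",
},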
FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfiguration, FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationArgs
- enabled (boolean) - Enables or disables data processing.
- processors (list of FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessor) - Specifies the data processors as multiple blocks. See processors block below for details.
FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorArgs
- type (string) - The type of processor. Valid values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters (list of FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorParameter) - Specifies the processor parameters as multiple blocks. See parameters block below for details.
FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamSnowflakeConfigurationProcessingConfigurationProcessorParameterArgs
- Parameter
Name string - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - Parameter
Value string Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- Parameter
Name string - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - Parameter
Value string Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter
Name String - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter
Value String Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter
Name string - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter
Value string Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter_
name str - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter_
value str Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
- parameter
Name String - Parameter name. Valid Values:
LambdaArn
,NumberOfRetries
,MetadataExtractionQuery
,JsonParsingEngine
,RoleArn
,BufferSizeInMBs
,BufferIntervalInSeconds
,SubRecordType
,Delimiter
,CompressionFormat
,DataMessageExtraction
. Validation is done against AWS SDK constants; so values not explicitly listed may also work. - parameter
Value String Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including
NumberOfRetries
(default: 3),RoleArn
(default: firehose role ARN),BufferSizeInMBs
(default: 1), andBufferIntervalInSeconds
(default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
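Tying the processing_configuration, processors, and parameters blocks above together, the sketch below nests a single Lambda processor inside snowflakeConfiguration. The function ARN is a hypothetical placeholder, and only non-default parameter values are included, per the NOTE above.
processingConfiguration: {
    enabled: true,
    processors: [{
        type: "Lambda",
        parameters: [
            {
                // Hypothetical function ARN; include the version suffix.
                parameterName: "LambdaArn",
                parameterValue: "arn:aws:lambda:us-east-1:123456789012:function:firehose_lambda_processor:$LATEST",
            },
            // Only non-default values, to avoid perpetual diffs.
            { parameterName: "BufferSizeInMBs", parameterValue: "3" },
            { parameterName: "BufferIntervalInSeconds", parameterValue: "90" },
        ],
    }],
},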
FirehoseDeliveryStreamSnowflakeConfigurationS3Configuration, FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationArgs
- bucketArn (string) - The ARN of the S3 bucket.
- roleArn (string) - The ARN of the IAM role that grants Firehose access to the bucket.
- bufferingInterval (number) - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize (number) - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set it to 10 MB or higher.
- cloudwatchLoggingOptions (FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptions) - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- compressionFormat (string) - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- errorOutputPrefix (string) - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn (string) - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix (string) - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamSnowflakeConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- enabled (boolean) - Enables or disables the logging. Defaults to false.
- logGroupName (string) - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName (string) - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamSnowflakeConfigurationSecretsManagerConfiguration, FirehoseDeliveryStreamSnowflakeConfigurationSecretsManagerConfigurationArgs
- enabled (bool) - Enables or disables the Secrets Manager configuration.
- role_arn (str) - The ARN of the role the stream assumes.
- secret_arn (str) - The ARN of the Secrets Manager secret. This value is required if enabled is true.
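A sketch of wiring the block above to a secret, assuming a hypothetical aws.secretsmanager.Secret whose value (the Snowflake user and private key) is managed elsewhere.
import * as aws from "@pulumi/aws";
const snowflakeSecret = new aws.secretsmanager.Secret("snowflake_credentials", {
    name: "firehose-snowflake-credentials",
});
// Inside snowflakeConfiguration:
// secretsManagerConfiguration: {
//     enabled: true,
//     roleArn: "arn:aws:iam::123456789012:role/firehose_snowflake_role",
//     secretArn: snowflakeSecret.arn,
// },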
FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeRoleConfiguration, FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeRoleConfigurationArgs
- enabled (boolean) - Whether the Snowflake role is enabled.
- snowflakeRole (string) - The Snowflake role.
FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeVpcConfiguration, FirehoseDeliveryStreamSnowflakeConfigurationSnowflakeVpcConfigurationArgs
- privateLinkVpceId (string) - The VPCE ID for Firehose to privately connect with Snowflake.
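A sketch combining the two blocks above inside snowflakeConfiguration; the Snowflake role name and the PrivateLink VPCE ID are hypothetical placeholders.
snowflakeRoleConfiguration: {
    enabled: true,
    snowflakeRole: "FIREHOSE_LOADER",
},
snowflakeVpcConfiguration: {
    privateLinkVpceId: "vpce-0123456789abcdef0",
},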
FirehoseDeliveryStreamSplunkConfiguration, FirehoseDeliveryStreamSplunkConfigurationArgs
- hecEndpoint (string) - The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data.
- s3Configuration (FirehoseDeliveryStreamSplunkConfigurationS3Configuration) - The S3 configuration. See s3_configuration block below for details.
- bufferingInterval (number) - Buffer incoming data for the specified period of time, in seconds between 0 and 60, before delivering it to the destination. The default value is 60s.
- bufferingSize (number) - Buffer incoming data to the specified size, in MBs between 1 and 5, before delivering it to the destination. The default value is 5MB.
- cloudwatchLoggingOptions (FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptions) - The CloudWatch Logging Options for the delivery stream. See cloudwatch_logging_options block below for details.
- hecAcknowledgmentTimeout (number) - The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends data.
- hecEndpointType (string) - The HEC endpoint type. Valid values are Raw or Event. The default value is Raw.
- hecToken (string) - The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint. This value is required if secrets_manager_configuration is not provided.
- processingConfiguration (FirehoseDeliveryStreamSplunkConfigurationProcessingConfiguration) - The data processing configuration. See processing_configuration block below for details.
- retryDuration (number) - After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
- s3BackupMode (string) - Defines how documents should be delivered to Amazon S3. Valid values are FailedEventsOnly and AllEvents. Default value is FailedEventsOnly.
- secretsManagerConfiguration (FirehoseDeliveryStreamSplunkConfigurationSecretsManagerConfiguration) - (Optional) The Secrets Manager configuration. See secrets_manager_configuration block below for details. This value is required if hec_token is not provided.
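A minimal sketch of a Splunk destination built from the properties above. The HEC endpoint and token are hypothetical placeholders, and the IAM role and backup bucket are assumed to already exist.
import * as aws from "@pulumi/aws";
const splunkStream = new aws.kinesis.FirehoseDeliveryStream("splunk_stream", {
    name: "kinesis-firehose-splunk-stream",
    destination: "splunk",
    splunkConfiguration: {
        // Hypothetical HEC endpoint and token.
        hecEndpoint: "https://http-inputs-example.splunkcloud.com:443",
        hecToken: "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
        hecEndpointType: "Event",
        hecAcknowledgmentTimeout: 600,
        s3BackupMode: "FailedEventsOnly",
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose_splunk_role",
            bucketArn: "arn:aws:s3:::firehose-backup-bucket",
            bufferingSize: 10,
            bufferingInterval: 400,
            compressionFormat: "GZIP",
        },
    },
});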
FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptionsArgs
- enabled (boolean) - Enables or disables the logging. Defaults to false.
- logGroupName (string) - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName (string) - The CloudWatch log stream name for logging. This value is required if enabled is true.
FirehoseDeliveryStreamSplunkConfigurationProcessingConfiguration, FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationArgs
- enabled (boolean) - Enables or disables data processing.
- processors (list of FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessor) - Specifies the data processors as multiple blocks. See processors block below for details.
FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessor, FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorArgs
- type (string) - The type of processor. Valid values: RecordDeAggregation, Lambda, MetadataExtraction, AppendDelimiterToRecord, Decompression, CloudWatchLogProcessing. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameters (list of FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameter) - Specifies the processor parameters as multiple blocks. See parameters block below for details.
FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameter, FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameterArgs
- parameterName (string) - Parameter name. Valid values: LambdaArn, NumberOfRetries, MetadataExtractionQuery, JsonParsingEngine, RoleArn, BufferSizeInMBs, BufferIntervalInSeconds, SubRecordType, Delimiter, CompressionFormat, DataMessageExtraction. Validation is done against AWS SDK constants, so values not explicitly listed may also work.
- parameterValue (string) - Parameter value. Must be between 1 and 512 characters long (inclusive). When providing a Lambda ARN, you should specify the resource version as well.
NOTE: Parameters with default values, including NumberOfRetries (default: 3), RoleArn (default: the Firehose role ARN), BufferSizeInMBs (default: 1), and BufferIntervalInSeconds (default: 60), are not stored in Pulumi state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
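As a sketch only: when the stream's source is a CloudWatch Logs subscription, the processing configuration is commonly used to decompress and unwrap records before they reach Splunk. The processor types and parameter names below are taken from the valid-values lists above; whether this exact combination is needed depends on your source.
processingConfiguration: {
    enabled: true,
    processors: [
        {
            // CloudWatch Logs subscriptions deliver gzip-compressed records.
            type: "Decompression",
            parameters: [{ parameterName: "CompressionFormat", parameterValue: "GZIP" }],
        },
        {
            // Extract individual log events from the CloudWatch Logs envelope.
            type: "CloudWatchLogProcessing",
            parameters: [{ parameterName: "DataMessageExtraction", parameterValue: "true" }],
        },
    ],
},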
FirehoseDeliveryStreamSplunkConfigurationS3Configuration, FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationArgs
- Bucket
Arn string - The ARN of the S3 bucket
- Role
Arn string - The ARN of the AWS credentials.
- Buffering
Interval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- Buffering
Size int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- Cloudwatch
Logging FirehoseOptions Delivery Stream Splunk Configuration S3Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - Compression
Format string - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - Error
Output stringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - Kms
Key stringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- Bucket
Arn string - The ARN of the S3 bucket
- Role
Arn string - The ARN of the AWS credentials.
- Buffering
Interval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- Buffering
Size int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
- Cloudwatch
Logging FirehoseOptions Delivery Stream Splunk Configuration S3Configuration Cloudwatch Logging Options - The CloudWatch Logging Options for the delivery stream. See
cloudwatch_logging_options
block below for details. - Compression
Format string - The compression format. If no value is specified, the default is
UNCOMPRESSED
. Other supported values areGZIP
,ZIP
,Snappy
, &HADOOP_SNAPPY
. - Error
Output stringPrefix - Prefix added to failed records before writing them to S3. Not currently supported for
redshift
destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects. - Kms
Key stringArn - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- Prefix string
- The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
- bucketArn String - The ARN of the S3 bucket.
- roleArn String - The ARN of the AWS credentials.
- bufferingInterval Integer - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize Integer - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatchLoggingOptions FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compressionFormat String - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- errorOutputPrefix String - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn String - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
- bucketArn string - The ARN of the S3 bucket.
- roleArn string - The ARN of the AWS credentials.
- bufferingInterval number - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize number - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatchLoggingOptions FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compressionFormat string - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- errorOutputPrefix string - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn string - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix string - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
- bucket_arn str - The ARN of the S3 bucket.
- role_arn str - The ARN of the AWS credentials.
- buffering_interval int - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- buffering_size int - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatch_logging_options FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptions - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compression_format str - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- error_output_prefix str - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kms_key_arn str - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix str - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
- bucketArn String - The ARN of the S3 bucket.
- roleArn String - The ARN of the AWS credentials.
- bufferingInterval Number - Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
- bufferingSize Number - Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, set SizeInMBs to 10 MB or higher.
- cloudwatchLoggingOptions Property Map - The CloudWatch Logging Options for the delivery stream. See the cloudwatch_logging_options block below for details.
- compressionFormat String - The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP, Snappy, and HADOOP_SNAPPY.
- errorOutputPrefix String - Prefix added to failed records before writing them to S3. Not currently supported for the redshift destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.
- kmsKeyArn String - Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used.
- prefix String - The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket.
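To show how these s3_configuration properties fit together, here is a minimal TypeScript sketch of a Splunk-destination stream. The bucket ARN, role ARN, and HEC endpoint/token values are placeholders, and the supporting IAM resources are assumed to already exist.
import * as aws from "@pulumi/aws";

// Minimal sketch (placeholder ARNs and HEC values): a Splunk-destination stream whose
// s3Configuration sets the buffering, compression, and prefix properties listed above.
const splunkStream = new aws.kinesis.FirehoseDeliveryStream("splunk_stream", {
    name: "kinesis-firehose-splunk-stream",
    destination: "splunk",
    splunkConfiguration: {
        hecEndpoint: "https://http-inputs-example.splunkcloud.com:443",
        hecToken: "00000000-0000-0000-0000-000000000000",
        s3Configuration: {
            roleArn: "arn:aws:iam::123456789012:role/firehose-role", // AWS credentials the stream uses
            bucketArn: "arn:aws:s3:::my-backup-bucket",               // S3 bucket for backed-up records
            bufferingInterval: 300,              // seconds to buffer before delivery (default 300)
            bufferingSize: 10,                   // MBs to buffer before delivery (default 5)
            compressionFormat: "GZIP",           // UNCOMPRESSED (default), GZIP, ZIP, Snappy, or HADOOP_SNAPPY
            prefix: "splunk-backup/",            // added in front of the YYYY/MM/DD/HH time prefix
            errorOutputPrefix: "splunk-errors/", // prefix applied to failed records
        },
    },
});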
FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptions, FirehoseDeliveryStreamSplunkConfigurationS3ConfigurationCloudwatchLoggingOptionsArgs
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- Enabled bool - Enables or disables the logging. Defaults to false.
- LogGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- LogStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled boolean - Enables or disables the logging. Defaults to false.
- logGroupName string - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName string - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled bool - Enables or disables the logging. Defaults to false.
- log_group_name str - The CloudWatch group name for logging. This value is required if enabled is true.
- log_stream_name str - The CloudWatch log stream name for logging. This value is required if enabled is true.
- enabled Boolean - Enables or disables the logging. Defaults to false.
- logGroupName String - The CloudWatch group name for logging. This value is required if enabled is true.
- logStreamName String - The CloudWatch log stream name for logging. This value is required if enabled is true.
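A short TypeScript sketch of how these logging options are typically wired up, assuming a log group and log stream created alongside the stream (both names below are illustrative):
import * as aws from "@pulumi/aws";

// Illustrative log group and log stream for the delivery stream's S3 backup logging.
const firehoseLogGroup = new aws.cloudwatch.LogGroup("firehose_log_group", {
    name: "/aws/kinesisfirehose/splunk-stream",
});
const firehoseLogStream = new aws.cloudwatch.LogStream("firehose_log_stream", {
    name: "S3Delivery",
    logGroupName: firehoseLogGroup.name,
});

// Both names are required once logging is enabled; pass this object as the
// cloudwatchLoggingOptions field of the s3Configuration block shown above.
const cloudwatchLoggingOptions = {
    enabled: true,
    logGroupName: firehoseLogGroup.name,
    logStreamName: firehoseLogStream.name,
};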
FirehoseDeliveryStreamSplunkConfigurationSecretsManagerConfiguration, FirehoseDeliveryStreamSplunkConfigurationSecretsManagerConfigurationArgs
- enabled bool - Enables or disables the Secrets Manager configuration.
- role_arn str - The ARN of the role the stream assumes.
- secret_arn str - The ARN of the Secrets Manager secret. This value is required if enabled is true.
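As a TypeScript sketch (the secret name and role ARN are placeholders), this block can reference an existing Secrets Manager secret so the Splunk credentials are read from Secrets Manager rather than set inline:
import * as aws from "@pulumi/aws";

// Look up an existing secret holding the Splunk HEC token (the name is a placeholder).
const hecSecret = aws.secretsmanager.getSecretOutput({ name: "firehose/splunk-hec-token" });

// Pass this object as the secretsManagerConfiguration field of splunkConfiguration.
const secretsManagerConfiguration = {
    enabled: true,
    secretArn: hecSecret.arn,
    roleArn: "arn:aws:iam::123456789012:role/firehose-role", // role allowed to read the secret
};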
Import
Using pulumi import, import Kinesis Firehose Delivery streams using the stream ARN. For example:
$ pulumi import aws:kinesis/firehoseDeliveryStream:FirehoseDeliveryStream foo arn:aws:firehose:us-east-1:XXX:deliverystream/example
Note: Import does not work for the s3 stream destination. Consider using extended_s3, since the s3 destination is deprecated.
To learn more about importing existing cloud resources, see Importing resources.
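If you prefer to adopt an existing stream from code rather than the CLI, the same ARN can be supplied through Pulumi's import resource option. This is only a sketch: the declared properties below are placeholders and must match the live resource for the import to succeed.
import * as aws from "@pulumi/aws";

// Adopt an existing delivery stream by ARN via the `import` resource option.
// The declared configuration must match the live resource; values shown are placeholders.
const adopted = new aws.kinesis.FirehoseDeliveryStream("foo", {
    name: "example",
    destination: "extended_s3",
    extendedS3Configuration: {
        roleArn: "arn:aws:iam::123456789012:role/firehose-role",
        bucketArn: "arn:aws:s3:::example-bucket",
    },
}, { import: "arn:aws:firehose:us-east-1:XXX:deliverystream/example" });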
Package Details
- Repository
- AWS Classic pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the aws Terraform Provider.