aws.mskconnect.Connector
Provides an Amazon MSK Connect Connector resource.
Example Usage
Basic configuration
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.mskconnect.Connector("example", {
name: "example",
kafkaconnectVersion: "2.7.1",
capacity: {
autoscaling: {
mcuCount: 1,
minWorkerCount: 1,
maxWorkerCount: 2,
scaleInPolicy: {
cpuUtilizationPercentage: 20,
},
scaleOutPolicy: {
cpuUtilizationPercentage: 80,
},
},
},
connectorConfiguration: {
"connector.class": "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector",
"tasks.max": "1",
topics: "example",
},
kafkaCluster: {
apacheKafkaCluster: {
bootstrapServers: exampleAwsMskCluster.bootstrapBrokersTls,
vpc: {
securityGroups: [exampleAwsSecurityGroup.id],
subnets: [
example1.id,
example2.id,
example3.id,
],
},
},
},
kafkaClusterClientAuthentication: {
authenticationType: "NONE",
},
kafkaClusterEncryptionInTransit: {
encryptionType: "TLS",
},
plugins: [{
customPlugin: {
arn: exampleAwsMskconnectCustomPlugin.arn,
revision: exampleAwsMskconnectCustomPlugin.latestRevision,
},
}],
serviceExecutionRoleArn: exampleAwsIamRole.arn,
});
import pulumi
import pulumi_aws as aws
example = aws.mskconnect.Connector("example",
name="example",
kafkaconnect_version="2.7.1",
capacity={
"autoscaling": {
"mcu_count": 1,
"min_worker_count": 1,
"max_worker_count": 2,
"scale_in_policy": {
"cpu_utilization_percentage": 20,
},
"scale_out_policy": {
"cpu_utilization_percentage": 80,
},
},
},
connector_configuration={
"connector.class": "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector",
"tasks.max": "1",
"topics": "example",
},
kafka_cluster={
"apache_kafka_cluster": {
"bootstrap_servers": example_aws_msk_cluster["bootstrapBrokersTls"],
"vpc": {
"security_groups": [example_aws_security_group["id"]],
"subnets": [
example1["id"],
example2["id"],
example3["id"],
],
},
},
},
kafka_cluster_client_authentication={
"authentication_type": "NONE",
},
kafka_cluster_encryption_in_transit={
"encryption_type": "TLS",
},
plugins=[{
"custom_plugin": {
"arn": example_aws_mskconnect_custom_plugin["arn"],
"revision": example_aws_mskconnect_custom_plugin["latestRevision"],
},
}],
service_execution_role_arn=example_aws_iam_role["arn"])
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/mskconnect"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := mskconnect.NewConnector(ctx, "example", &mskconnect.ConnectorArgs{
Name: pulumi.String("example"),
KafkaconnectVersion: pulumi.String("2.7.1"),
Capacity: &mskconnect.ConnectorCapacityArgs{
Autoscaling: &mskconnect.ConnectorCapacityAutoscalingArgs{
McuCount: pulumi.Int(1),
MinWorkerCount: pulumi.Int(1),
MaxWorkerCount: pulumi.Int(2),
ScaleInPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleInPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(20),
},
ScaleOutPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleOutPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(80),
},
},
},
ConnectorConfiguration: pulumi.StringMap{
"connector.class": pulumi.String("com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector"),
"tasks.max": pulumi.String("1"),
"topics": pulumi.String("example"),
},
KafkaCluster: &mskconnect.ConnectorKafkaClusterArgs{
ApacheKafkaCluster: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterArgs{
BootstrapServers: pulumi.Any(exampleAwsMskCluster.BootstrapBrokersTls),
Vpc: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterVpcArgs{
SecurityGroups: pulumi.StringArray{
exampleAwsSecurityGroup.Id,
},
Subnets: pulumi.StringArray{
example1.Id,
example2.Id,
example3.Id,
},
},
},
},
KafkaClusterClientAuthentication: &mskconnect.ConnectorKafkaClusterClientAuthenticationArgs{
AuthenticationType: pulumi.String("NONE"),
},
KafkaClusterEncryptionInTransit: &mskconnect.ConnectorKafkaClusterEncryptionInTransitArgs{
EncryptionType: pulumi.String("TLS"),
},
Plugins: mskconnect.ConnectorPluginArray{
&mskconnect.ConnectorPluginArgs{
CustomPlugin: &mskconnect.ConnectorPluginCustomPluginArgs{
Arn: pulumi.Any(exampleAwsMskconnectCustomPlugin.Arn),
Revision: pulumi.Any(exampleAwsMskconnectCustomPlugin.LatestRevision),
},
},
},
ServiceExecutionRoleArn: pulumi.Any(exampleAwsIamRole.Arn),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.MskConnect.Connector("example", new()
{
Name = "example",
KafkaconnectVersion = "2.7.1",
Capacity = new Aws.MskConnect.Inputs.ConnectorCapacityArgs
{
Autoscaling = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingArgs
{
McuCount = 1,
MinWorkerCount = 1,
MaxWorkerCount = 2,
ScaleInPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleInPolicyArgs
{
CpuUtilizationPercentage = 20,
},
ScaleOutPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleOutPolicyArgs
{
CpuUtilizationPercentage = 80,
},
},
},
ConnectorConfiguration =
{
{ "connector.class", "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector" },
{ "tasks.max", "1" },
{ "topics", "example" },
},
KafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterArgs
{
ApacheKafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterArgs
{
BootstrapServers = exampleAwsMskCluster.BootstrapBrokersTls,
Vpc = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterVpcArgs
{
SecurityGroups = new[]
{
exampleAwsSecurityGroup.Id,
},
Subnets = new[]
{
example1.Id,
example2.Id,
example3.Id,
},
},
},
},
KafkaClusterClientAuthentication = new Aws.MskConnect.Inputs.ConnectorKafkaClusterClientAuthenticationArgs
{
AuthenticationType = "NONE",
},
KafkaClusterEncryptionInTransit = new Aws.MskConnect.Inputs.ConnectorKafkaClusterEncryptionInTransitArgs
{
EncryptionType = "TLS",
},
Plugins = new[]
{
new Aws.MskConnect.Inputs.ConnectorPluginArgs
{
CustomPlugin = new Aws.MskConnect.Inputs.ConnectorPluginCustomPluginArgs
{
Arn = exampleAwsMskconnectCustomPlugin.Arn,
Revision = exampleAwsMskconnectCustomPlugin.LatestRevision,
},
},
},
ServiceExecutionRoleArn = exampleAwsIamRole.Arn,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.mskconnect.Connector;
import com.pulumi.aws.mskconnect.ConnectorArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityAutoscalingArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityAutoscalingScaleInPolicyArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorCapacityAutoscalingScaleOutPolicyArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterApacheKafkaClusterArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterApacheKafkaClusterVpcArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterClientAuthenticationArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorKafkaClusterEncryptionInTransitArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorPluginArgs;
import com.pulumi.aws.mskconnect.inputs.ConnectorPluginCustomPluginArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new Connector("example", ConnectorArgs.builder()
.name("example")
.kafkaconnectVersion("2.7.1")
.capacity(ConnectorCapacityArgs.builder()
.autoscaling(ConnectorCapacityAutoscalingArgs.builder()
.mcuCount(1)
.minWorkerCount(1)
.maxWorkerCount(2)
.scaleInPolicy(ConnectorCapacityAutoscalingScaleInPolicyArgs.builder()
.cpuUtilizationPercentage(20)
.build())
.scaleOutPolicy(ConnectorCapacityAutoscalingScaleOutPolicyArgs.builder()
.cpuUtilizationPercentage(80)
.build())
.build())
.build())
.connectorConfiguration(Map.ofEntries(
Map.entry("connector.class", "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector"),
Map.entry("tasks.max", "1"),
Map.entry("topics", "example")
))
.kafkaCluster(ConnectorKafkaClusterArgs.builder()
.apacheKafkaCluster(ConnectorKafkaClusterApacheKafkaClusterArgs.builder()
.bootstrapServers(exampleAwsMskCluster.bootstrapBrokersTls())
.vpc(ConnectorKafkaClusterApacheKafkaClusterVpcArgs.builder()
.securityGroups(exampleAwsSecurityGroup.id())
.subnets(
example1.id(),
example2.id(),
example3.id())
.build())
.build())
.build())
.kafkaClusterClientAuthentication(ConnectorKafkaClusterClientAuthenticationArgs.builder()
.authenticationType("NONE")
.build())
.kafkaClusterEncryptionInTransit(ConnectorKafkaClusterEncryptionInTransitArgs.builder()
.encryptionType("TLS")
.build())
.plugins(ConnectorPluginArgs.builder()
.customPlugin(ConnectorPluginCustomPluginArgs.builder()
.arn(exampleAwsMskconnectCustomPlugin.arn())
.revision(exampleAwsMskconnectCustomPlugin.latestRevision())
.build())
.build())
.serviceExecutionRoleArn(exampleAwsIamRole.arn())
.build());
}
}
resources:
example:
type: aws:mskconnect:Connector
properties:
name: example
kafkaconnectVersion: 2.7.1
capacity:
autoscaling:
mcuCount: 1
minWorkerCount: 1
maxWorkerCount: 2
scaleInPolicy:
cpuUtilizationPercentage: 20
scaleOutPolicy:
cpuUtilizationPercentage: 80
connectorConfiguration:
connector.class: com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector
tasks.max: '1'
topics: example
kafkaCluster:
apacheKafkaCluster:
bootstrapServers: ${exampleAwsMskCluster.bootstrapBrokersTls}
vpc:
securityGroups:
- ${exampleAwsSecurityGroup.id}
subnets:
- ${example1.id}
- ${example2.id}
- ${example3.id}
kafkaClusterClientAuthentication:
authenticationType: NONE
kafkaClusterEncryptionInTransit:
encryptionType: TLS
plugins:
- customPlugin:
arn: ${exampleAwsMskconnectCustomPlugin.arn}
revision: ${exampleAwsMskconnectCustomPlugin.latestRevision}
serviceExecutionRoleArn: ${exampleAwsIamRole.arn}
Create Connector Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Connector(name: string, args: ConnectorArgs, opts?: CustomResourceOptions);
@overload
def Connector(resource_name: str,
args: ConnectorArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Connector(resource_name: str,
opts: Optional[ResourceOptions] = None,
kafkaconnect_version: Optional[str] = None,
connector_configuration: Optional[Mapping[str, str]] = None,
kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
kafka_cluster_client_authentication: Optional[ConnectorKafkaClusterClientAuthenticationArgs] = None,
kafka_cluster_encryption_in_transit: Optional[ConnectorKafkaClusterEncryptionInTransitArgs] = None,
capacity: Optional[ConnectorCapacityArgs] = None,
plugins: Optional[Sequence[ConnectorPluginArgs]] = None,
service_execution_role_arn: Optional[str] = None,
description: Optional[str] = None,
log_delivery: Optional[ConnectorLogDeliveryArgs] = None,
name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
worker_configuration: Optional[ConnectorWorkerConfigurationArgs] = None)
func NewConnector(ctx *Context, name string, args ConnectorArgs, opts ...ResourceOption) (*Connector, error)
public Connector(string name, ConnectorArgs args, CustomResourceOptions? opts = null)
public Connector(String name, ConnectorArgs args)
public Connector(String name, ConnectorArgs args, CustomResourceOptions options)
type: aws:mskconnect:Connector
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ConnectorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var connectorResource = new Aws.MskConnect.Connector("connectorResource", new()
{
KafkaconnectVersion = "string",
ConnectorConfiguration =
{
{ "string", "string" },
},
KafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterArgs
{
ApacheKafkaCluster = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterArgs
{
BootstrapServers = "string",
Vpc = new Aws.MskConnect.Inputs.ConnectorKafkaClusterApacheKafkaClusterVpcArgs
{
SecurityGroups = new[]
{
"string",
},
Subnets = new[]
{
"string",
},
},
},
},
KafkaClusterClientAuthentication = new Aws.MskConnect.Inputs.ConnectorKafkaClusterClientAuthenticationArgs
{
AuthenticationType = "string",
},
KafkaClusterEncryptionInTransit = new Aws.MskConnect.Inputs.ConnectorKafkaClusterEncryptionInTransitArgs
{
EncryptionType = "string",
},
Capacity = new Aws.MskConnect.Inputs.ConnectorCapacityArgs
{
Autoscaling = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingArgs
{
MaxWorkerCount = 0,
MinWorkerCount = 0,
McuCount = 0,
ScaleInPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleInPolicyArgs
{
CpuUtilizationPercentage = 0,
},
ScaleOutPolicy = new Aws.MskConnect.Inputs.ConnectorCapacityAutoscalingScaleOutPolicyArgs
{
CpuUtilizationPercentage = 0,
},
},
ProvisionedCapacity = new Aws.MskConnect.Inputs.ConnectorCapacityProvisionedCapacityArgs
{
WorkerCount = 0,
McuCount = 0,
},
},
Plugins = new[]
{
new Aws.MskConnect.Inputs.ConnectorPluginArgs
{
CustomPlugin = new Aws.MskConnect.Inputs.ConnectorPluginCustomPluginArgs
{
Arn = "string",
Revision = 0,
},
},
},
ServiceExecutionRoleArn = "string",
Description = "string",
LogDelivery = new Aws.MskConnect.Inputs.ConnectorLogDeliveryArgs
{
WorkerLogDelivery = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryArgs
{
CloudwatchLogs = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs
{
Enabled = false,
LogGroup = "string",
},
Firehose = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs
{
Enabled = false,
DeliveryStream = "string",
},
S3 = new Aws.MskConnect.Inputs.ConnectorLogDeliveryWorkerLogDeliveryS3Args
{
Enabled = false,
Bucket = "string",
Prefix = "string",
},
},
},
Name = "string",
Tags =
{
{ "string", "string" },
},
WorkerConfiguration = new Aws.MskConnect.Inputs.ConnectorWorkerConfigurationArgs
{
Arn = "string",
Revision = 0,
},
});
example, err := mskconnect.NewConnector(ctx, "connectorResource", &mskconnect.ConnectorArgs{
KafkaconnectVersion: pulumi.String("string"),
ConnectorConfiguration: pulumi.StringMap{
"string": pulumi.String("string"),
},
KafkaCluster: &mskconnect.ConnectorKafkaClusterArgs{
ApacheKafkaCluster: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterArgs{
BootstrapServers: pulumi.String("string"),
Vpc: &mskconnect.ConnectorKafkaClusterApacheKafkaClusterVpcArgs{
SecurityGroups: pulumi.StringArray{
pulumi.String("string"),
},
Subnets: pulumi.StringArray{
pulumi.String("string"),
},
},
},
},
KafkaClusterClientAuthentication: &mskconnect.ConnectorKafkaClusterClientAuthenticationArgs{
AuthenticationType: pulumi.String("string"),
},
KafkaClusterEncryptionInTransit: &mskconnect.ConnectorKafkaClusterEncryptionInTransitArgs{
EncryptionType: pulumi.String("string"),
},
Capacity: &mskconnect.ConnectorCapacityArgs{
Autoscaling: &mskconnect.ConnectorCapacityAutoscalingArgs{
MaxWorkerCount: pulumi.Int(0),
MinWorkerCount: pulumi.Int(0),
McuCount: pulumi.Int(0),
ScaleInPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleInPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(0),
},
ScaleOutPolicy: &mskconnect.ConnectorCapacityAutoscalingScaleOutPolicyArgs{
CpuUtilizationPercentage: pulumi.Int(0),
},
},
ProvisionedCapacity: &mskconnect.ConnectorCapacityProvisionedCapacityArgs{
WorkerCount: pulumi.Int(0),
McuCount: pulumi.Int(0),
},
},
Plugins: mskconnect.ConnectorPluginArray{
&mskconnect.ConnectorPluginArgs{
CustomPlugin: &mskconnect.ConnectorPluginCustomPluginArgs{
Arn: pulumi.String("string"),
Revision: pulumi.Int(0),
},
},
},
ServiceExecutionRoleArn: pulumi.String("string"),
Description: pulumi.String("string"),
LogDelivery: &mskconnect.ConnectorLogDeliveryArgs{
WorkerLogDelivery: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryArgs{
CloudwatchLogs: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs{
Enabled: pulumi.Bool(false),
LogGroup: pulumi.String("string"),
},
Firehose: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs{
Enabled: pulumi.Bool(false),
DeliveryStream: pulumi.String("string"),
},
S3: &mskconnect.ConnectorLogDeliveryWorkerLogDeliveryS3Args{
Enabled: pulumi.Bool(false),
Bucket: pulumi.String("string"),
Prefix: pulumi.String("string"),
},
},
},
Name: pulumi.String("string"),
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
WorkerConfiguration: &mskconnect.ConnectorWorkerConfigurationArgs{
Arn: pulumi.String("string"),
Revision: pulumi.Int(0),
},
})
var connectorResource = new Connector("connectorResource", ConnectorArgs.builder()
.kafkaconnectVersion("string")
.connectorConfiguration(Map.of("string", "string"))
.kafkaCluster(ConnectorKafkaClusterArgs.builder()
.apacheKafkaCluster(ConnectorKafkaClusterApacheKafkaClusterArgs.builder()
.bootstrapServers("string")
.vpc(ConnectorKafkaClusterApacheKafkaClusterVpcArgs.builder()
.securityGroups("string")
.subnets("string")
.build())
.build())
.build())
.kafkaClusterClientAuthentication(ConnectorKafkaClusterClientAuthenticationArgs.builder()
.authenticationType("string")
.build())
.kafkaClusterEncryptionInTransit(ConnectorKafkaClusterEncryptionInTransitArgs.builder()
.encryptionType("string")
.build())
.capacity(ConnectorCapacityArgs.builder()
.autoscaling(ConnectorCapacityAutoscalingArgs.builder()
.maxWorkerCount(0)
.minWorkerCount(0)
.mcuCount(0)
.scaleInPolicy(ConnectorCapacityAutoscalingScaleInPolicyArgs.builder()
.cpuUtilizationPercentage(0)
.build())
.scaleOutPolicy(ConnectorCapacityAutoscalingScaleOutPolicyArgs.builder()
.cpuUtilizationPercentage(0)
.build())
.build())
.provisionedCapacity(ConnectorCapacityProvisionedCapacityArgs.builder()
.workerCount(0)
.mcuCount(0)
.build())
.build())
.plugins(ConnectorPluginArgs.builder()
.customPlugin(ConnectorPluginCustomPluginArgs.builder()
.arn("string")
.revision(0)
.build())
.build())
.serviceExecutionRoleArn("string")
.description("string")
.logDelivery(ConnectorLogDeliveryArgs.builder()
.workerLogDelivery(ConnectorLogDeliveryWorkerLogDeliveryArgs.builder()
.cloudwatchLogs(ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs.builder()
.enabled(false)
.logGroup("string")
.build())
.firehose(ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs.builder()
.enabled(false)
.deliveryStream("string")
.build())
.s3(ConnectorLogDeliveryWorkerLogDeliveryS3Args.builder()
.enabled(false)
.bucket("string")
.prefix("string")
.build())
.build())
.build())
.name("string")
.tags(Map.of("string", "string"))
.workerConfiguration(ConnectorWorkerConfigurationArgs.builder()
.arn("string")
.revision(0)
.build())
.build());
connector_resource = aws.mskconnect.Connector("connectorResource",
kafkaconnect_version="string",
connector_configuration={
"string": "string",
},
kafka_cluster={
"apache_kafka_cluster": {
"bootstrap_servers": "string",
"vpc": {
"security_groups": ["string"],
"subnets": ["string"],
},
},
},
kafka_cluster_client_authentication={
"authentication_type": "string",
},
kafka_cluster_encryption_in_transit={
"encryption_type": "string",
},
capacity={
"autoscaling": {
"max_worker_count": 0,
"min_worker_count": 0,
"mcu_count": 0,
"scale_in_policy": {
"cpu_utilization_percentage": 0,
},
"scale_out_policy": {
"cpu_utilization_percentage": 0,
},
},
"provisioned_capacity": {
"worker_count": 0,
"mcu_count": 0,
},
},
plugins=[{
"custom_plugin": {
"arn": "string",
"revision": 0,
},
}],
service_execution_role_arn="string",
description="string",
log_delivery={
"worker_log_delivery": {
"cloudwatch_logs": {
"enabled": False,
"log_group": "string",
},
"firehose": {
"enabled": False,
"delivery_stream": "string",
},
"s3": {
"enabled": False,
"bucket": "string",
"prefix": "string",
},
},
},
name="string",
tags={
"string": "string",
},
worker_configuration={
"arn": "string",
"revision": 0,
})
const connectorResource = new aws.mskconnect.Connector("connectorResource", {
kafkaconnectVersion: "string",
connectorConfiguration: {
string: "string",
},
kafkaCluster: {
apacheKafkaCluster: {
bootstrapServers: "string",
vpc: {
securityGroups: ["string"],
subnets: ["string"],
},
},
},
kafkaClusterClientAuthentication: {
authenticationType: "string",
},
kafkaClusterEncryptionInTransit: {
encryptionType: "string",
},
capacity: {
autoscaling: {
maxWorkerCount: 0,
minWorkerCount: 0,
mcuCount: 0,
scaleInPolicy: {
cpuUtilizationPercentage: 0,
},
scaleOutPolicy: {
cpuUtilizationPercentage: 0,
},
},
provisionedCapacity: {
workerCount: 0,
mcuCount: 0,
},
},
plugins: [{
customPlugin: {
arn: "string",
revision: 0,
},
}],
serviceExecutionRoleArn: "string",
description: "string",
logDelivery: {
workerLogDelivery: {
cloudwatchLogs: {
enabled: false,
logGroup: "string",
},
firehose: {
enabled: false,
deliveryStream: "string",
},
s3: {
enabled: false,
bucket: "string",
prefix: "string",
},
},
},
name: "string",
tags: {
string: "string",
},
workerConfiguration: {
arn: "string",
revision: 0,
},
});
type: aws:mskconnect:Connector
properties:
capacity:
autoscaling:
maxWorkerCount: 0
mcuCount: 0
minWorkerCount: 0
scaleInPolicy:
cpuUtilizationPercentage: 0
scaleOutPolicy:
cpuUtilizationPercentage: 0
provisionedCapacity:
mcuCount: 0
workerCount: 0
connectorConfiguration:
string: string
description: string
kafkaCluster:
apacheKafkaCluster:
bootstrapServers: string
vpc:
securityGroups:
- string
subnets:
- string
kafkaClusterClientAuthentication:
authenticationType: string
kafkaClusterEncryptionInTransit:
encryptionType: string
kafkaconnectVersion: string
logDelivery:
workerLogDelivery:
cloudwatchLogs:
enabled: false
logGroup: string
firehose:
deliveryStream: string
enabled: false
s3:
bucket: string
enabled: false
prefix: string
name: string
plugins:
- customPlugin:
arn: string
revision: 0
serviceExecutionRoleArn: string
tags:
string: string
workerConfiguration:
arn: string
revision: 0
Connector Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
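As a minimal sketch of that equivalence, the two values below are interchangeable when supplied as the connector's kafka_cluster_client_authentication input (the class and field names are the ones documented on this page):
import pulumi_aws as aws
# Typed argument class:
as_args_class = aws.mskconnect.ConnectorKafkaClusterClientAuthenticationArgs(
    authentication_type="NONE",
)
# Equivalent dictionary literal with snake_case keys:
as_dict = {"authentication_type": "NONE"}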
The Connector resource accepts the following input properties:
(Property names are shown in camelCase; each language SDK exposes the same properties under its own casing and type conventions.)
- capacity (ConnectorCapacity) - Information about the capacity allocated to the connector. See capacity Block for details.
- connectorConfiguration (map of string to string) - A map of keys to values that represent the configuration for the connector.
- kafkaCluster (ConnectorKafkaCluster) - Specifies which Apache Kafka cluster to connect to. See kafka_cluster Block for details.
- kafkaClusterClientAuthentication (ConnectorKafkaClusterClientAuthentication) - Details of the client authentication used by the Apache Kafka cluster. See kafka_cluster_client_authentication Block for details.
- kafkaClusterEncryptionInTransit (ConnectorKafkaClusterEncryptionInTransit) - Details of encryption in transit to the Apache Kafka cluster. See kafka_cluster_encryption_in_transit Block for details.
- kafkaconnectVersion (string) - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- plugins (list of ConnectorPlugin) - Specifies which plugins to use for the connector. See plugin Block for details.
- serviceExecutionRoleArn (string) - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
The following arguments are optional:
- description (string) - A summary description of the connector.
- logDelivery (ConnectorLogDelivery) - Details about log delivery. See log_delivery Block for details.
- name (string) - The name of the connector.
- tags (map of string to string) - A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.
- workerConfiguration (ConnectorWorkerConfiguration) - Specifies which worker configuration to use with the connector. See worker_configuration Block for details.
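As a brief illustration of the optional log_delivery input, the Python sketch below sends worker logs to a CloudWatch Logs group; the resulting value would be passed as the connector's log_delivery argument alongside the required inputs shown in the examples above. This is a sketch only, not taken from the example program on this page.
import pulumi_aws as aws
# Create a log group and describe worker log delivery to it.
log_group = aws.cloudwatch.LogGroup("connector-logs", retention_in_days=7)
log_delivery = aws.mskconnect.ConnectorLogDeliveryArgs(
    worker_log_delivery=aws.mskconnect.ConnectorLogDeliveryWorkerLogDeliveryArgs(
        cloudwatch_logs=aws.mskconnect.ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs(
            enabled=True,
            log_group=log_group.name,
        ),
    ),
)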
Outputs
All input properties are implicitly available as output properties. Additionally, the Connector resource produces the following output properties:
- arn (string) - The Amazon Resource Name (ARN) of the connector.
- id (string) - The provider-assigned unique ID for this managed resource.
- tagsAll (map of string to string) - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- version (string) - The current version of the connector.
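Because these are regular outputs, they can be exported from the stack. A short sketch, assuming example is the aws.mskconnect.Connector created in the usage example above:
import pulumi
# Assumes `example` is the connector defined earlier in the program.
pulumi.export("connectorArn", example.arn)
pulumi.export("connectorVersion", example.version)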
Look up Existing Connector Resource
Get an existing Connector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ConnectorState, opts?: CustomResourceOptions): Connector
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
capacity: Optional[ConnectorCapacityArgs] = None,
connector_configuration: Optional[Mapping[str, str]] = None,
description: Optional[str] = None,
kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
kafka_cluster_client_authentication: Optional[ConnectorKafkaClusterClientAuthenticationArgs] = None,
kafka_cluster_encryption_in_transit: Optional[ConnectorKafkaClusterEncryptionInTransitArgs] = None,
kafkaconnect_version: Optional[str] = None,
log_delivery: Optional[ConnectorLogDeliveryArgs] = None,
name: Optional[str] = None,
plugins: Optional[Sequence[ConnectorPluginArgs]] = None,
service_execution_role_arn: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None,
version: Optional[str] = None,
worker_configuration: Optional[ConnectorWorkerConfigurationArgs] = None) -> Connector
func GetConnector(ctx *Context, name string, id IDInput, state *ConnectorState, opts ...ResourceOption) (*Connector, error)
public static Connector Get(string name, Input<string> id, ConnectorState? state, CustomResourceOptions? opts = null)
public static Connector get(String name, Output<String> id, ConnectorState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
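As a sketch with placeholder values, looking up an existing connector in Python looks like the following; the id is the connector's provider ID (its ARN), shown here as a placeholder, and any of the state properties listed below can be supplied to further qualify the lookup.
import pulumi_aws as aws
# Placeholder ID: substitute the real connector ARN.
existing = aws.mskconnect.Connector.get(
    "existing-connector",
    id="arn:aws:kafkaconnect:...",
)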
- arn (string) - The Amazon Resource Name (ARN) of the connector.
- capacity (ConnectorCapacity) - Information about the capacity allocated to the connector. See capacity Block for details.
- connectorConfiguration (map of string to string) - A map of keys to values that represent the configuration for the connector.
- description (string) - A summary description of the connector.
- kafkaCluster (ConnectorKafkaCluster) - Specifies which Apache Kafka cluster to connect to. See kafka_cluster Block for details.
- kafkaClusterClientAuthentication (ConnectorKafkaClusterClientAuthentication) - Details of the client authentication used by the Apache Kafka cluster. See kafka_cluster_client_authentication Block for details.
- kafkaClusterEncryptionInTransit (ConnectorKafkaClusterEncryptionInTransit) - Details of encryption in transit to the Apache Kafka cluster. See kafka_cluster_encryption_in_transit Block for details.
- kafkaconnectVersion (string) - The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
- logDelivery (ConnectorLogDelivery) - Details about log delivery. See log_delivery Block for details.
- name (string) - The name of the connector.
- plugins (list of ConnectorPlugin) - Specifies which plugins to use for the connector. See plugin Block for details.
- serviceExecutionRoleArn (string) - The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
- tags (map of string to string) - A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.
- tagsAll (map of string to string) - A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
- version (string) - The current version of the connector.
- workerConfiguration (ConnectorWorkerConfiguration) - Specifies which worker configuration to use with the connector. See worker_configuration Block for details.
Supporting Types
ConnectorCapacity, ConnectorCapacityArgs
- autoscaling (ConnectorCapacityAutoscaling) - Information about the auto scaling parameters for the connector. See autoscaling Block for details.
- provisionedCapacity (ConnectorCapacityProvisionedCapacity) - Details about a fixed capacity allocated to a connector. See provisioned_capacity Block for details.
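For contrast with the autoscaling configuration used in the usage example at the top of this page, a minimal Python sketch of a fixed provisioned_capacity block would be:
import pulumi_aws as aws
# Sketch only: fixed capacity instead of autoscaling.
capacity = aws.mskconnect.ConnectorCapacityArgs(
    provisioned_capacity=aws.mskconnect.ConnectorCapacityProvisionedCapacityArgs(
        mcu_count=2,
        worker_count=4,
    ),
)
This value would be passed as the connector's capacity argument in place of the autoscaling block.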
ConnectorCapacityAutoscaling, ConnectorCapacityAutoscalingArgs
- maxWorkerCount (int) - The maximum number of workers allocated to the connector.
- minWorkerCount (int) - The minimum number of workers allocated to the connector.
- mcuCount (int) - The number of MSK Connect Units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
- scaleInPolicy (ConnectorCapacityAutoscalingScaleInPolicy) - The scale-in policy for the connector. See scale_in_policy Block for details.
- scaleOutPolicy (ConnectorCapacityAutoscalingScaleOutPolicy) - The scale-out policy for the connector. See scale_out_policy Block for details.
ConnectorCapacityAutoscalingScaleInPolicy, ConnectorCapacityAutoscalingScaleInPolicyArgs
- cpuUtilizationPercentage int - Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
ConnectorCapacityAutoscalingScaleOutPolicy, ConnectorCapacityAutoscalingScaleOutPolicyArgs
- cpuUtilizationPercentage int - The CPU utilization percentage threshold at which you want connector scale out to be triggered.
ConnectorCapacityProvisionedCapacity, ConnectorCapacityProvisionedCapacityArgs
- workerCount int - The number of workers that are allocated to the connector.
- mcuCount int - The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: 1, 2, 4, 8. The default value is 1.
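If the workload does not need autoscaling, the capacity block can instead use provisioned_capacity for a fixed allocation. A minimal TypeScript sketch; the aws.types.input type path assumes the Node.js SDK layout, and the remaining connector arguments are unchanged from the basic example.

import * as aws from "@pulumi/aws";

// Fixed capacity instead of autoscaling: the connector always runs two
// workers with one MCU each. Pass this object as the `capacity` argument
// of aws.mskconnect.Connector in place of the autoscaling block.
const capacity: aws.types.input.mskconnect.ConnectorCapacity = {
    provisionedCapacity: {
        mcuCount: 1,   // valid values: 1, 2, 4, 8
        workerCount: 2,
    },
};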
ConnectorKafkaCluster, ConnectorKafkaClusterArgs
- apacheKafkaCluster ConnectorKafkaClusterApacheKafkaCluster - The Apache Kafka cluster to which the connector is connected. See apache_kafka_cluster Block for details.
ConnectorKafkaClusterApacheKafkaCluster, ConnectorKafkaClusterApacheKafkaClusterArgs
- bootstrapServers string - The bootstrap servers of the cluster.
- vpc ConnectorKafkaClusterApacheKafkaClusterVpc - Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. See vpc Block for details.
ConnectorKafkaClusterApacheKafkaClusterVpc, ConnectorKafkaClusterApacheKafkaClusterVpcArgs
- securityGroups List<string> - The security groups for the connector.
- subnets List<string> - The subnets for the connector.
ConnectorKafkaClusterClientAuthentication, ConnectorKafkaClusterClientAuthenticationArgs
- authenticationType string - The type of client authentication used to connect to the Apache Kafka cluster. Valid values: IAM, NONE. A value of NONE means that no client authentication is used. The default value is NONE.
ConnectorKafkaClusterEncryptionInTransit, ConnectorKafkaClusterEncryptionInTransitArgs
- encryptionType string - The type of encryption in transit to the Apache Kafka cluster. Valid values: PLAINTEXT, TLS. The default value is PLAINTEXT.
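For an MSK cluster that uses IAM access control, client authentication and encryption in transit are typically set together, since IAM authentication requires TLS. A sketch of the two arguments; the service execution role (assumed to exist) must also carry the corresponding kafka-cluster IAM permissions.

// Connector arguments for an IAM-authenticated, TLS-encrypted MSK cluster.
// These replace the NONE/TLS settings shown in the basic example; the
// surrounding Connector arguments are unchanged.
const authArgs = {
    kafkaClusterClientAuthentication: {
        authenticationType: "IAM",
    },
    kafkaClusterEncryptionInTransit: {
        encryptionType: "TLS",
    },
};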
ConnectorLogDelivery, ConnectorLogDeliveryArgs
- workerLogDelivery ConnectorLogDeliveryWorkerLogDelivery - The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See worker_log_delivery Block for details.
ConnectorLogDeliveryWorkerLogDelivery, ConnectorLogDeliveryWorkerLogDeliveryArgs
- cloudwatchLogs ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogs - Details about delivering logs to Amazon CloudWatch Logs. See cloudwatch_logs Block for details.
- firehose ConnectorLogDeliveryWorkerLogDeliveryFirehose - Details about delivering logs to Amazon Kinesis Data Firehose. See firehose Block for details.
- s3 ConnectorLogDeliveryWorkerLogDeliveryS3 - Details about delivering logs to Amazon S3. See s3 Block for details.
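A sketch of a logDelivery argument that sends worker logs to CloudWatch Logs and to S3. The logGroup, bucket, and prefix field names are assumptions based on the provider schema, since the cloudwatch_logs and s3 block fields are not listed here; exampleLogGroup and exampleBucket are hypothetical resources defined elsewhere.

// Ship worker logs to an existing CloudWatch Logs group and to S3 under a
// "connect-logs/" prefix. Field names inside cloudwatchLogs and s3 are
// assumptions based on the provider schema.
const logDelivery = {
    workerLogDelivery: {
        cloudwatchLogs: {
            enabled: true,
            logGroup: exampleLogGroup.name, // hypothetical aws.cloudwatch.LogGroup
        },
        s3: {
            enabled: true,
            bucket: exampleBucket.id,       // hypothetical aws.s3.Bucket
            prefix: "connect-logs/",
        },
    },
};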
ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogs, ConnectorLogDeliveryWorkerLogDeliveryCloudwatchLogsArgs
ConnectorLogDeliveryWorkerLogDeliveryFirehose, ConnectorLogDeliveryWorkerLogDeliveryFirehoseArgs
- enabled bool - Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.
- deliveryStream string - The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
ConnectorLogDeliveryWorkerLogDeliveryS3, ConnectorLogDeliveryWorkerLogDeliveryS3Args
ConnectorPlugin, ConnectorPluginArgs
- customPlugin ConnectorPluginCustomPlugin - Details about a custom plugin. See custom_plugin Block for details.
ConnectorPluginCustomPlugin, ConnectorPluginCustomPluginArgs
ConnectorWorkerConfiguration, ConnectorWorkerConfigurationArgs
Import
Using pulumi import, import MSK Connect Connector using the connector’s arn. For example:
$ pulumi import aws:mskconnect/connector:Connector example 'arn:aws:kafkaconnect:eu-central-1:123456789012:connector/example/264edee4-17a3-412e-bd76-6681cfc93805-3'
To learn more about importing existing cloud resources, see Importing resources.
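Alternatively, the connector can be adopted in code with the import resource option during a regular pulumi up. A sketch, assuming existingConnectorArgs is a hypothetical ConnectorArgs value that matches the live configuration exactly:

import * as aws from "@pulumi/aws";

// Adopt the existing connector during `pulumi up` instead of running the CLI
// command above. `existingConnectorArgs` is hypothetical and must describe the
// connector exactly as it is currently configured, or the import will fail.
const adopted = new aws.mskconnect.Connector("example", existingConnectorArgs, {
    import: "arn:aws:kafkaconnect:eu-central-1:123456789012:connector/example/264edee4-17a3-412e-bd76-6681cfc93805-3",
});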
Package Details
- Repository: AWS Classic pulumi/pulumi-aws
- License: Apache-2.0
- Notes: This Pulumi package is based on the aws Terraform Provider.