
confluentcloud.Connector

Confluent v2.10.0 published on Wednesday, Nov 20, 2024 by Pulumi

    Example Usage

    Example Managed Datagen Source Connector that uses a service account to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/managed-datagen-source-connector
    const source = new confluentcloud.Connector("source", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: basic.id,
        },
        configSensitive: {},
        configNonsensitive: {
            "connector.class": "DatagenSource",
            name: "DatagenSourceConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector.id,
            "kafka.topic": orders.topicName,
            "output.data.format": "JSON",
            quickstart: "ORDERS",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [
            app_connector_describe_on_cluster,
            app_connector_write_on_target_topic,
            app_connector_create_on_data_preview_topics,
            app_connector_write_on_data_preview_topics,
        ],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/managed-datagen-source-connector
    source = confluentcloud.Connector("source",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": basic["id"],
        },
        config_sensitive={},
        config_nonsensitive={
            "connector.class": "DatagenSource",
            "name": "DatagenSourceConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector["id"],
            "kafka.topic": orders["topicName"],
            "output.data.format": "JSON",
            "quickstart": "ORDERS",
            "tasks.max": "1",
        },
        opts = pulumi.ResourceOptions(depends_on=[
                app_connector_describe_on_cluster,
                app_connector_write_on_target_topic,
                app_connector_create_on_data_preview_topics,
                app_connector_write_on_data_preview_topics,
            ]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/managed-datagen-source-connector
    		_, err := confluentcloud.NewConnector(ctx, "source", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{},
    			ConfigNonsensitive: pulumi.StringMap{
    				"connector.class":          pulumi.String("DatagenSource"),
    				"name":                     pulumi.String("DatagenSourceConnector_0"),
    				"kafka.auth.mode":          pulumi.String("SERVICE_ACCOUNT"),
    				"kafka.service.account.id": pulumi.Any(app_connector.Id),
    				"kafka.topic":              pulumi.Any(orders.TopicName),
    				"output.data.format":       pulumi.String("JSON"),
    				"quickstart":               pulumi.String("ORDERS"),
    				"tasks.max":                pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			app_connector_describe_on_cluster,
    			app_connector_write_on_target_topic,
    			app_connector_create_on_data_preview_topics,
    			app_connector_write_on_data_preview_topics,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/managed-datagen-source-connector
        var source = new ConfluentCloud.Connector("source", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = basic.Id,
            },
            ConfigSensitive = null,
            ConfigNonsensitive = 
            {
                { "connector.class", "DatagenSource" },
                { "name", "DatagenSourceConnector_0" },
                { "kafka.auth.mode", "SERVICE_ACCOUNT" },
                { "kafka.service.account.id", app_connector.Id },
                { "kafka.topic", orders.TopicName },
                { "output.data.format", "JSON" },
                { "quickstart", "ORDERS" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                app_connector_describe_on_cluster,
                app_connector_write_on_target_topic,
                app_connector_create_on_data_preview_topics,
                app_connector_write_on_data_preview_topics,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/managed-datagen-source-connector
            var source = new Connector("source", ConnectorArgs.builder()
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(basic.id())
                    .build())
                .configSensitive(Map.of())
                .configNonsensitive(Map.ofEntries(
                    Map.entry("connector.class", "DatagenSource"),
                    Map.entry("name", "DatagenSourceConnector_0"),
                    Map.entry("kafka.auth.mode", "SERVICE_ACCOUNT"),
                    Map.entry("kafka.service.account.id", app_connector.id()),
                    Map.entry("kafka.topic", orders.topicName()),
                    Map.entry("output.data.format", "JSON"),
                    Map.entry("quickstart", "ORDERS"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(                
                        app_connector_describe_on_cluster,
                        app_connector_write_on_target_topic,
                        app_connector_create_on_data_preview_topics,
                        app_connector_write_on_data_preview_topics)
                    .build());
    
        }
    }
    
    resources:
      # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/managed-datagen-source-connector
      source:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${basic.id}
          configSensitive: {}
          configNonsensitive:
            connector.class: DatagenSource
            name: DatagenSourceConnector_0
            kafka.auth.mode: SERVICE_ACCOUNT
            kafka.service.account.id: ${["app-connector"].id}
            kafka.topic: ${orders.topicName}
            output.data.format: JSON
            quickstart: ORDERS
            tasks.max: '1'
        options:
          dependsOn:
            - ${["app-connector-describe-on-cluster"]}
            - ${["app-connector-write-on-target-topic"]}
            - ${["app-connector-create-on-data-preview-topics"]}
            - ${["app-connector-write-on-data-preview-topics"]}
    

    Example Managed Amazon S3 Sink Connector that uses a service account to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector
    const sink = new confluentcloud.Connector("sink", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: basic.id,
        },
        configSensitive: {
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        configNonsensitive: {
            topics: orders.topicName,
            "input.data.format": "JSON",
            "connector.class": "S3_SINK",
            name: "S3_SINKConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector.id,
            "s3.bucket.name": "<s3-bucket-name>",
            "output.data.format": "JSON",
            "time.interval": "DAILY",
            "flush.size": "1000",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [
            app_connector_describe_on_cluster,
            app_connector_read_on_target_topic,
            app_connector_create_on_dlq_lcc_topics,
            app_connector_write_on_dlq_lcc_topics,
            app_connector_create_on_success_lcc_topics,
            app_connector_write_on_success_lcc_topics,
            app_connector_create_on_error_lcc_topics,
            app_connector_write_on_error_lcc_topics,
            app_connector_read_on_connect_lcc_group,
        ],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector
    sink = confluentcloud.Connector("sink",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": basic["id"],
        },
        config_sensitive={
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        config_nonsensitive={
            "topics": orders["topicName"],
            "input.data.format": "JSON",
            "connector.class": "S3_SINK",
            "name": "S3_SINKConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector["id"],
            "s3.bucket.name": "<s3-bucket-name>",
            "output.data.format": "JSON",
            "time.interval": "DAILY",
            "flush.size": "1000",
            "tasks.max": "1",
        },
        opts = pulumi.ResourceOptions(depends_on=[
                app_connector_describe_on_cluster,
                app_connector_read_on_target_topic,
                app_connector_create_on_dlq_lcc_topics,
                app_connector_write_on_dlq_lcc_topics,
                app_connector_create_on_success_lcc_topics,
                app_connector_write_on_success_lcc_topics,
                app_connector_create_on_error_lcc_topics,
                app_connector_write_on_error_lcc_topics,
                app_connector_read_on_connect_lcc_group,
            ]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector
    		_, err := confluentcloud.NewConnector(ctx, "sink", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{
    				"aws.access.key.id":     pulumi.String("***REDACTED***"),
    				"aws.secret.access.key": pulumi.String("***REDACTED***"),
    			},
    			ConfigNonsensitive: pulumi.StringMap{
    				"topics":                   pulumi.Any(orders.TopicName),
    				"input.data.format":        pulumi.String("JSON"),
    				"connector.class":          pulumi.String("S3_SINK"),
    				"name":                     pulumi.String("S3_SINKConnector_0"),
    				"kafka.auth.mode":          pulumi.String("SERVICE_ACCOUNT"),
    				"kafka.service.account.id": pulumi.Any(app_connector.Id),
    				"s3.bucket.name":           pulumi.String("<s3-bucket-name>"),
    				"output.data.format":       pulumi.String("JSON"),
    				"time.interval":            pulumi.String("DAILY"),
    				"flush.size":               pulumi.String("1000"),
    				"tasks.max":                pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			app_connector_describe_on_cluster,
    			app_connector_read_on_target_topic,
    			app_connector_create_on_dlq_lcc_topics,
    			app_connector_write_on_dlq_lcc_topics,
    			app_connector_create_on_success_lcc_topics,
    			app_connector_write_on_success_lcc_topics,
    			app_connector_create_on_error_lcc_topics,
    			app_connector_write_on_error_lcc_topics,
    			app_connector_read_on_connect_lcc_group,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector
        var sink = new ConfluentCloud.Connector("sink", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = basic.Id,
            },
            ConfigSensitive = 
            {
                { "aws.access.key.id", "***REDACTED***" },
                { "aws.secret.access.key", "***REDACTED***" },
            },
            ConfigNonsensitive = 
            {
                { "topics", orders.TopicName },
                { "input.data.format", "JSON" },
                { "connector.class", "S3_SINK" },
                { "name", "S3_SINKConnector_0" },
                { "kafka.auth.mode", "SERVICE_ACCOUNT" },
                { "kafka.service.account.id", app_connector.Id },
                { "s3.bucket.name", "<s3-bucket-name>" },
                { "output.data.format", "JSON" },
                { "time.interval", "DAILY" },
                { "flush.size", "1000" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                app_connector_describe_on_cluster,
                app_connector_read_on_target_topic,
                app_connector_create_on_dlq_lcc_topics,
                app_connector_write_on_dlq_lcc_topics,
                app_connector_create_on_success_lcc_topics,
                app_connector_write_on_success_lcc_topics,
                app_connector_create_on_error_lcc_topics,
                app_connector_write_on_error_lcc_topics,
                app_connector_read_on_connect_lcc_group,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector
            var sink = new Connector("sink", ConnectorArgs.builder()
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(basic.id())
                    .build())
                .configSensitive(Map.ofEntries(
                    Map.entry("aws.access.key.id", "***REDACTED***"),
                    Map.entry("aws.secret.access.key", "***REDACTED***")
                ))
                .configNonsensitive(Map.ofEntries(
                    Map.entry("topics", orders.topicName()),
                    Map.entry("input.data.format", "JSON"),
                    Map.entry("connector.class", "S3_SINK"),
                    Map.entry("name", "S3_SINKConnector_0"),
                    Map.entry("kafka.auth.mode", "SERVICE_ACCOUNT"),
                    Map.entry("kafka.service.account.id", app_connector.id()),
                    Map.entry("s3.bucket.name", "<s3-bucket-name>"),
                    Map.entry("output.data.format", "JSON"),
                    Map.entry("time.interval", "DAILY"),
                    Map.entry("flush.size", "1000"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(                
                        app_connector_describe_on_cluster,
                        app_connector_read_on_target_topic,
                        app_connector_create_on_dlq_lcc_topics,
                        app_connector_write_on_dlq_lcc_topics,
                        app_connector_create_on_success_lcc_topics,
                        app_connector_write_on_success_lcc_topics,
                        app_connector_create_on_error_lcc_topics,
                        app_connector_write_on_error_lcc_topics,
                        app_connector_read_on_connect_lcc_group)
                    .build());
    
        }
    }
    
    resources:
      # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector
      sink:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${basic.id}
          configSensitive:
            aws.access.key.id: '***REDACTED***'
            aws.secret.access.key: '***REDACTED***'
          configNonsensitive:
            topics: ${orders.topicName}
            input.data.format: JSON
            connector.class: S3_SINK
            name: S3_SINKConnector_0
            kafka.auth.mode: SERVICE_ACCOUNT
            kafka.service.account.id: ${["app-connector"].id}
            s3.bucket.name: <s3-bucket-name>
            output.data.format: JSON
            time.interval: DAILY
            flush.size: '1000'
            tasks.max: '1'
        options:
          dependsOn:
            - ${["app-connector-describe-on-cluster"]}
            - ${["app-connector-read-on-target-topic"]}
            - ${["app-connector-create-on-dlq-lcc-topics"]}
            - ${["app-connector-write-on-dlq-lcc-topics"]}
            - ${["app-connector-create-on-success-lcc-topics"]}
            - ${["app-connector-write-on-success-lcc-topics"]}
            - ${["app-connector-create-on-error-lcc-topics"]}
            - ${["app-connector-write-on-error-lcc-topics"]}
            - ${["app-connector-read-on-connect-lcc-group"]}
    

    Example Managed Amazon S3 Sink Connector that uses a service account to communicate with your Kafka cluster and IAM Roles for AWS authentication

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector-assume-role
    const sink = new confluentcloud.Connector("sink", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: basic.id,
        },
        configSensitive: {},
        configNonsensitive: {
            topics: orders.topicName,
            "input.data.format": "JSON",
            "connector.class": "S3_SINK",
            name: "S3_SINKConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector.id,
            "s3.bucket.name": "<s3-bucket-name>",
            "output.data.format": "JSON",
            "time.interval": "DAILY",
            "flush.size": "1000",
            "tasks.max": "1",
            "authentication.method": "IAM Roles",
            "provider.integration.id": main.id,
        },
    }, {
        dependsOn: [
            app_connector_describe_on_cluster,
            app_connector_read_on_target_topic,
            app_connector_create_on_dlq_lcc_topics,
            app_connector_write_on_dlq_lcc_topics,
            app_connector_create_on_success_lcc_topics,
            app_connector_write_on_success_lcc_topics,
            app_connector_create_on_error_lcc_topics,
            app_connector_write_on_error_lcc_topics,
            app_connector_read_on_connect_lcc_group,
            main,
            s3AccessRole,
        ],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector-assume-role
    sink = confluentcloud.Connector("sink",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": basic["id"],
        },
        config_sensitive={},
        config_nonsensitive={
            "topics": orders["topicName"],
            "input.data.format": "JSON",
            "connector.class": "S3_SINK",
            "name": "S3_SINKConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector["id"],
            "s3.bucket.name": "<s3-bucket-name>",
            "output.data.format": "JSON",
            "time.interval": "DAILY",
            "flush.size": "1000",
            "tasks.max": "1",
            "authentication.method": "IAM Roles",
            "provider.integration.id": main["id"],
        },
        opts = pulumi.ResourceOptions(depends_on=[
                app_connector_describe_on_cluster,
                app_connector_read_on_target_topic,
                app_connector_create_on_dlq_lcc_topics,
                app_connector_write_on_dlq_lcc_topics,
                app_connector_create_on_success_lcc_topics,
                app_connector_write_on_success_lcc_topics,
                app_connector_create_on_error_lcc_topics,
                app_connector_write_on_error_lcc_topics,
                app_connector_read_on_connect_lcc_group,
                main,
                s3_access_role,
            ]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector-assume-role
    		_, err := confluentcloud.NewConnector(ctx, "sink", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{},
    			ConfigNonsensitive: pulumi.StringMap{
    				"topics":                   pulumi.Any(orders.TopicName),
    				"input.data.format":        pulumi.String("JSON"),
    				"connector.class":          pulumi.String("S3_SINK"),
    				"name":                     pulumi.String("S3_SINKConnector_0"),
    				"kafka.auth.mode":          pulumi.String("SERVICE_ACCOUNT"),
    				"kafka.service.account.id": pulumi.Any(app_connector.Id),
    				"s3.bucket.name":           pulumi.String("<s3-bucket-name>"),
    				"output.data.format":       pulumi.String("JSON"),
    				"time.interval":            pulumi.String("DAILY"),
    				"flush.size":               pulumi.String("1000"),
    				"tasks.max":                pulumi.String("1"),
    				"authentication.method":    pulumi.String("IAM Roles"),
    				"provider.integration.id":  pulumi.Any(main.Id),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			app_connector_describe_on_cluster,
    			app_connector_read_on_target_topic,
    			app_connector_create_on_dlq_lcc_topics,
    			app_connector_write_on_dlq_lcc_topics,
    			app_connector_create_on_success_lcc_topics,
    			app_connector_write_on_success_lcc_topics,
    			app_connector_create_on_error_lcc_topics,
    			app_connector_write_on_error_lcc_topics,
    			app_connector_read_on_connect_lcc_group,
    			main,
    			s3AccessRole,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector-assume-role
        var sink = new ConfluentCloud.Connector("sink", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = basic.Id,
            },
            ConfigSensitive = null,
            ConfigNonsensitive = 
            {
                { "topics", orders.TopicName },
                { "input.data.format", "JSON" },
                { "connector.class", "S3_SINK" },
                { "name", "S3_SINKConnector_0" },
                { "kafka.auth.mode", "SERVICE_ACCOUNT" },
                { "kafka.service.account.id", app_connector.Id },
                { "s3.bucket.name", "<s3-bucket-name>" },
                { "output.data.format", "JSON" },
                { "time.interval", "DAILY" },
                { "flush.size", "1000" },
                { "tasks.max", "1" },
                { "authentication.method", "IAM Roles" },
                { "provider.integration.id", main.Id },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                app_connector_describe_on_cluster,
                app_connector_read_on_target_topic,
                app_connector_create_on_dlq_lcc_topics,
                app_connector_write_on_dlq_lcc_topics,
                app_connector_create_on_success_lcc_topics,
                app_connector_write_on_success_lcc_topics,
                app_connector_create_on_error_lcc_topics,
                app_connector_write_on_error_lcc_topics,
                app_connector_read_on_connect_lcc_group,
                main,
                s3AccessRole,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector-assume-role
            var sink = new Connector("sink", ConnectorArgs.builder()
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(basic.id())
                    .build())
                .configSensitive(Map.of())
                .configNonsensitive(Map.ofEntries(
                    Map.entry("topics", orders.topicName()),
                    Map.entry("input.data.format", "JSON"),
                    Map.entry("connector.class", "S3_SINK"),
                    Map.entry("name", "S3_SINKConnector_0"),
                    Map.entry("kafka.auth.mode", "SERVICE_ACCOUNT"),
                    Map.entry("kafka.service.account.id", app_connector.id()),
                    Map.entry("s3.bucket.name", "<s3-bucket-name>"),
                    Map.entry("output.data.format", "JSON"),
                    Map.entry("time.interval", "DAILY"),
                    Map.entry("flush.size", "1000"),
                    Map.entry("tasks.max", "1"),
                    Map.entry("authentication.method", "IAM Roles"),
                    Map.entry("provider.integration.id", main.id())
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(                
                        app_connector_describe_on_cluster,
                        app_connector_read_on_target_topic,
                        app_connector_create_on_dlq_lcc_topics,
                        app_connector_write_on_dlq_lcc_topics,
                        app_connector_create_on_success_lcc_topics,
                        app_connector_write_on_success_lcc_topics,
                        app_connector_create_on_error_lcc_topics,
                        app_connector_write_on_error_lcc_topics,
                        app_connector_read_on_connect_lcc_group,
                        main,
                        s3AccessRole)
                    .build());
    
        }
    }
    
    resources:
      # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/s3-sink-connector-assume-role
      sink:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${basic.id}
          configSensitive: {}
          configNonsensitive:
            topics: ${orders.topicName}
            input.data.format: JSON
            connector.class: S3_SINK
            name: S3_SINKConnector_0
            kafka.auth.mode: SERVICE_ACCOUNT
            kafka.service.account.id: ${["app-connector"].id}
            s3.bucket.name: <s3-bucket-name>
            output.data.format: JSON
            time.interval: DAILY
            flush.size: '1000'
            tasks.max: '1'
            authentication.method: IAM Roles
            provider.integration.id: ${main.id}
        options:
          dependsOn:
            - ${["app-connector-describe-on-cluster"]}
            - ${["app-connector-read-on-target-topic"]}
            - ${["app-connector-create-on-dlq-lcc-topics"]}
            - ${["app-connector-write-on-dlq-lcc-topics"]}
            - ${["app-connector-create-on-success-lcc-topics"]}
            - ${["app-connector-write-on-success-lcc-topics"]}
            - ${["app-connector-create-on-error-lcc-topics"]}
            - ${["app-connector-write-on-error-lcc-topics"]}
            - ${["app-connector-read-on-connect-lcc-group"]}
            - ${main}
            - ${s3AccessRole}
    

    Example Managed Amazon DynamoDB Sink Connector that uses a service account to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/dynamo-db-sink-connector
    const sink = new confluentcloud.Connector("sink", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: basic.id,
        },
        configSensitive: {
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        configNonsensitive: {
            topics: orders.topicName,
            "input.data.format": "JSON",
            "connector.class": "DynamoDbSink",
            name: "DynamoDbSinkConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector.id,
            "aws.dynamodb.pk.hash": "value.userid",
            "aws.dynamodb.pk.sort": "value.pageid",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [
            app_connector_describe_on_cluster,
            app_connector_read_on_target_topic,
            app_connector_create_on_dlq_lcc_topics,
            app_connector_write_on_dlq_lcc_topics,
            app_connector_create_on_success_lcc_topics,
            app_connector_write_on_success_lcc_topics,
            app_connector_create_on_error_lcc_topics,
            app_connector_write_on_error_lcc_topics,
            app_connector_read_on_connect_lcc_group,
        ],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/dynamo-db-sink-connector
    sink = confluentcloud.Connector("sink",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": basic["id"],
        },
        config_sensitive={
            "aws.access.key.id": "***REDACTED***",
            "aws.secret.access.key": "***REDACTED***",
        },
        config_nonsensitive={
            "topics": orders["topicName"],
            "input.data.format": "JSON",
            "connector.class": "DynamoDbSink",
            "name": "DynamoDbSinkConnector_0",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": app_connector["id"],
            "aws.dynamodb.pk.hash": "value.userid",
            "aws.dynamodb.pk.sort": "value.pageid",
            "tasks.max": "1",
        },
        opts = pulumi.ResourceOptions(depends_on=[
                app_connector_describe_on_cluster,
                app_connector_read_on_target_topic,
                app_connector_create_on_dlq_lcc_topics,
                app_connector_write_on_dlq_lcc_topics,
                app_connector_create_on_success_lcc_topics,
                app_connector_write_on_success_lcc_topics,
                app_connector_create_on_error_lcc_topics,
                app_connector_write_on_error_lcc_topics,
                app_connector_read_on_connect_lcc_group,
            ]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/dynamo-db-sink-connector
    		_, err := confluentcloud.NewConnector(ctx, "sink", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{
    				"aws.access.key.id":     pulumi.String("***REDACTED***"),
    				"aws.secret.access.key": pulumi.String("***REDACTED***"),
    			},
    			ConfigNonsensitive: pulumi.StringMap{
    				"topics":                   pulumi.Any(orders.TopicName),
    				"input.data.format":        pulumi.String("JSON"),
    				"connector.class":          pulumi.String("DynamoDbSink"),
    				"name":                     pulumi.String("DynamoDbSinkConnector_0"),
    				"kafka.auth.mode":          pulumi.String("SERVICE_ACCOUNT"),
    				"kafka.service.account.id": pulumi.Any(app_connector.Id),
    				"aws.dynamodb.pk.hash":     pulumi.String("value.userid"),
    				"aws.dynamodb.pk.sort":     pulumi.String("value.pageid"),
    				"tasks.max":                pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			app_connector_describe_on_cluster,
    			app_connector_read_on_target_topic,
    			app_connector_create_on_dlq_lcc_topics,
    			app_connector_write_on_dlq_lcc_topics,
    			app_connector_create_on_success_lcc_topics,
    			app_connector_write_on_success_lcc_topics,
    			app_connector_create_on_error_lcc_topics,
    			app_connector_write_on_error_lcc_topics,
    			app_connector_read_on_connect_lcc_group,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/dynamo-db-sink-connector
        var sink = new ConfluentCloud.Connector("sink", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = basic.Id,
            },
            ConfigSensitive = 
            {
                { "aws.access.key.id", "***REDACTED***" },
                { "aws.secret.access.key", "***REDACTED***" },
            },
            ConfigNonsensitive = 
            {
                { "topics", orders.TopicName },
                { "input.data.format", "JSON" },
                { "connector.class", "DynamoDbSink" },
                { "name", "DynamoDbSinkConnector_0" },
                { "kafka.auth.mode", "SERVICE_ACCOUNT" },
                { "kafka.service.account.id", app_connector.Id },
                { "aws.dynamodb.pk.hash", "value.userid" },
                { "aws.dynamodb.pk.sort", "value.pageid" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                app_connector_describe_on_cluster,
                app_connector_read_on_target_topic,
                app_connector_create_on_dlq_lcc_topics,
                app_connector_write_on_dlq_lcc_topics,
                app_connector_create_on_success_lcc_topics,
                app_connector_write_on_success_lcc_topics,
                app_connector_create_on_error_lcc_topics,
                app_connector_write_on_error_lcc_topics,
                app_connector_read_on_connect_lcc_group,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/dynamo-db-sink-connector
            var sink = new Connector("sink", ConnectorArgs.builder()
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(basic.id())
                    .build())
                .configSensitive(Map.ofEntries(
                    Map.entry("aws.access.key.id", "***REDACTED***"),
                    Map.entry("aws.secret.access.key", "***REDACTED***")
                ))
                .configNonsensitive(Map.ofEntries(
                    Map.entry("topics", orders.topicName()),
                    Map.entry("input.data.format", "JSON"),
                    Map.entry("connector.class", "DynamoDbSink"),
                    Map.entry("name", "DynamoDbSinkConnector_0"),
                    Map.entry("kafka.auth.mode", "SERVICE_ACCOUNT"),
                    Map.entry("kafka.service.account.id", app_connector.id()),
                    Map.entry("aws.dynamodb.pk.hash", "value.userid"),
                    Map.entry("aws.dynamodb.pk.sort", "value.pageid"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(                
                        app_connector_describe_on_cluster,
                        app_connector_read_on_target_topic,
                        app_connector_create_on_dlq_lcc_topics,
                        app_connector_write_on_dlq_lcc_topics,
                        app_connector_create_on_success_lcc_topics,
                        app_connector_write_on_success_lcc_topics,
                        app_connector_create_on_error_lcc_topics,
                        app_connector_write_on_error_lcc_topics,
                        app_connector_read_on_connect_lcc_group)
                    .build());
    
        }
    }
    
    resources:
      # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/dynamo-db-sink-connector
      sink:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${basic.id}
          configSensitive:
            aws.access.key.id: '***REDACTED***'
            aws.secret.access.key: '***REDACTED***'
          configNonsensitive:
            topics: ${orders.topicName}
            input.data.format: JSON
            connector.class: DynamoDbSink
            name: DynamoDbSinkConnector_0
            kafka.auth.mode: SERVICE_ACCOUNT
            kafka.service.account.id: ${["app-connector"].id}
            aws.dynamodb.pk.hash: value.userid
            aws.dynamodb.pk.sort: value.pageid
            tasks.max: '1'
        options:
          dependsOn:
            - ${["app-connector-describe-on-cluster"]}
            - ${["app-connector-read-on-target-topic"]}
            - ${["app-connector-create-on-dlq-lcc-topics"]}
            - ${["app-connector-write-on-dlq-lcc-topics"]}
            - ${["app-connector-create-on-success-lcc-topics"]}
            - ${["app-connector-write-on-success-lcc-topics"]}
            - ${["app-connector-create-on-error-lcc-topics"]}
            - ${["app-connector-write-on-error-lcc-topics"]}
            - ${["app-connector-read-on-connect-lcc-group"]}
    

    Example Custom Datagen Source Connector that uses a Kafka API Key to communicate with your Kafka cluster

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/custom-datagen-source-connector
    const source = new confluentcloud.Connector("source", {
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: basic.id,
        },
        configSensitive: {
            "kafka.api.key": "***REDACTED***",
            "kafka.api.secret": "***REDACTED***",
        },
        configNonsensitive: {
            "confluent.connector.type": "CUSTOM",
            "connector.class": sourceConfluentCustomConnectorPlugin.connectorClass,
            name: "DatagenConnectorExampleName",
            "kafka.auth.mode": "KAFKA_API_KEY",
            "kafka.topic": orders.topicName,
            "output.data.format": "JSON",
            quickstart: "ORDERS",
            "confluent.custom.plugin.id": sourceConfluentCustomConnectorPlugin.id,
            "min.interval": "1000",
            "max.interval": "2000",
            "tasks.max": "1",
        },
    }, {
        dependsOn: [app_manager_kafka_cluster_admin],
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/custom-datagen-source-connector
    source = confluentcloud.Connector("source",
        environment={
            "id": staging["id"],
        },
        kafka_cluster={
            "id": basic["id"],
        },
        config_sensitive={
            "kafka.api.key": "***REDACTED***",
            "kafka.api.secret": "***REDACTED***",
        },
        config_nonsensitive={
            "confluent.connector.type": "CUSTOM",
            "connector.class": source_confluent_custom_connector_plugin["connectorClass"],
            "name": "DatagenConnectorExampleName",
            "kafka.auth.mode": "KAFKA_API_KEY",
            "kafka.topic": orders["topicName"],
            "output.data.format": "JSON",
            "quickstart": "ORDERS",
            "confluent.custom.plugin.id": source_confluent_custom_connector_plugin["id"],
            "min.interval": "1000",
            "max.interval": "2000",
            "tasks.max": "1",
        },
        opts = pulumi.ResourceOptions(depends_on=[app_manager_kafka_cluster_admin]))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/custom-datagen-source-connector
    		_, err := confluentcloud.NewConnector(ctx, "source", &confluentcloud.ConnectorArgs{
    			Environment: &confluentcloud.ConnectorEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    				Id: pulumi.Any(basic.Id),
    			},
    			ConfigSensitive: pulumi.StringMap{
    				"kafka.api.key":    pulumi.String("***REDACTED***"),
    				"kafka.api.secret": pulumi.String("***REDACTED***"),
    			},
    			ConfigNonsensitive: pulumi.StringMap{
    				"confluent.connector.type":   pulumi.String("CUSTOM"),
    				"connector.class":            pulumi.Any(sourceConfluentCustomConnectorPlugin.ConnectorClass),
    				"name":                       pulumi.String("DatagenConnectorExampleName"),
    				"kafka.auth.mode":            pulumi.String("KAFKA_API_KEY"),
    				"kafka.topic":                pulumi.Any(orders.TopicName),
    				"output.data.format":         pulumi.String("JSON"),
    				"quickstart":                 pulumi.String("ORDERS"),
    				"confluent.custom.plugin.id": pulumi.Any(sourceConfluentCustomConnectorPlugin.Id),
    				"min.interval":               pulumi.String("1000"),
    				"max.interval":               pulumi.String("2000"),
    				"tasks.max":                  pulumi.String("1"),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			app_manager_kafka_cluster_admin,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/custom-datagen-source-connector
        var source = new ConfluentCloud.Connector("source", new()
        {
            Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
            {
                Id = staging.Id,
            },
            KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
            {
                Id = basic.Id,
            },
            ConfigSensitive = 
            {
                { "kafka.api.key", "***REDACTED***" },
                { "kafka.api.secret", "***REDACTED***" },
            },
            ConfigNonsensitive = 
            {
                { "confluent.connector.type", "CUSTOM" },
                { "connector.class", sourceConfluentCustomConnectorPlugin.ConnectorClass },
                { "name", "DatagenConnectorExampleName" },
                { "kafka.auth.mode", "KAFKA_API_KEY" },
                { "kafka.topic", orders.TopicName },
                { "output.data.format", "JSON" },
                { "quickstart", "ORDERS" },
                { "confluent.custom.plugin.id", sourceConfluentCustomConnectorPlugin.Id },
                { "min.interval", "1000" },
                { "max.interval", "2000" },
                { "tasks.max", "1" },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                app_manager_kafka_cluster_admin,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Connector;
    import com.pulumi.confluentcloud.ConnectorArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.ConnectorKafkaClusterArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/custom-datagen-source-connector
            var source = new Connector("source", ConnectorArgs.builder()
                .environment(ConnectorEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .kafkaCluster(ConnectorKafkaClusterArgs.builder()
                    .id(basic.id())
                    .build())
                .configSensitive(Map.ofEntries(
                    Map.entry("kafka.api.key", "***REDACTED***"),
                    Map.entry("kafka.api.secret", "***REDACTED***")
                ))
                .configNonsensitive(Map.ofEntries(
                    Map.entry("confluent.connector.type", "CUSTOM"),
                    Map.entry("connector.class", sourceConfluentCustomConnectorPlugin.connectorClass()),
                    Map.entry("name", "DatagenConnectorExampleName"),
                    Map.entry("kafka.auth.mode", "KAFKA_API_KEY"),
                    Map.entry("kafka.topic", orders.topicName()),
                    Map.entry("output.data.format", "JSON"),
                    Map.entry("quickstart", "ORDERS"),
                    Map.entry("confluent.custom.plugin.id", sourceConfluentCustomConnectorPlugin.id()),
                    Map.entry("min.interval", "1000"),
                    Map.entry("max.interval", "2000"),
                    Map.entry("tasks.max", "1")
                ))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(app_manager_kafka_cluster_admin)
                    .build());
    
        }
    }
    
    resources:
      # https://github.com/confluentinc/terraform-provider-confluent/tree/master/examples/configurations/connectors/custom-datagen-source-connector
      source:
        type: confluentcloud:Connector
        properties:
          environment:
            id: ${staging.id}
          kafkaCluster:
            id: ${basic.id}
          configSensitive:
            kafka.api.key: '***REDACTED***'
            kafka.api.secret: '***REDACTED***'
          configNonsensitive:
            confluent.connector.type: CUSTOM
            connector.class: ${sourceConfluentCustomConnectorPlugin.connectorClass}
            name: DatagenConnectorExampleName
            kafka.auth.mode: KAFKA_API_KEY
            kafka.topic: ${orders.topicName}
            output.data.format: JSON
            quickstart: ORDERS
            confluent.custom.plugin.id: ${sourceConfluentCustomConnectorPlugin.id}
            min.interval: '1000'
            max.interval: '2000'
            tasks.max: '1'
        options:
          dependsOn:
            - ${["app-manager-kafka-cluster-admin"]}
    

    Getting Started

    The following end-to-end examples might help you get started with the confluentcloud.Connector resource:

    • s3-sink-connector
    • s3-sink-connector-assume-role
    • snowflake-sink-connector
    • managed-datagen-source-connector
    • elasticsearch-sink-connector
    • dynamo-db-sink-connector
    • mongo-db-source-connector
    • mongo-db-sink-connector
    • sql-server-cdc-debezium-source-connector
    • postgre-sql-cdc-debezium-source-connector
    • custom-datagen-source-connector

    Note: Certain connectors require additional ACL entries. See Additional ACL entries for more details.
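
    As an illustration of those dependencies, the sketch below shows roughly what one of the ACL resources referenced in the dependsOn lists above (app_connector_describe_on_cluster) might look like in TypeScript. The cluster, service account, and API key names (basic, app_connector, app_manager_kafka_api_key) are assumptions mirroring the examples above, not fixed identifiers.

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";

    // Hypothetical sketch: grant the connector's service account DESCRIBE on the Kafka cluster,
    // authenticating against the cluster's REST endpoint with a cluster-admin Kafka API key.
    const appConnectorDescribeOnCluster = new confluentcloud.KafkaAcl("app-connector-describe-on-cluster", {
        kafkaCluster: {
            id: basic.id,
        },
        resourceType: "CLUSTER",
        resourceName: "kafka-cluster",
        patternType: "LITERAL",
        principal: pulumi.interpolate`User:${app_connector.id}`,
        host: "*",
        operation: "DESCRIBE",
        permission: "ALLOW",
        restEndpoint: basic.restEndpoint,
        credentials: {
            key: app_manager_kafka_api_key.id,
            secret: app_manager_kafka_api_key.secret,
        },
    });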

    Create Connector Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Connector(name: string, args: ConnectorArgs, opts?: CustomResourceOptions);
    @overload
    def Connector(resource_name: str,
                  args: ConnectorArgs,
                  opts: Optional[ResourceOptions] = None)
    
    @overload
    def Connector(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  config_nonsensitive: Optional[Mapping[str, str]] = None,
                  environment: Optional[ConnectorEnvironmentArgs] = None,
                  kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
                  config_sensitive: Optional[Mapping[str, str]] = None,
                  status: Optional[str] = None)
    func NewConnector(ctx *Context, name string, args ConnectorArgs, opts ...ResourceOption) (*Connector, error)
    public Connector(string name, ConnectorArgs args, CustomResourceOptions? opts = null)
    public Connector(String name, ConnectorArgs args)
    public Connector(String name, ConnectorArgs args, CustomResourceOptions options)
    
    type: confluentcloud:Connector
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args ConnectorArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var connectorResource = new ConfluentCloud.Connector("connectorResource", new()
    {
        ConfigNonsensitive = 
        {
            { "string", "string" },
        },
        Environment = new ConfluentCloud.Inputs.ConnectorEnvironmentArgs
        {
            Id = "string",
        },
        KafkaCluster = new ConfluentCloud.Inputs.ConnectorKafkaClusterArgs
        {
            Id = "string",
        },
        ConfigSensitive = 
        {
            { "string", "string" },
        },
        Status = "string",
    });
    
    example, err := confluentcloud.NewConnector(ctx, "connectorResource", &confluentcloud.ConnectorArgs{
    	ConfigNonsensitive: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Environment: &confluentcloud.ConnectorEnvironmentArgs{
    		Id: pulumi.String("string"),
    	},
    	KafkaCluster: &confluentcloud.ConnectorKafkaClusterArgs{
    		Id: pulumi.String("string"),
    	},
    	ConfigSensitive: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Status: pulumi.String("string"),
    })
    
    var connectorResource = new Connector("connectorResource", ConnectorArgs.builder()
        .configNonsensitive(Map.of("string", "string"))
        .environment(ConnectorEnvironmentArgs.builder()
            .id("string")
            .build())
        .kafkaCluster(ConnectorKafkaClusterArgs.builder()
            .id("string")
            .build())
        .configSensitive(Map.of("string", "string"))
        .status("string")
        .build());
    
    connector_resource = confluentcloud.Connector("connectorResource",
        config_nonsensitive={
            "string": "string",
        },
        environment={
            "id": "string",
        },
        kafka_cluster={
            "id": "string",
        },
        config_sensitive={
            "string": "string",
        },
        status="string")
    
    const connectorResource = new confluentcloud.Connector("connectorResource", {
        configNonsensitive: {
            string: "string",
        },
        environment: {
            id: "string",
        },
        kafkaCluster: {
            id: "string",
        },
        configSensitive: {
            string: "string",
        },
        status: "string",
    });
    
    type: confluentcloud:Connector
    properties:
        configNonsensitive:
            string: string
        configSensitive:
            string: string
        environment:
            id: string
        kafkaCluster:
            id: string
        status: string
    

    Connector Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The Connector resource accepts the following input properties:

    ConfigNonsensitive Dictionary<string, string>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    Environment Pulumi.ConfluentCloud.Inputs.ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster Pulumi.ConfluentCloud.Inputs.ConnectorKafkaCluster
    ConfigSensitive Dictionary<string, string>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    ConfigNonsensitive map[string]string
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    Environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster ConnectorKafkaClusterArgs
    ConfigSensitive map[string]string
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String,String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    configSensitive Map<String,String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive {[key: string]: string}
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    configSensitive {[key: string]: string}
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    config_nonsensitive Mapping[str, str]
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster ConnectorKafkaClusterArgs
    config_sensitive Mapping[str, str]
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    status str

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    configSensitive Map<String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation; a short sketch follows after these notes.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").
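
    Because pausing and resuming are modeled as an update to the status input, a connector can be paused or resumed by changing that one property and running pulumi up again. A minimal TypeScript sketch with placeholder IDs (not a complete production configuration):

    import * as confluentcloud from "@pulumi/confluentcloud";

    // Switching `status` between "RUNNING" and "PAUSED" on an existing Connector
    // and re-running `pulumi up` performs the pause/resume update in place.
    const pausedConnector = new confluentcloud.Connector("paused-example", {
        environment: { id: "env-abc123" },   // placeholder Environment ID
        kafkaCluster: { id: "lkc-abc123" },  // placeholder Kafka cluster ID
        configSensitive: {},                 // no sensitive settings, so {} is set explicitly
        configNonsensitive: {
            "connector.class": "DatagenSource",
            "name": "PausedDatagenConnector",
            "kafka.auth.mode": "SERVICE_ACCOUNT",
            "kafka.service.account.id": "sa-abc123", // placeholder service account ID
            "kafka.topic": "orders",
            "output.data.format": "JSON",
            "quickstart": "ORDERS",
            "tasks.max": "1",
        },
        status: "PAUSED", // change back to "RUNNING" to resume the connector
    });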

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Connector resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
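
    For example, continuing the TypeScript constructor example above, the provider-assigned ID can be exported as a stack output:

    // `connectorResource` is the Connector declared in the constructor example above.
    export const connectorId = connectorResource.id;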

    Look up Existing Connector Resource

    Get an existing Connector resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: ConnectorState, opts?: CustomResourceOptions): Connector
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            config_nonsensitive: Optional[Mapping[str, str]] = None,
            config_sensitive: Optional[Mapping[str, str]] = None,
            environment: Optional[ConnectorEnvironmentArgs] = None,
            kafka_cluster: Optional[ConnectorKafkaClusterArgs] = None,
            status: Optional[str] = None) -> Connector
    func GetConnector(ctx *Context, name string, id IDInput, state *ConnectorState, opts ...ResourceOption) (*Connector, error)
    public static Connector Get(string name, Input<string> id, ConnectorState? state, CustomResourceOptions? opts = null)
    public static Connector get(String name, Output<String> id, ConnectorState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ConfigNonsensitive Dictionary<string, string>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    ConfigSensitive Dictionary<string, string>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    Environment Pulumi.ConfluentCloud.Inputs.ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster Pulumi.ConfluentCloud.Inputs.ConnectorKafkaCluster
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    ConfigNonsensitive map[string]string
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    ConfigSensitive map[string]string
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    Environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    KafkaCluster ConnectorKafkaClusterArgs
    Status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String,String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    configSensitive Map<String,String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive {[key: string]: string}
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    configSensitive {[key: string]: string}
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment ConnectorEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster ConnectorKafkaCluster
    status string

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    config_nonsensitive Mapping[str, str]
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    config_sensitive Mapping[str, str]
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment ConnectorEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafka_cluster ConnectorKafkaClusterArgs
    status str

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").

    configNonsensitive Map<String>
    Block for custom nonsensitive configuration properties that are not labelled with "Type: password" under "Configuration Properties" section in the docs:
    configSensitive Map<String>
    Block for custom sensitive configuration properties that are labelled with "Type: password" under "Configuration Properties" section in the docs:
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    kafkaCluster Property Map
    status String

    The status of the connector (one of "NONE", "PROVISIONING", "RUNNING", "DEGRADED", "FAILED", "PAUSED", "DELETED"). Pausing ("RUNNING" -> "PAUSED") and resuming ("PAUSED" -> "RUNNING") a connector is supported via an update operation.

    Note: If there are no sensitive configuration settings for your connector, set config_sensitive = {} explicitly.

    Note: You may declare sensitive variables for secrets in the config_sensitive block and set them using environment variables (for example, export TF_VAR_aws_access_key_id="foo").
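
    A minimal TypeScript lookup sketch; the logical name is arbitrary, and the ID shown is a placeholder for the provider-assigned ID of an existing connector (for example, one obtained from the state of the stack that created it):

    import * as confluentcloud from "@pulumi/confluentcloud";

    // Placeholder ID: substitute the provider-assigned ID of the existing connector.
    const existingConnectorId = "<provider-assigned connector ID>";

    // Look up the existing connector's state without creating or modifying it.
    const existing = confluentcloud.Connector.get("existing-connector", existingConnectorId);

    // Looked-up properties are then available as outputs, e.g. the connector status.
    export const existingStatus = existing.status;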

    Supporting Types

    ConnectorEnvironment, ConnectorEnvironmentArgs

    Id string
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    Id string
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id String
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id string
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id str
    The ID of the Environment that the connector belongs to, for example, env-abc123.
    id String
    The ID of the Environment that the connector belongs to, for example, env-abc123.

    ConnectorKafkaCluster, ConnectorKafkaClusterArgs

    Id string
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    Id string
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id string
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id str
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.
    id String
    The ID of the Kafka cluster that the connector belongs to, for example, lkc-abc123.

    Import

    You can import a connector by using the Environment ID, Kafka cluster ID, and connector name, in the format <Environment ID>/<Kafka cluster ID>/<Connector name>, for example:

    $ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>"

    $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>"

    $ pulumi import confluentcloud:index/connector:Connector my_connector "env-abc123/lkc-abc123/S3_SINKConnector_0"
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Confluent Cloud pulumi/pulumi-confluentcloud
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the confluent Terraform Provider.