digitalocean.DatabaseKafkaConfig

DigitalOcean v4.35.0 published on Tuesday, Nov 19, 2024 by Pulumi

    Provides a virtual resource that can be used to change advanced configuration options for a DigitalOcean managed Kafka database cluster.

    Note: Kafka configurations are only removed from state when destroyed; the remote configuration is not unset.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as digitalocean from "@pulumi/digitalocean";
    
    const exampleDatabaseCluster = new digitalocean.DatabaseCluster("example", {
        name: "example-kafka-cluster",
        engine: "kafka",
        version: "3.7",
        size: digitalocean.DatabaseSlug.DB_1VPCU1GB,
        region: digitalocean.Region.NYC3,
        nodeCount: 3,
    });
    const example = new digitalocean.DatabaseKafkaConfig("example", {
        clusterId: exampleDatabaseCluster.id,
        groupInitialRebalanceDelayMs: 3000,
        groupMinSessionTimeoutMs: 6000,
        groupMaxSessionTimeoutMs: 1800000,
        messageMaxBytes: 1048588,
        logCleanerDeleteRetentionMs: 86400000,
        logCleanerMinCompactionLagMs: "0",
        logFlushIntervalMs: "9223372036854775807",
        logIndexIntervalBytes: 4096,
        logMessageDownconversionEnable: true,
        logMessageTimestampDifferenceMaxMs: "9223372036854775807",
        logPreallocate: false,
        logRetentionBytes: "-1",
        logRetentionHours: 168,
        logRetentionMs: "604800000",
        logRollJitterMs: "0",
        logSegmentDeleteDelayMs: 60000,
        autoCreateTopicsEnable: true,
    });
    
    import pulumi
    import pulumi_digitalocean as digitalocean
    
    example_database_cluster = digitalocean.DatabaseCluster("example",
        name="example-kafka-cluster",
        engine="kafka",
        version="3.7",
        size=digitalocean.DatabaseSlug.D_B_1_VPCU1_GB,
        region=digitalocean.Region.NYC3,
        node_count=3)
    example = digitalocean.DatabaseKafkaConfig("example",
        cluster_id=example_database_cluster.id,
        group_initial_rebalance_delay_ms=3000,
        group_min_session_timeout_ms=6000,
        group_max_session_timeout_ms=1800000,
        message_max_bytes=1048588,
        log_cleaner_delete_retention_ms=86400000,
        log_cleaner_min_compaction_lag_ms="0",
        log_flush_interval_ms="9223372036854775807",
        log_index_interval_bytes=4096,
        log_message_downconversion_enable=True,
        log_message_timestamp_difference_max_ms="9223372036854775807",
        log_preallocate=False,
        log_retention_bytes="-1",
        log_retention_hours=168,
        log_retention_ms="604800000",
        log_roll_jitter_ms="0",
        log_segment_delete_delay_ms=60000,
        auto_create_topics_enable=True)
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		exampleDatabaseCluster, err := digitalocean.NewDatabaseCluster(ctx, "example", &digitalocean.DatabaseClusterArgs{
    			Name:      pulumi.String("example-kafka-cluster"),
    			Engine:    pulumi.String("kafka"),
    			Version:   pulumi.String("3.7"),
    			Size:      pulumi.String(digitalocean.DatabaseSlug_DB_1VPCU1GB),
    			Region:    pulumi.String(digitalocean.RegionNYC3),
    			NodeCount: pulumi.Int(3),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = digitalocean.NewDatabaseKafkaConfig(ctx, "example", &digitalocean.DatabaseKafkaConfigArgs{
    			ClusterId:                          exampleDatabaseCluster.ID(),
    			GroupInitialRebalanceDelayMs:       pulumi.Int(3000),
    			GroupMinSessionTimeoutMs:           pulumi.Int(6000),
    			GroupMaxSessionTimeoutMs:           pulumi.Int(1800000),
    			MessageMaxBytes:                    pulumi.Int(1048588),
    			LogCleanerDeleteRetentionMs:        pulumi.Int(86400000),
    			LogCleanerMinCompactionLagMs:       pulumi.String("0"),
    			LogFlushIntervalMs:                 pulumi.String("9223372036854775807"),
    			LogIndexIntervalBytes:              pulumi.Int(4096),
    			LogMessageDownconversionEnable:     pulumi.Bool(true),
    			LogMessageTimestampDifferenceMaxMs: pulumi.String("9223372036854775807"),
    			LogPreallocate:                     pulumi.Bool(false),
    			LogRetentionBytes:                  pulumi.String("-1"),
    			LogRetentionHours:                  pulumi.Int(168),
    			LogRetentionMs:                     pulumi.String("604800000"),
    			LogRollJitterMs:                    pulumi.String("0"),
    			LogSegmentDeleteDelayMs:            pulumi.Int(60000),
    			AutoCreateTopicsEnable:             pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using DigitalOcean = Pulumi.DigitalOcean;
    
    return await Deployment.RunAsync(() => 
    {
        var exampleDatabaseCluster = new DigitalOcean.DatabaseCluster("example", new()
        {
            Name = "example-kafka-cluster",
            Engine = "kafka",
            Version = "3.7",
            Size = DigitalOcean.DatabaseSlug.DB_1VPCU1GB,
            Region = DigitalOcean.Region.NYC3,
            NodeCount = 3,
        });
    
        var example = new DigitalOcean.DatabaseKafkaConfig("example", new()
        {
            ClusterId = exampleDatabaseCluster.Id,
            GroupInitialRebalanceDelayMs = 3000,
            GroupMinSessionTimeoutMs = 6000,
            GroupMaxSessionTimeoutMs = 1800000,
            MessageMaxBytes = 1048588,
            LogCleanerDeleteRetentionMs = 86400000,
            LogCleanerMinCompactionLagMs = "0",
            LogFlushIntervalMs = "9223372036854775807",
            LogIndexIntervalBytes = 4096,
            LogMessageDownconversionEnable = true,
            LogMessageTimestampDifferenceMaxMs = "9223372036854775807",
            LogPreallocate = false,
            LogRetentionBytes = "-1",
            LogRetentionHours = 168,
            LogRetentionMs = "604800000",
            LogRollJitterMs = "0",
            LogSegmentDeleteDelayMs = 60000,
            AutoCreateTopicsEnable = true,
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.digitalocean.DatabaseCluster;
    import com.pulumi.digitalocean.DatabaseClusterArgs;
    import com.pulumi.digitalocean.DatabaseKafkaConfig;
    import com.pulumi.digitalocean.DatabaseKafkaConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var exampleDatabaseCluster = new DatabaseCluster("exampleDatabaseCluster", DatabaseClusterArgs.builder()
                .name("example-kafka-cluster")
                .engine("kafka")
                .version("3.7")
                .size("db-s-1vcpu-1gb")
                .region("nyc3")
                .nodeCount(3)
                .build());
    
            var example = new DatabaseKafkaConfig("example", DatabaseKafkaConfigArgs.builder()
                .clusterId(exampleDatabaseCluster.id())
                .groupInitialRebalanceDelayMs(3000)
                .groupMinSessionTimeoutMs(6000)
                .groupMaxSessionTimeoutMs(1800000)
                .messageMaxBytes(1048588)
                .logCleanerDeleteRetentionMs(86400000)
                .logCleanerMinCompactionLagMs("0")
                .logFlushIntervalMs("9223372036854775807")
                .logIndexIntervalBytes(4096)
                .logMessageDownconversionEnable(true)
                .logMessageTimestampDifferenceMaxMs("9223372036854775807")
                .logPreallocate(false)
                .logRetentionBytes("-1")
                .logRetentionHours(168)
                .logRetentionMs("604800000")
                .logRollJitterMs("0")
                .logSegmentDeleteDelayMs(60000)
                .autoCreateTopicsEnable(true)
                .build());
    
        }
    }
    
    resources:
      example:
        type: digitalocean:DatabaseKafkaConfig
        properties:
          clusterId: ${exampleDatabaseCluster.id}
          groupInitialRebalanceDelayMs: 3000
          groupMinSessionTimeoutMs: 6000
          groupMaxSessionTimeoutMs: 1800000
          messageMaxBytes: 1048588
          logCleanerDeleteRetentionMs: 86400000
          logCleanerMinCompactionLagMs: "0"
          logFlushIntervalMs: "9223372036854775807"
          logIndexIntervalBytes: 4096
          logMessageDownconversionEnable: true
          logMessageTimestampDifferenceMaxMs: "9223372036854775807"
          logPreallocate: false
          logRetentionBytes: "-1"
          logRetentionHours: 168
          logRetentionMs: "604800000"
          logRollJitterMs: "0"
          logSegmentDeleteDelayMs: 60000
          autoCreateTopicsEnable: true
      exampleDatabaseCluster:
        type: digitalocean:DatabaseCluster
        name: example
        properties:
          name: example-kafka-cluster
          engine: kafka
          version: '3.7'
          size: db-s-1vcpu-1gb
          region: nyc3
          nodeCount: 3
    

    Create DatabaseKafkaConfig Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new DatabaseKafkaConfig(name: string, args: DatabaseKafkaConfigArgs, opts?: CustomResourceOptions);
    @overload
    def DatabaseKafkaConfig(resource_name: str,
                            args: DatabaseKafkaConfigArgs,
                            opts: Optional[ResourceOptions] = None)
    
    @overload
    def DatabaseKafkaConfig(resource_name: str,
                            opts: Optional[ResourceOptions] = None,
                            cluster_id: Optional[str] = None,
                            log_index_interval_bytes: Optional[int] = None,
                            log_cleaner_delete_retention_ms: Optional[int] = None,
                            log_message_downconversion_enable: Optional[bool] = None,
                            group_min_session_timeout_ms: Optional[int] = None,
                            log_message_timestamp_difference_max_ms: Optional[str] = None,
                            log_cleaner_min_compaction_lag_ms: Optional[str] = None,
                            log_flush_interval_ms: Optional[str] = None,
                            log_preallocate: Optional[bool] = None,
                            group_max_session_timeout_ms: Optional[int] = None,
                            group_initial_rebalance_delay_ms: Optional[int] = None,
                            auto_create_topics_enable: Optional[bool] = None,
                            log_retention_bytes: Optional[str] = None,
                            log_retention_hours: Optional[int] = None,
                            log_retention_ms: Optional[str] = None,
                            log_roll_jitter_ms: Optional[str] = None,
                            log_segment_delete_delay_ms: Optional[int] = None,
                            message_max_bytes: Optional[int] = None)
    func NewDatabaseKafkaConfig(ctx *Context, name string, args DatabaseKafkaConfigArgs, opts ...ResourceOption) (*DatabaseKafkaConfig, error)
    public DatabaseKafkaConfig(string name, DatabaseKafkaConfigArgs args, CustomResourceOptions? opts = null)
    public DatabaseKafkaConfig(String name, DatabaseKafkaConfigArgs args)
    public DatabaseKafkaConfig(String name, DatabaseKafkaConfigArgs args, CustomResourceOptions options)
    
    type: digitalocean:DatabaseKafkaConfig
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args DatabaseKafkaConfigArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args DatabaseKafkaConfigArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args DatabaseKafkaConfigArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args DatabaseKafkaConfigArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args DatabaseKafkaConfigArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var databaseKafkaConfigResource = new DigitalOcean.DatabaseKafkaConfig("databaseKafkaConfigResource", new()
    {
        ClusterId = "string",
        LogIndexIntervalBytes = 0,
        LogCleanerDeleteRetentionMs = 0,
        LogMessageDownconversionEnable = false,
        GroupMinSessionTimeoutMs = 0,
        LogMessageTimestampDifferenceMaxMs = "string",
        LogCleanerMinCompactionLagMs = "string",
        LogFlushIntervalMs = "string",
        LogPreallocate = false,
        GroupMaxSessionTimeoutMs = 0,
        GroupInitialRebalanceDelayMs = 0,
        AutoCreateTopicsEnable = false,
        LogRetentionBytes = "string",
        LogRetentionHours = 0,
        LogRetentionMs = "string",
        LogRollJitterMs = "string",
        LogSegmentDeleteDelayMs = 0,
        MessageMaxBytes = 0,
    });
    
    example, err := digitalocean.NewDatabaseKafkaConfig(ctx, "databaseKafkaConfigResource", &digitalocean.DatabaseKafkaConfigArgs{
    	ClusterId:                          pulumi.String("string"),
    	LogIndexIntervalBytes:              pulumi.Int(0),
    	LogCleanerDeleteRetentionMs:        pulumi.Int(0),
    	LogMessageDownconversionEnable:     pulumi.Bool(false),
    	GroupMinSessionTimeoutMs:           pulumi.Int(0),
    	LogMessageTimestampDifferenceMaxMs: pulumi.String("string"),
    	LogCleanerMinCompactionLagMs:       pulumi.String("string"),
    	LogFlushIntervalMs:                 pulumi.String("string"),
    	LogPreallocate:                     pulumi.Bool(false),
    	GroupMaxSessionTimeoutMs:           pulumi.Int(0),
    	GroupInitialRebalanceDelayMs:       pulumi.Int(0),
    	AutoCreateTopicsEnable:             pulumi.Bool(false),
    	LogRetentionBytes:                  pulumi.String("string"),
    	LogRetentionHours:                  pulumi.Int(0),
    	LogRetentionMs:                     pulumi.String("string"),
    	LogRollJitterMs:                    pulumi.String("string"),
    	LogSegmentDeleteDelayMs:            pulumi.Int(0),
    	MessageMaxBytes:                    pulumi.Int(0),
    })
    
    var databaseKafkaConfigResource = new DatabaseKafkaConfig("databaseKafkaConfigResource", DatabaseKafkaConfigArgs.builder()
        .clusterId("string")
        .logIndexIntervalBytes(0)
        .logCleanerDeleteRetentionMs(0)
        .logMessageDownconversionEnable(false)
        .groupMinSessionTimeoutMs(0)
        .logMessageTimestampDifferenceMaxMs("string")
        .logCleanerMinCompactionLagMs("string")
        .logFlushIntervalMs("string")
        .logPreallocate(false)
        .groupMaxSessionTimeoutMs(0)
        .groupInitialRebalanceDelayMs(0)
        .autoCreateTopicsEnable(false)
        .logRetentionBytes("string")
        .logRetentionHours(0)
        .logRetentionMs("string")
        .logRollJitterMs("string")
        .logSegmentDeleteDelayMs(0)
        .messageMaxBytes(0)
        .build());
    
    database_kafka_config_resource = digitalocean.DatabaseKafkaConfig("databaseKafkaConfigResource",
        cluster_id="string",
        log_index_interval_bytes=0,
        log_cleaner_delete_retention_ms=0,
        log_message_downconversion_enable=False,
        group_min_session_timeout_ms=0,
        log_message_timestamp_difference_max_ms="string",
        log_cleaner_min_compaction_lag_ms="string",
        log_flush_interval_ms="string",
        log_preallocate=False,
        group_max_session_timeout_ms=0,
        group_initial_rebalance_delay_ms=0,
        auto_create_topics_enable=False,
        log_retention_bytes="string",
        log_retention_hours=0,
        log_retention_ms="string",
        log_roll_jitter_ms="string",
        log_segment_delete_delay_ms=0,
        message_max_bytes=0)
    
    const databaseKafkaConfigResource = new digitalocean.DatabaseKafkaConfig("databaseKafkaConfigResource", {
        clusterId: "string",
        logIndexIntervalBytes: 0,
        logCleanerDeleteRetentionMs: 0,
        logMessageDownconversionEnable: false,
        groupMinSessionTimeoutMs: 0,
        logMessageTimestampDifferenceMaxMs: "string",
        logCleanerMinCompactionLagMs: "string",
        logFlushIntervalMs: "string",
        logPreallocate: false,
        groupMaxSessionTimeoutMs: 0,
        groupInitialRebalanceDelayMs: 0,
        autoCreateTopicsEnable: false,
        logRetentionBytes: "string",
        logRetentionHours: 0,
        logRetentionMs: "string",
        logRollJitterMs: "string",
        logSegmentDeleteDelayMs: 0,
        messageMaxBytes: 0,
    });
    
    type: digitalocean:DatabaseKafkaConfig
    properties:
        autoCreateTopicsEnable: false
        clusterId: string
        groupInitialRebalanceDelayMs: 0
        groupMaxSessionTimeoutMs: 0
        groupMinSessionTimeoutMs: 0
        logCleanerDeleteRetentionMs: 0
        logCleanerMinCompactionLagMs: string
        logFlushIntervalMs: string
        logIndexIntervalBytes: 0
        logMessageDownconversionEnable: false
        logMessageTimestampDifferenceMaxMs: string
        logPreallocate: false
        logRetentionBytes: string
        logRetentionHours: 0
        logRetentionMs: string
        logRollJitterMs: string
        logSegmentDeleteDelayMs: 0
        messageMaxBytes: 0
    

    DatabaseKafkaConfig Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The DatabaseKafkaConfig resource accepts the following input properties:

    ClusterId string
    The ID of the target Kafka cluster.
    AutoCreateTopicsEnable bool
    Enable auto creation of topics.
    GroupInitialRebalanceDelayMs int
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    GroupMaxSessionTimeoutMs int
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    GroupMinSessionTimeoutMs int
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    LogCleanerDeleteRetentionMs int
    How long are delete records retained?
    LogCleanerMinCompactionLagMs string
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    LogFlushIntervalMs string
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    LogIndexIntervalBytes int
    The interval with which Kafka adds an entry to the offset index.
    LogMessageDownconversionEnable bool
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    LogMessageTimestampDifferenceMaxMs string
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    LogPreallocate bool
    Controls whether to preallocate a file when creating a new segment.
    LogRetentionBytes string
    The maximum size of the log before deleting messages.
    LogRetentionHours int
    The number of hours to keep a log file before deleting it.
    LogRetentionMs string
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    LogRollJitterMs string
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    LogSegmentDeleteDelayMs int
    The amount of time to wait before deleting a file from the filesystem.
    MessageMaxBytes int
    The maximum size of message that the server can receive.
    clusterId String
    The ID of the target Kafka cluster.
    autoCreateTopicsEnable Boolean
    Enable auto creation of topics.
    groupInitialRebalanceDelayMs Integer
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    groupMaxSessionTimeoutMs Integer
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    groupMinSessionTimeoutMs Integer
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    logCleanerDeleteRetentionMs Integer
    How long are delete records retained?
    logCleanerMinCompactionLagMs String
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logFlushIntervalMs String
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes Integer
    The interval with which Kafka adds an entry to the offset index.
    logMessageDownconversionEnable Boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs String
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logPreallocate Boolean
    Controls whether to preallocate a file when creating a new segment.
    logRetentionBytes String
    The maximum size of the log before deleting messages.
    logRetentionHours Integer
    The number of hours to keep a log file before deleting it.
    logRetentionMs String
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs String
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logSegmentDeleteDelayMs Integer
    The amount of time to wait before deleting a file from the filesystem.
    messageMaxBytes Integer
    The maximum size of message that the server can receive.
    clusterId string
    The ID of the target Kafka cluster.
    autoCreateTopicsEnable boolean
    Enable auto creation of topics.
    groupInitialRebalanceDelayMs number
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    groupMaxSessionTimeoutMs number
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    groupMinSessionTimeoutMs number
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    logCleanerDeleteRetentionMs number
    How long are delete records retained?
    logCleanerMinCompactionLagMs string
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logFlushIntervalMs string
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes number
    The interval with which Kafka adds an entry to the offset index.
    logMessageDownconversionEnable boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs string
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logPreallocate boolean
    Controls whether to preallocate a file when creating a new segment.
    logRetentionBytes string
    The maximum size of the log before deleting messages.
    logRetentionHours number
    The number of hours to keep a log file before deleting it.
    logRetentionMs string
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs string
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logSegmentDeleteDelayMs number
    The amount of time to wait before deleting a file from the filesystem.
    messageMaxBytes number
    The maximum size of message that the server can receive.
    cluster_id str
    The ID of the target Kafka cluster.
    auto_create_topics_enable bool
    Enable auto creation of topics.
    group_initial_rebalance_delay_ms int
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    group_max_session_timeout_ms int
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    group_min_session_timeout_ms int
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    log_cleaner_delete_retention_ms int
    How long are delete records retained?
    log_cleaner_min_compaction_lag_ms str
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    log_flush_interval_ms str
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    log_index_interval_bytes int
    The interval with which Kafka adds an entry to the offset index.
    log_message_downconversion_enable bool
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    log_message_timestamp_difference_max_ms str
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    log_preallocate bool
    Controls whether to preallocate a file when creating a new segment.
    log_retention_bytes str
    The maximum size of the log before deleting messages.
    log_retention_hours int
    The number of hours to keep a log file before deleting it.
    log_retention_ms str
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    log_roll_jitter_ms str
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    log_segment_delete_delay_ms int
    The amount of time to wait before deleting a file from the filesystem.
    message_max_bytes int
    The maximum size of message that the server can receive.
    clusterId String
    The ID of the target Kafka cluster.
    autoCreateTopicsEnable Boolean
    Enable auto creation of topics.
    groupInitialRebalanceDelayMs Number
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    groupMaxSessionTimeoutMs Number
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    groupMinSessionTimeoutMs Number
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    logCleanerDeleteRetentionMs Number
    How long are delete records retained?
    logCleanerMinCompactionLagMs String
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logFlushIntervalMs String
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes Number
    The interval with which Kafka adds an entry to the offset index.
    logMessageDownconversionEnable Boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs String
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logPreallocate Boolean
    Controls whether to preallocate a file when creating a new segment.
    logRetentionBytes String
    The maximum size of the log before deleting messages.
    logRetentionHours Number
    The number of hours to keep a log file before deleting it.
    logRetentionMs String
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs String
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logSegmentDeleteDelayMs Number
    The amount of time to wait before deleting a file from the filesystem.
    messageMaxBytes Number
    The maximum size of message that the server can receive.
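
    clusterId identifies the target cluster and is the only argument that must be supplied; options not set here are left unmanaged by Pulumi. As a minimal Python sketch (assuming the example_database_cluster resource from the Example Usage section), the following sets only the initial rebalance delay to 0, which the groupInitialRebalanceDelayMs description above suggests for development and testing:

    import pulumi_digitalocean as digitalocean

    # Minimal override: cluster_id is the only required argument; any option
    # not listed here is simply not managed by this resource.
    dev_kafka_config = digitalocean.DatabaseKafkaConfig("dev-kafka-config",
        cluster_id=example_database_cluster.id,
        group_initial_rebalance_delay_ms=0)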

    Outputs

    All input properties are implicitly available as output properties. Additionally, the DatabaseKafkaConfig resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
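
    The id output can be exported or referenced like any other Pulumi output. A short Python sketch, continuing the example resource from the Example Usage section (per the Import section below, this ID corresponds to the parent cluster's ID):

    import pulumi

    # Export the provider-assigned ID of the Kafka configuration resource.
    pulumi.export("kafka_config_id", example.id)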

    Look up Existing DatabaseKafkaConfig Resource

    Get an existing DatabaseKafkaConfig resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: DatabaseKafkaConfigState, opts?: CustomResourceOptions): DatabaseKafkaConfig
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            auto_create_topics_enable: Optional[bool] = None,
            cluster_id: Optional[str] = None,
            group_initial_rebalance_delay_ms: Optional[int] = None,
            group_max_session_timeout_ms: Optional[int] = None,
            group_min_session_timeout_ms: Optional[int] = None,
            log_cleaner_delete_retention_ms: Optional[int] = None,
            log_cleaner_min_compaction_lag_ms: Optional[str] = None,
            log_flush_interval_ms: Optional[str] = None,
            log_index_interval_bytes: Optional[int] = None,
            log_message_downconversion_enable: Optional[bool] = None,
            log_message_timestamp_difference_max_ms: Optional[str] = None,
            log_preallocate: Optional[bool] = None,
            log_retention_bytes: Optional[str] = None,
            log_retention_hours: Optional[int] = None,
            log_retention_ms: Optional[str] = None,
            log_roll_jitter_ms: Optional[str] = None,
            log_segment_delete_delay_ms: Optional[int] = None,
            message_max_bytes: Optional[int] = None) -> DatabaseKafkaConfig
    func GetDatabaseKafkaConfig(ctx *Context, name string, id IDInput, state *DatabaseKafkaConfigState, opts ...ResourceOption) (*DatabaseKafkaConfig, error)
    public static DatabaseKafkaConfig Get(string name, Input<string> id, DatabaseKafkaConfigState? state, CustomResourceOptions? opts = null)
    public static DatabaseKafkaConfig get(String name, Output<String> id, DatabaseKafkaConfigState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AutoCreateTopicsEnable bool
    Enable auto creation of topics.
    ClusterId string
    The ID of the target Kafka cluster.
    GroupInitialRebalanceDelayMs int
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    GroupMaxSessionTimeoutMs int
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    GroupMinSessionTimeoutMs int
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    LogCleanerDeleteRetentionMs int
    How long are delete records retained?
    LogCleanerMinCompactionLagMs string
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    LogFlushIntervalMs string
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    LogIndexIntervalBytes int
    The interval with which Kafka adds an entry to the offset index.
    LogMessageDownconversionEnable bool
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    LogMessageTimestampDifferenceMaxMs string
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    LogPreallocate bool
    Controls whether to preallocate a file when creating a new segment.
    LogRetentionBytes string
    The maximum size of the log before deleting messages.
    LogRetentionHours int
    The number of hours to keep a log file before deleting it.
    LogRetentionMs string
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    LogRollJitterMs string
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    LogSegmentDeleteDelayMs int
    The amount of time to wait before deleting a file from the filesystem.
    MessageMaxBytes int
    The maximum size of message that the server can receive.
    autoCreateTopicsEnable Boolean
    Enable auto creation of topics.
    clusterId String
    The ID of the target Kafka cluster.
    groupInitialRebalanceDelayMs Integer
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    groupMaxSessionTimeoutMs Integer
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    groupMinSessionTimeoutMs Integer
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    logCleanerDeleteRetentionMs Integer
    How long are delete records retained?
    logCleanerMinCompactionLagMs String
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logFlushIntervalMs String
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes Integer
    The interval with which Kafka adds an entry to the offset index.
    logMessageDownconversionEnable Boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs String
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logPreallocate Boolean
    Controls whether to preallocate a file when creating a new segment.
    logRetentionBytes String
    The maximum size of the log before deleting messages.
    logRetentionHours Integer
    The number of hours to keep a log file before deleting it.
    logRetentionMs String
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs String
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logSegmentDeleteDelayMs Integer
    The amount of time to wait before deleting a file from the filesystem.
    messageMaxBytes Integer
    The maximum size of message that the server can receive.
    autoCreateTopicsEnable boolean
    Enable auto creation of topics.
    clusterId string
    The ID of the target Kafka cluster.
    groupInitialRebalanceDelayMs number
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    groupMaxSessionTimeoutMs number
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    groupMinSessionTimeoutMs number
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    logCleanerDeleteRetentionMs number
    How long are delete records retained?
    logCleanerMinCompactionLagMs string
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logFlushIntervalMs string
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes number
    The interval with which Kafka adds an entry to the offset index.
    logMessageDownconversionEnable boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs string
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logPreallocate boolean
    Controls whether to preallocate a file when creating a new segment.
    logRetentionBytes string
    The maximum size of the log before deleting messages.
    logRetentionHours number
    The number of hours to keep a log file before deleting it.
    logRetentionMs string
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs string
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logSegmentDeleteDelayMs number
    The amount of time to wait before deleting a file from the filesystem.
    messageMaxBytes number
    The maximum size of message that the server can receive.
    auto_create_topics_enable bool
    Enable auto creation of topics.
    cluster_id str
    The ID of the target Kafka cluster.
    group_initial_rebalance_delay_ms int
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    group_max_session_timeout_ms int
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    group_min_session_timeout_ms int
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    log_cleaner_delete_retention_ms int
    How long are delete records retained?
    log_cleaner_min_compaction_lag_ms str
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    log_flush_interval_ms str
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    log_index_interval_bytes int
    The interval with which Kafka adds an entry to the offset index.
    log_message_downconversion_enable bool
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    log_message_timestamp_difference_max_ms str
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    log_preallocate bool
    Controls whether to preallocate a file when creating a new segment.
    log_retention_bytes str
    The maximum size of the log before deleting messages.
    log_retention_hours int
    The number of hours to keep a log file before deleting it.
    log_retention_ms str
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    log_roll_jitter_ms str
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    log_segment_delete_delay_ms int
    The amount of time to wait before deleting a file from the filesystem.
    message_max_bytes int
    The maximum size of message that the server can receive.
    autoCreateTopicsEnable Boolean
    Enable auto creation of topics.
    clusterId String
    The ID of the target Kafka cluster.
    groupInitialRebalanceDelayMs Number
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
    groupMaxSessionTimeoutMs Number
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    groupMinSessionTimeoutMs Number
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
    logCleanerDeleteRetentionMs Number
    How long are delete records retained?
    logCleanerMinCompactionLagMs String
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logFlushIntervalMs String
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes Number
    The interval with which Kafka adds an entry to the offset index.
    logMessageDownconversionEnable Boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs String
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logPreallocate Boolean
    Controls whether to preallocate a file when creating a new segment.
    logRetentionBytes String
    The maximum size of the log before deleting messages.
    logRetentionHours Number
    The number of hours to keep a log file before deleting it.
    logRetentionMs String
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs String
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logSegmentDeleteDelayMs Number
    The amount of time to wait before deleting a file from the filesystem.
    messageMaxBytes Number
    The maximum size of message that the server can receive.
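
    As an illustration, a minimal Python sketch of looking up an existing configuration; because this resource's ID is the parent cluster's ID, a known cluster ID (here, the placeholder value from the Import example below) can be passed directly:

    import pulumi_digitalocean as digitalocean

    # Look up an existing Kafka configuration by its parent cluster ID.
    # "existing" is only the logical name for the lookup; nothing is created.
    existing = digitalocean.DatabaseKafkaConfig.get("existing",
        id="4b62829a-9c42-465b-aaa3-84051048e712")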

    Import

    A Kafka database cluster’s configuration can be imported using the ID of the parent cluster, e.g.

    $ pulumi import digitalocean:index/databaseKafkaConfig:DatabaseKafkaConfig example 4b62829a-9c42-465b-aaa3-84051048e712
    

    To learn more about importing existing cloud resources, see Importing resources.
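
    After importing, the configuration also needs a matching declaration in the program so that subsequent pulumi up runs manage it. A hedged Python sketch, reusing the example cluster ID from the import command above (option values should be set to mirror the imported state to avoid spurious diffs):

    import pulumi_digitalocean as digitalocean

    # Declaration paired with the `pulumi import` command above; values should
    # match the imported state so the next update shows no changes.
    example = digitalocean.DatabaseKafkaConfig("example",
        cluster_id="4b62829a-9c42-465b-aaa3-84051048e712")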

    Package Details

    Repository
    DigitalOcean pulumi/pulumi-digitalocean
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the digitalocean Terraform Provider.