digitalocean.DatabaseKafkaTopic
Provides a Kafka topic resource for DigitalOcean managed Kafka clusters.
Example Usage
Create a new Kafka topic
TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as digitalocean from "@pulumi/digitalocean";

const kafka_example = new digitalocean.DatabaseCluster("kafka-example", {
    name: "example-kafka-cluster",
    engine: "kafka",
    version: "3.5",
    size: "db-s-2vcpu-2gb",
    region: digitalocean.Region.NYC1,
    nodeCount: 3,
    tags: ["production"],
});
const topic_01 = new digitalocean.DatabaseKafkaTopic("topic-01", {
    clusterId: kafka_example.id,
    name: "topic-01",
    partitionCount: 3,
    replicationFactor: 2,
    configs: [{
        cleanupPolicy: "compact",
        compressionType: "uncompressed",
        deleteRetentionMs: "14000",
        fileDeleteDelayMs: "170000",
        flushMessages: "92233",
        flushMs: "92233720368",
        indexIntervalBytes: "40962",
        maxCompactionLagMs: "9223372036854775807",
        maxMessageBytes: "1048588",
        messageDownConversionEnable: true,
        messageFormatVersion: "3.0-IV1",
        messageTimestampDifferenceMaxMs: "9223372036854775807",
        messageTimestampType: "log_append_time",
        minCleanableDirtyRatio: 0.5,
        minCompactionLagMs: "20000",
        minInsyncReplicas: 2,
        preallocate: false,
        retentionBytes: "-1",
        retentionMs: "-1",
        segmentBytes: "209715200",
        segmentIndexBytes: "10485760",
        segmentJitterMs: "0",
        segmentMs: "604800000",
    }],
});
Python

import pulumi
import pulumi_digitalocean as digitalocean

kafka_example = digitalocean.DatabaseCluster("kafka-example",
    name="example-kafka-cluster",
    engine="kafka",
    version="3.5",
    size="db-s-2vcpu-2gb",
    region=digitalocean.Region.NYC1,
    node_count=3,
    tags=["production"])
topic_01 = digitalocean.DatabaseKafkaTopic("topic-01",
    cluster_id=kafka_example.id,
    name="topic-01",
    partition_count=3,
    replication_factor=2,
    configs=[{
        "cleanup_policy": "compact",
        "compression_type": "uncompressed",
        "delete_retention_ms": "14000",
        "file_delete_delay_ms": "170000",
        "flush_messages": "92233",
        "flush_ms": "92233720368",
        "index_interval_bytes": "40962",
        "max_compaction_lag_ms": "9223372036854775807",
        "max_message_bytes": "1048588",
        "message_down_conversion_enable": True,
        "message_format_version": "3.0-IV1",
        "message_timestamp_difference_max_ms": "9223372036854775807",
        "message_timestamp_type": "log_append_time",
        "min_cleanable_dirty_ratio": 0.5,
        "min_compaction_lag_ms": "20000",
        "min_insync_replicas": 2,
        "preallocate": False,
        "retention_bytes": "-1",
        "retention_ms": "-1",
        "segment_bytes": "209715200",
        "segment_index_bytes": "10485760",
        "segment_jitter_ms": "0",
        "segment_ms": "604800000",
    }])
Go

package main

import (
    "github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        // Capture the cluster so its ID can be passed to the topic below.
        kafkaExample, err := digitalocean.NewDatabaseCluster(ctx, "kafka-example", &digitalocean.DatabaseClusterArgs{
            Name:      pulumi.String("example-kafka-cluster"),
            Engine:    pulumi.String("kafka"),
            Version:   pulumi.String("3.5"),
            Size:      pulumi.String("db-s-2vcpu-2gb"),
            Region:    pulumi.String(digitalocean.RegionNYC1),
            NodeCount: pulumi.Int(3),
            Tags: pulumi.StringArray{
                pulumi.String("production"),
            },
        })
        if err != nil {
            return err
        }
        _, err = digitalocean.NewDatabaseKafkaTopic(ctx, "topic-01", &digitalocean.DatabaseKafkaTopicArgs{
            ClusterId:         kafkaExample.ID(),
            Name:              pulumi.String("topic-01"),
            PartitionCount:    pulumi.Int(3),
            ReplicationFactor: pulumi.Int(2),
            Configs: digitalocean.DatabaseKafkaTopicConfigArray{
                &digitalocean.DatabaseKafkaTopicConfigArgs{
                    CleanupPolicy:                   pulumi.String("compact"),
                    CompressionType:                 pulumi.String("uncompressed"),
                    DeleteRetentionMs:               pulumi.String("14000"),
                    FileDeleteDelayMs:               pulumi.String("170000"),
                    FlushMessages:                   pulumi.String("92233"),
                    FlushMs:                         pulumi.String("92233720368"),
                    IndexIntervalBytes:              pulumi.String("40962"),
                    MaxCompactionLagMs:              pulumi.String("9223372036854775807"),
                    MaxMessageBytes:                 pulumi.String("1048588"),
                    MessageDownConversionEnable:     pulumi.Bool(true),
                    MessageFormatVersion:            pulumi.String("3.0-IV1"),
                    MessageTimestampDifferenceMaxMs: pulumi.String("9223372036854775807"),
                    MessageTimestampType:            pulumi.String("log_append_time"),
                    MinCleanableDirtyRatio:          pulumi.Float64(0.5),
                    MinCompactionLagMs:              pulumi.String("20000"),
                    MinInsyncReplicas:               pulumi.Int(2),
                    Preallocate:                     pulumi.Bool(false),
                    RetentionBytes:                  pulumi.String("-1"),
                    RetentionMs:                     pulumi.String("-1"),
                    SegmentBytes:                    pulumi.String("209715200"),
                    SegmentIndexBytes:               pulumi.String("10485760"),
                    SegmentJitterMs:                 pulumi.String("0"),
                    SegmentMs:                       pulumi.String("604800000"),
                },
            },
        })
        if err != nil {
            return err
        }
        return nil
    })
}
C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using DigitalOcean = Pulumi.DigitalOcean;

return await Deployment.RunAsync(() =>
{
    var kafka_example = new DigitalOcean.DatabaseCluster("kafka-example", new()
    {
        Name = "example-kafka-cluster",
        Engine = "kafka",
        Version = "3.5",
        Size = "db-s-2vcpu-2gb",
        Region = DigitalOcean.Region.NYC1,
        NodeCount = 3,
        Tags = new[]
        {
            "production",
        },
    });

    var topic_01 = new DigitalOcean.DatabaseKafkaTopic("topic-01", new()
    {
        ClusterId = kafka_example.Id,
        Name = "topic-01",
        PartitionCount = 3,
        ReplicationFactor = 2,
        Configs = new[]
        {
            new DigitalOcean.Inputs.DatabaseKafkaTopicConfigArgs
            {
                CleanupPolicy = "compact",
                CompressionType = "uncompressed",
                DeleteRetentionMs = "14000",
                FileDeleteDelayMs = "170000",
                FlushMessages = "92233",
                FlushMs = "92233720368",
                IndexIntervalBytes = "40962",
                MaxCompactionLagMs = "9223372036854775807",
                MaxMessageBytes = "1048588",
                MessageDownConversionEnable = true,
                MessageFormatVersion = "3.0-IV1",
                MessageTimestampDifferenceMaxMs = "9223372036854775807",
                MessageTimestampType = "log_append_time",
                MinCleanableDirtyRatio = 0.5,
                MinCompactionLagMs = "20000",
                MinInsyncReplicas = 2,
                Preallocate = false,
                RetentionBytes = "-1",
                RetentionMs = "-1",
                SegmentBytes = "209715200",
                SegmentIndexBytes = "10485760",
                SegmentJitterMs = "0",
                SegmentMs = "604800000",
            },
        },
    });
});
Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.digitalocean.DatabaseCluster;
import com.pulumi.digitalocean.DatabaseClusterArgs;
import com.pulumi.digitalocean.DatabaseKafkaTopic;
import com.pulumi.digitalocean.DatabaseKafkaTopicArgs;
import com.pulumi.digitalocean.inputs.DatabaseKafkaTopicConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var kafka_example = new DatabaseCluster("kafka-example", DatabaseClusterArgs.builder()
            .name("example-kafka-cluster")
            .engine("kafka")
            .version("3.5")
            .size("db-s-2vcpu-2gb")
            .region("nyc1")
            .nodeCount(3)
            .tags("production")
            .build());

        var topic_01 = new DatabaseKafkaTopic("topic-01", DatabaseKafkaTopicArgs.builder()
            .clusterId(kafka_example.id())
            .name("topic-01")
            .partitionCount(3)
            .replicationFactor(2)
            .configs(DatabaseKafkaTopicConfigArgs.builder()
                .cleanupPolicy("compact")
                .compressionType("uncompressed")
                .deleteRetentionMs("14000")
                .fileDeleteDelayMs("170000")
                .flushMessages("92233")
                .flushMs("92233720368")
                .indexIntervalBytes("40962")
                .maxCompactionLagMs("9223372036854775807")
                .maxMessageBytes("1048588")
                .messageDownConversionEnable(true)
                .messageFormatVersion("3.0-IV1")
                .messageTimestampDifferenceMaxMs("9223372036854775807")
                .messageTimestampType("log_append_time")
                .minCleanableDirtyRatio(0.5)
                .minCompactionLagMs("20000")
                .minInsyncReplicas(2)
                .preallocate(false)
                .retentionBytes("-1")
                .retentionMs("-1")
                .segmentBytes("209715200")
                .segmentIndexBytes("10485760")
                .segmentJitterMs("0")
                .segmentMs("604800000")
                .build())
            .build());
    }
}
YAML

resources:
  topic-01:
    type: digitalocean:DatabaseKafkaTopic
    properties:
      clusterId: ${["kafka-example"].id}
      name: topic-01
      partitionCount: 3
      replicationFactor: 2
      configs:
        - cleanupPolicy: compact
          compressionType: uncompressed
          deleteRetentionMs: "14000"
          fileDeleteDelayMs: "170000"
          flushMessages: "92233"
          flushMs: "92233720368"
          indexIntervalBytes: "40962"
          maxCompactionLagMs: "9223372036854775807"
          maxMessageBytes: "1048588"
          messageDownConversionEnable: true
          messageFormatVersion: 3.0-IV1
          messageTimestampDifferenceMaxMs: "9223372036854775807"
          messageTimestampType: log_append_time
          minCleanableDirtyRatio: 0.5
          minCompactionLagMs: "20000"
          minInsyncReplicas: 2
          preallocate: false
          retentionBytes: "-1"
          retentionMs: "-1"
          segmentBytes: "209715200"
          segmentIndexBytes: "10485760"
          segmentJitterMs: "0"
          segmentMs: "604800000"
  kafka-example:
    type: digitalocean:DatabaseCluster
    properties:
      name: example-kafka-cluster
      engine: kafka
      version: "3.5"
      size: db-s-2vcpu-2gb
      region: nyc1
      nodeCount: 3
      tags:
        - production
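
Producers and consumers need the cluster's connection details in addition to the topic name. A minimal TypeScript sketch continuing the example above; it assumes the DatabaseCluster resource's uri output and marks the connection string as a secret since it embeds credentials:

// Expose what Kafka clients need in order to use the new topic.
export const kafkaTopic = topic_01.name;
export const kafkaUri = pulumi.secret(kafka_example.uri);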
Create DatabaseKafkaTopic Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DatabaseKafkaTopic(name: string, args: DatabaseKafkaTopicArgs, opts?: CustomResourceOptions);

@overload
def DatabaseKafkaTopic(resource_name: str,
                       args: DatabaseKafkaTopicArgs,
                       opts: Optional[ResourceOptions] = None)
@overload
def DatabaseKafkaTopic(resource_name: str,
                       opts: Optional[ResourceOptions] = None,
                       cluster_id: Optional[str] = None,
                       configs: Optional[Sequence[DatabaseKafkaTopicConfigArgs]] = None,
                       name: Optional[str] = None,
                       partition_count: Optional[int] = None,
                       replication_factor: Optional[int] = None)

func NewDatabaseKafkaTopic(ctx *Context, name string, args DatabaseKafkaTopicArgs, opts ...ResourceOption) (*DatabaseKafkaTopic, error)

public DatabaseKafkaTopic(string name, DatabaseKafkaTopicArgs args, CustomResourceOptions? opts = null)

public DatabaseKafkaTopic(String name, DatabaseKafkaTopicArgs args)
public DatabaseKafkaTopic(String name, DatabaseKafkaTopicArgs args, CustomResourceOptions options)
type: digitalocean:DatabaseKafkaTopic
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string (resource_name in Python)
- The unique name of the resource.
- args DatabaseKafkaTopicArgs
- The arguments to resource properties.
- opts CustomResourceOptions (ResourceOptions in Python, ResourceOption in Go)
- Bag of options to control the resource's behavior.
- ctx Context (Go only)
- Context object for the current deployment.
Constructor example
The following reference example uses placeholder values for all input properties.
C#

var databaseKafkaTopicResource = new DigitalOcean.DatabaseKafkaTopic("databaseKafkaTopicResource", new()
{
    ClusterId = "string",
    Configs = new[]
    {
        new DigitalOcean.Inputs.DatabaseKafkaTopicConfigArgs
        {
            CleanupPolicy = "string",
            CompressionType = "string",
            DeleteRetentionMs = "string",
            FileDeleteDelayMs = "string",
            FlushMessages = "string",
            FlushMs = "string",
            IndexIntervalBytes = "string",
            MaxCompactionLagMs = "string",
            MaxMessageBytes = "string",
            MessageDownConversionEnable = false,
            MessageFormatVersion = "string",
            MessageTimestampDifferenceMaxMs = "string",
            MessageTimestampType = "string",
            MinCleanableDirtyRatio = 0,
            MinCompactionLagMs = "string",
            MinInsyncReplicas = 0,
            Preallocate = false,
            RetentionBytes = "string",
            RetentionMs = "string",
            SegmentBytes = "string",
            SegmentIndexBytes = "string",
            SegmentJitterMs = "string",
            SegmentMs = "string",
        },
    },
    Name = "string",
    PartitionCount = 0,
    ReplicationFactor = 0,
});
Go

example, err := digitalocean.NewDatabaseKafkaTopic(ctx, "databaseKafkaTopicResource", &digitalocean.DatabaseKafkaTopicArgs{
    ClusterId: pulumi.String("string"),
    Configs: digitalocean.DatabaseKafkaTopicConfigArray{
        &digitalocean.DatabaseKafkaTopicConfigArgs{
            CleanupPolicy:                   pulumi.String("string"),
            CompressionType:                 pulumi.String("string"),
            DeleteRetentionMs:               pulumi.String("string"),
            FileDeleteDelayMs:               pulumi.String("string"),
            FlushMessages:                   pulumi.String("string"),
            FlushMs:                         pulumi.String("string"),
            IndexIntervalBytes:              pulumi.String("string"),
            MaxCompactionLagMs:              pulumi.String("string"),
            MaxMessageBytes:                 pulumi.String("string"),
            MessageDownConversionEnable:     pulumi.Bool(false),
            MessageFormatVersion:            pulumi.String("string"),
            MessageTimestampDifferenceMaxMs: pulumi.String("string"),
            MessageTimestampType:            pulumi.String("string"),
            MinCleanableDirtyRatio:          pulumi.Float64(0),
            MinCompactionLagMs:              pulumi.String("string"),
            MinInsyncReplicas:               pulumi.Int(0),
            Preallocate:                     pulumi.Bool(false),
            RetentionBytes:                  pulumi.String("string"),
            RetentionMs:                     pulumi.String("string"),
            SegmentBytes:                    pulumi.String("string"),
            SegmentIndexBytes:               pulumi.String("string"),
            SegmentJitterMs:                 pulumi.String("string"),
            SegmentMs:                       pulumi.String("string"),
        },
    },
    Name:              pulumi.String("string"),
    PartitionCount:    pulumi.Int(0),
    ReplicationFactor: pulumi.Int(0),
})
Java

var databaseKafkaTopicResource = new DatabaseKafkaTopic("databaseKafkaTopicResource", DatabaseKafkaTopicArgs.builder()
    .clusterId("string")
    .configs(DatabaseKafkaTopicConfigArgs.builder()
        .cleanupPolicy("string")
        .compressionType("string")
        .deleteRetentionMs("string")
        .fileDeleteDelayMs("string")
        .flushMessages("string")
        .flushMs("string")
        .indexIntervalBytes("string")
        .maxCompactionLagMs("string")
        .maxMessageBytes("string")
        .messageDownConversionEnable(false)
        .messageFormatVersion("string")
        .messageTimestampDifferenceMaxMs("string")
        .messageTimestampType("string")
        .minCleanableDirtyRatio(0)
        .minCompactionLagMs("string")
        .minInsyncReplicas(0)
        .preallocate(false)
        .retentionBytes("string")
        .retentionMs("string")
        .segmentBytes("string")
        .segmentIndexBytes("string")
        .segmentJitterMs("string")
        .segmentMs("string")
        .build())
    .name("string")
    .partitionCount(0)
    .replicationFactor(0)
    .build());
Python

database_kafka_topic_resource = digitalocean.DatabaseKafkaTopic("databaseKafkaTopicResource",
    cluster_id="string",
    configs=[{
        "cleanup_policy": "string",
        "compression_type": "string",
        "delete_retention_ms": "string",
        "file_delete_delay_ms": "string",
        "flush_messages": "string",
        "flush_ms": "string",
        "index_interval_bytes": "string",
        "max_compaction_lag_ms": "string",
        "max_message_bytes": "string",
        "message_down_conversion_enable": False,
        "message_format_version": "string",
        "message_timestamp_difference_max_ms": "string",
        "message_timestamp_type": "string",
        "min_cleanable_dirty_ratio": 0,
        "min_compaction_lag_ms": "string",
        "min_insync_replicas": 0,
        "preallocate": False,
        "retention_bytes": "string",
        "retention_ms": "string",
        "segment_bytes": "string",
        "segment_index_bytes": "string",
        "segment_jitter_ms": "string",
        "segment_ms": "string",
    }],
    name="string",
    partition_count=0,
    replication_factor=0)
TypeScript

const databaseKafkaTopicResource = new digitalocean.DatabaseKafkaTopic("databaseKafkaTopicResource", {
    clusterId: "string",
    configs: [{
        cleanupPolicy: "string",
        compressionType: "string",
        deleteRetentionMs: "string",
        fileDeleteDelayMs: "string",
        flushMessages: "string",
        flushMs: "string",
        indexIntervalBytes: "string",
        maxCompactionLagMs: "string",
        maxMessageBytes: "string",
        messageDownConversionEnable: false,
        messageFormatVersion: "string",
        messageTimestampDifferenceMaxMs: "string",
        messageTimestampType: "string",
        minCleanableDirtyRatio: 0,
        minCompactionLagMs: "string",
        minInsyncReplicas: 0,
        preallocate: false,
        retentionBytes: "string",
        retentionMs: "string",
        segmentBytes: "string",
        segmentIndexBytes: "string",
        segmentJitterMs: "string",
        segmentMs: "string",
    }],
    name: "string",
    partitionCount: 0,
    replicationFactor: 0,
});
YAML

type: digitalocean:DatabaseKafkaTopic
properties:
  clusterId: string
  configs:
    - cleanupPolicy: string
      compressionType: string
      deleteRetentionMs: string
      fileDeleteDelayMs: string
      flushMessages: string
      flushMs: string
      indexIntervalBytes: string
      maxCompactionLagMs: string
      maxMessageBytes: string
      messageDownConversionEnable: false
      messageFormatVersion: string
      messageTimestampDifferenceMaxMs: string
      messageTimestampType: string
      minCleanableDirtyRatio: 0
      minCompactionLagMs: string
      minInsyncReplicas: 0
      preallocate: false
      retentionBytes: string
      retentionMs: string
      segmentBytes: string
      segmentIndexBytes: string
      segmentJitterMs: string
      segmentMs: string
  name: string
  partitionCount: 0
  replicationFactor: 0
DatabaseKafkaTopic Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The DatabaseKafkaTopic resource accepts the following input properties:
- clusterId (string)
- The ID of the source database cluster. Note: this must be a Kafka cluster.
- configs (list of DatabaseKafkaTopicConfig)
- A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
- name (string)
- The name for the topic.
- partitionCount (int)
- The number of partitions for the topic. The default and minimum is 3; the maximum is 2048.
- replicationFactor (int)
- The number of nodes that topics are replicated across. The default and minimum is 2; the maximum is the number of nodes in the cluster.

Property names follow each language's conventions: clusterId in TypeScript and YAML, ClusterId in Go and .NET, and cluster_id in Python.
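
Because defaults are applied to every omitted parameter, the only inputs a topic strictly needs are a cluster ID and a name. A minimal TypeScript sketch (the clusterId config key below is an assumed stack configuration value, not part of the provider API):

import * as pulumi from "@pulumi/pulumi";
import * as digitalocean from "@pulumi/digitalocean";

const config = new pulumi.Config();
// ID of an existing Kafka cluster, e.g. set via `pulumi config set clusterId <id>`.
const clusterId = config.require("clusterId");

// partitionCount, replicationFactor, and all configs fall back to their defaults.
const events = new digitalocean.DatabaseKafkaTopic("events", {
    clusterId: clusterId,
    name: "events",
});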
Outputs
All input properties are implicitly available as output properties. Additionally, the DatabaseKafkaTopic resource produces the following output properties:
- id (string)
- The provider-assigned unique ID for this managed resource.
- state (string)
- The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
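
For example, the computed state can be surfaced as a stack output; a short TypeScript sketch reusing topic_01 from the example above:

// Reports "active" once the topic has finished provisioning.
export const topicState = topic_01.state;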
Look up Existing DatabaseKafkaTopic Resource
Get an existing DatabaseKafkaTopic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DatabaseKafkaTopicState, opts?: CustomResourceOptions): DatabaseKafkaTopic

@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        cluster_id: Optional[str] = None,
        configs: Optional[Sequence[DatabaseKafkaTopicConfigArgs]] = None,
        name: Optional[str] = None,
        partition_count: Optional[int] = None,
        replication_factor: Optional[int] = None,
        state: Optional[str] = None) -> DatabaseKafkaTopic

func GetDatabaseKafkaTopic(ctx *Context, name string, id IDInput, state *DatabaseKafkaTopicState, opts ...ResourceOption) (*DatabaseKafkaTopic, error)

public static DatabaseKafkaTopic Get(string name, Input<string> id, DatabaseKafkaTopicState? state, CustomResourceOptions? opts = null)

public static DatabaseKafkaTopic get(String name, Output<String> id, DatabaseKafkaTopicState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
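
As a TypeScript sketch, an existing topic can be looked up by its provider ID; this assumes the ID takes the same cluster-ID-and-topic-name form, joined with a comma, that the import section below uses:

import * as digitalocean from "@pulumi/digitalocean";

// Adopt the state of a topic that already exists, without managing it.
const existing = digitalocean.DatabaseKafkaTopic.get(
    "existing-topic",
    "245bcfd0-7f31-4ce6-a2bc-475a116cca97,topic-01",
);
export const existingPartitions = existing.partitionCount;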
- name (resource_name in Python)
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
The following state arguments are supported:
- clusterId (string)
- The ID of the source database cluster. Note: this must be a Kafka cluster.
- configs (list of DatabaseKafkaTopicConfig)
- A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. The config block is documented below.
- name (string)
- The name for the topic.
- partitionCount (int)
- The number of partitions for the topic. The default and minimum is 3; the maximum is 2048.
- replicationFactor (int)
- The number of nodes that topics are replicated across. The default and minimum is 2; the maximum is the number of nodes in the cluster.
- state (string)
- The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
Supporting Types
DatabaseKafkaTopicConfig, DatabaseKafkaTopicConfigArgs
- cleanupPolicy (string)
- The topic cleanup policy that determines whether messages are deleted, compacted, or both when retention policies are violated. May be one of "delete", "compact", or "compact_delete".
- compressionType (string)
- The compression codec used for the topic. May be one of "uncompressed", "gzip", "snappy", "lz4", "producer", or "zstd". "uncompressed" indicates that there is no compression, and "producer" retains the original compression codec set by the producer.
- deleteRetentionMs (string)
- The amount of time, in ms, that deleted records are retained.
- fileDeleteDelayMs (string)
- The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
- flushMessages (string)
- The number of messages accumulated on a topic partition before they are flushed to disk.
- flushMs (string)
- The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
- indexIntervalBytes (string)
- The interval, in bytes, at which entries are added to the offset index.
- maxCompactionLagMs (string)
- The maximum time, in ms, that a particular message will remain uncompacted. This does not apply if compression_type is set to "uncompressed", or if it is set to "producer" and the producer is not using compression.
- maxMessageBytes (string)
- The maximum size, in bytes, of a message.
- messageDownConversionEnable (bool)
- Determines whether down-conversion of message formats for consumers is enabled.
- messageFormatVersion (string)
- The version of the inter-broker protocol that will be used. May be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", or "3.6-IV2".
- messageTimestampDifferenceMaxMs (string)
- The maximum difference, in ms, between the timestamp specified in a message and the time the broker receives the message.
- messageTimestampType (string)
- Specifies which timestamp to use for the message. May be one of "create_time" or "log_append_time".
- minCleanableDirtyRatio (float)
- A scale between 0.0 and 1.0 that controls how frequently the compactor runs. Larger values mean more frequent compactions. This is often paired with max_compaction_lag_ms to control the compactor frequency.
- minCompactionLagMs (string)
- The minimum time, in ms, that a message will remain uncompacted in the log.
- minInsyncReplicas (int)
- The number of replicas that must acknowledge a write for it to be considered successful. -1 is a special setting indicating that all nodes must acknowledge a message before the write is considered successful. The default is 1, meaning at least one replica must acknowledge a write.
- preallocate (bool)
- Determines whether to preallocate a file on disk when creating a new log segment within a topic.
- retentionBytes (string)
- The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating no limit.
- retentionMs (string)
- The maximum time, in ms, that a topic log file is retained before being deleted. -1 is a special setting indicating no limit.
- segmentBytes (string)
- The maximum size, in bytes, of a single topic log file.
- segmentIndexBytes (string)
- The maximum size, in bytes, of the offset index.
- segmentJitterMs (string)
- The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
- segmentMs (string)
- The maximum time, in ms, before the topic log is flushed to disk.

Parameter names follow each language's conventions: cleanupPolicy in TypeScript, Java, and YAML; CleanupPolicy in Go and .NET; cleanup_policy in Python.
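
These parameters interact with the cleanup policy: with cleanupPolicy set to "delete", retentionMs and retentionBytes bound how long and how much data is kept. A TypeScript sketch of a time-and-size-bounded topic (the values are illustrative, and the clusterId reference assumes a kafka_example cluster defined elsewhere in the program):

const logs = new digitalocean.DatabaseKafkaTopic("logs", {
    clusterId: kafka_example.id,
    name: "logs",
    configs: [{
        cleanupPolicy: "delete",
        retentionMs: "86400000", // keep messages for at most 24 hours...
        retentionBytes: "1073741824", // ...or until the log reaches 1 GiB
    }],
});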
Import
Topics can be imported using the id of the source cluster and the name of the topic joined with a comma. For example:
$ pulumi import digitalocean:index/databaseKafkaTopic:DatabaseKafkaTopic topic-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,topic-01
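
After the import, the topic is tracked in the stack's state, and subsequent pulumi up runs expect a matching resource definition in the program. A sketch of what that definition could look like in TypeScript (every attribute must mirror the real topic, or the next preview will show a diff):

const topic01 = new digitalocean.DatabaseKafkaTopic("topic-01", {
    clusterId: "245bcfd0-7f31-4ce6-a2bc-475a116cca97",
    name: "topic-01",
    partitionCount: 3,
    replicationFactor: 2,
});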
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- DigitalOcean pulumi/pulumi-digitalocean
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the digitalocean Terraform Provider.