yandex.MdbClickhouseCluster
Manages a ClickHouse cluster within Yandex.Cloud. For more information, see the official documentation.
Example Usage
Example of creating a single-node ClickHouse cluster.
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
public MyStack()
{
var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
{
});
var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.5.0.0/24",
},
Zone = "ru-central1-a",
});
var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
{
Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
{
Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
{
BackgroundPoolSize = 16,
BackgroundSchedulePoolSize = 16,
Compressions =
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
{
Method = "LZ4",
MinPartSize = 1024,
MinPartSizeRatio = 0.5,
},
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
{
Method = "ZSTD",
MinPartSize = 2048,
MinPartSizeRatio = 0.7,
},
},
GeobaseUri = "",
GraphiteRollups =
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
{
Name = "rollup1",
Patterns =
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
{
Function = "func1",
Regexp = "abc",
Retentions =
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
{
Age = 1000,
Precision = 3,
},
},
},
},
},
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
{
Name = "rollup2",
Patterns =
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
{
Function = "func2",
Retentions =
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
{
Age = 2000,
Precision = 5,
},
},
},
},
},
},
Kafka = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaArgs
{
SaslMechanism = "SASL_MECHANISM_GSSAPI",
SaslPassword = "pass1",
SaslUsername = "user1",
SecurityProtocol = "SECURITY_PROTOCOL_PLAINTEXT",
},
KafkaTopics =
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
{
Name = "topic1",
Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
{
SaslMechanism = "SASL_MECHANISM_SCRAM_SHA_256",
SaslPassword = "pass2",
SaslUsername = "user2",
SecurityProtocol = "SECURITY_PROTOCOL_SSL",
},
},
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
{
Name = "topic2",
Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
{
SaslMechanism = "SASL_MECHANISM_PLAIN",
SecurityProtocol = "SECURITY_PROTOCOL_SASL_PLAINTEXT",
},
},
},
KeepAliveTimeout = 3000,
LogLevel = "TRACE",
MarkCacheSize = 5368709120,
MaxConcurrentQueries = 50,
MaxConnections = 100,
MaxPartitionSizeToDrop = 53687091200,
MaxTableSizeToDrop = 53687091200,
MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
{
MaxBytesToMergeAtMinSpaceInPool = 1048576,
MaxReplicatedMergesInQueue = 16,
NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = 8,
PartsToDelayInsert = 150,
PartsToThrowInsert = 300,
ReplicatedDeduplicationWindow = 100,
ReplicatedDeduplicationWindowSeconds = 604800,
},
MetricLogEnabled = true,
MetricLogRetentionSize = 536870912,
MetricLogRetentionTime = 2592000,
PartLogRetentionSize = 536870912,
PartLogRetentionTime = 2592000,
QueryLogRetentionSize = 1073741824,
QueryLogRetentionTime = 2592000,
QueryThreadLogEnabled = true,
QueryThreadLogRetentionSize = 536870912,
QueryThreadLogRetentionTime = 2592000,
Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
{
Password = "rabbit_pass",
Username = "rabbit_user",
},
TextLogEnabled = true,
TextLogLevel = "TRACE",
TextLogRetentionSize = 536870912,
TextLogRetentionTime = 2592000,
Timezone = "UTC",
TraceLogEnabled = true,
TraceLogRetentionSize = 536870912,
TraceLogRetentionTime = 2592000,
UncompressedCacheSize = 8589934592,
},
Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
{
DiskSize = 32,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.micro",
},
},
CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
{
Enabled = false,
},
Databases =
{
new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
{
Name = "db_name",
},
},
Environment = "PRESTABLE",
FormatSchemas =
{
new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
{
Name = "test_schema",
Type = "FORMAT_SCHEMA_TYPE_CAPNPROTO",
Uri = "https://storage.yandexcloud.net/ch-data/schema.proto",
},
},
Hosts =
{
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
SubnetId = fooVpcSubnet.Id,
Type = "CLICKHOUSE",
Zone = "ru-central1-a",
},
},
MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
{
Type = "ANYTIME",
},
MlModels =
{
new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
{
Name = "test_model",
Type = "ML_MODEL_TYPE_CATBOOST",
Uri = "https://storage.yandexcloud.net/ch-data/train.csv",
},
},
NetworkId = fooVpcNetwork.Id,
ServiceAccountId = "your_service_account_id",
Users =
{
new Yandex.Inputs.MdbClickhouseClusterUserArgs
{
Name = "user",
Password = "your_password",
Permissions =
{
new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
{
DatabaseName = "db_name",
},
},
Quotas =
{
new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
{
Errors = 1000,
IntervalDuration = 3600000,
Queries = 10000,
},
new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
{
Errors = 5000,
IntervalDuration = 79800000,
Queries = 50000,
},
},
Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
{
MaxMemoryUsageForUser = 1000000000,
OutputFormatJsonQuote64bitIntegers = true,
ReadOverflowMode = "throw",
},
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
if err != nil {
return err
}
fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.5.0.0/24"),
},
Zone: pulumi.String("ru-central1-a"),
})
if err != nil {
return err
}
_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
Config: &yandex.MdbClickhouseClusterClickhouseConfigArgs{
BackgroundPoolSize: pulumi.Int(16),
BackgroundSchedulePoolSize: pulumi.Int(16),
Compressions: yandex.MdbClickhouseClusterClickhouseConfigCompressionArray{
&yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs{
Method: pulumi.String("LZ4"),
MinPartSize: pulumi.Int(1024),
MinPartSizeRatio: pulumi.Float64(0.5),
},
&yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs{
Method: pulumi.String("ZSTD"),
MinPartSize: pulumi.Int(2048),
MinPartSizeRatio: pulumi.Float64(0.7),
},
},
GeobaseUri: pulumi.String(""),
GraphiteRollups: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs{
Name: pulumi.String("rollup1"),
Patterns: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs{
Function: pulumi.String("func1"),
Regexp: pulumi.String("abc"),
Retentions: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs{
Age: pulumi.Int(1000),
Precision: pulumi.Int(3),
},
},
},
},
},
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs{
Name: pulumi.String("rollup2"),
Patterns: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs{
Function: pulumi.String("func2"),
Retentions: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs{
Age: pulumi.Int(2000),
Precision: pulumi.Int(5),
},
},
},
},
},
},
Kafka: &yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs{
SaslMechanism: pulumi.String("SASL_MECHANISM_GSSAPI"),
SaslPassword: pulumi.String("pass1"),
SaslUsername: pulumi.String("user1"),
SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_PLAINTEXT"),
},
KafkaTopics: yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArray{
&yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs{
Name: pulumi.String("topic1"),
Settings: &yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs{
SaslMechanism: pulumi.String("SASL_MECHANISM_SCRAM_SHA_256"),
SaslPassword: pulumi.String("pass2"),
SaslUsername: pulumi.String("user2"),
SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_SSL"),
},
},
&yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs{
Name: pulumi.String("topic2"),
Settings: &yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs{
SaslMechanism: pulumi.String("SASL_MECHANISM_PLAIN"),
SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_SASL_PLAINTEXT"),
},
},
},
KeepAliveTimeout: pulumi.Int(3000),
LogLevel: pulumi.String("TRACE"),
MarkCacheSize: pulumi.Int(5368709120),
MaxConcurrentQueries: pulumi.Int(50),
MaxConnections: pulumi.Int(100),
MaxPartitionSizeToDrop: pulumi.Int(53687091200),
MaxTableSizeToDrop: pulumi.Int(53687091200),
MergeTree: &yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs{
MaxBytesToMergeAtMinSpaceInPool: pulumi.Int(1048576),
MaxReplicatedMergesInQueue: pulumi.Int(16),
NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: pulumi.Int(8),
PartsToDelayInsert: pulumi.Int(150),
PartsToThrowInsert: pulumi.Int(300),
ReplicatedDeduplicationWindow: pulumi.Int(100),
ReplicatedDeduplicationWindowSeconds: pulumi.Int(604800),
},
MetricLogEnabled: pulumi.Bool(true),
MetricLogRetentionSize: pulumi.Int(536870912),
MetricLogRetentionTime: pulumi.Int(2592000),
PartLogRetentionSize: pulumi.Int(536870912),
PartLogRetentionTime: pulumi.Int(2592000),
QueryLogRetentionSize: pulumi.Int(1073741824),
QueryLogRetentionTime: pulumi.Int(2592000),
QueryThreadLogEnabled: pulumi.Bool(true),
QueryThreadLogRetentionSize: pulumi.Int(536870912),
QueryThreadLogRetentionTime: pulumi.Int(2592000),
Rabbitmq: &yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs{
Password: pulumi.String("rabbit_pass"),
Username: pulumi.String("rabbit_user"),
},
TextLogEnabled: pulumi.Bool(true),
TextLogLevel: pulumi.String("TRACE"),
TextLogRetentionSize: pulumi.Int(536870912),
TextLogRetentionTime: pulumi.Int(2592000),
Timezone: pulumi.String("UTC"),
TraceLogEnabled: pulumi.Bool(true),
TraceLogRetentionSize: pulumi.Int(536870912),
TraceLogRetentionTime: pulumi.Int(2592000),
UncompressedCacheSize: pulumi.Int(8589934592),
},
Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
DiskSize: pulumi.Int(32),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.micro"),
},
},
CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
Enabled: pulumi.Bool(false),
},
Databases: yandex.MdbClickhouseClusterDatabaseArray{
&yandex.MdbClickhouseClusterDatabaseArgs{
Name: pulumi.String("db_name"),
},
},
Environment: pulumi.String("PRESTABLE"),
FormatSchemas: yandex.MdbClickhouseClusterFormatSchemaArray{
&yandex.MdbClickhouseClusterFormatSchemaArgs{
Name: pulumi.String("test_schema"),
Type: pulumi.String("FORMAT_SCHEMA_TYPE_CAPNPROTO"),
Uri: pulumi.String("https://storage.yandexcloud.net/ch-data/schema.proto"),
},
},
Hosts: yandex.MdbClickhouseClusterHostArray{
&yandex.MdbClickhouseClusterHostArgs{
SubnetId: fooVpcSubnet.ID(),
Type: pulumi.String("CLICKHOUSE"),
Zone: pulumi.String("ru-central1-a"),
},
},
MaintenanceWindow: &yandex.MdbClickhouseClusterMaintenanceWindowArgs{
Type: pulumi.String("ANYTIME"),
},
MlModels: yandex.MdbClickhouseClusterMlModelArray{
&yandex.MdbClickhouseClusterMlModelArgs{
Name: pulumi.String("test_model"),
Type: pulumi.String("ML_MODEL_TYPE_CATBOOST"),
Uri: pulumi.String("https://storage.yandexcloud.net/ch-data/train.csv"),
},
},
NetworkId: fooVpcNetwork.ID(),
ServiceAccountId: pulumi.String("your_service_account_id"),
Users: yandex.MdbClickhouseClusterUserArray{
&yandex.MdbClickhouseClusterUserArgs{
Name: pulumi.String("user"),
Password: pulumi.String("your_password"),
Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
&yandex.MdbClickhouseClusterUserPermissionArgs{
DatabaseName: pulumi.String("db_name"),
},
},
Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
&yandex.MdbClickhouseClusterUserQuotaArgs{
Errors: pulumi.Int(1000),
IntervalDuration: pulumi.Int(3600000),
Queries: pulumi.Int(10000),
},
&yandex.MdbClickhouseClusterUserQuotaArgs{
Errors: pulumi.Int(5000),
IntervalDuration: pulumi.Int(79800000),
Queries: pulumi.Int(50000),
},
},
Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
MaxMemoryUsageForUser: pulumi.Int(1000000000),
OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
ReadOverflowMode: pulumi.String("throw"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.5.0.0/24"],
zone="ru-central1-a")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
config=yandex.MdbClickhouseClusterClickhouseConfigArgs(
background_pool_size=16,
background_schedule_pool_size=16,
compressions=[
yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs(
method="LZ4",
min_part_size=1024,
min_part_size_ratio=0.5,
),
yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs(
method="ZSTD",
min_part_size=2048,
min_part_size_ratio=0.7,
),
],
geobase_uri="",
graphite_rollups=[
yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs(
name="rollup1",
patterns=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs(
function="func1",
regexp="abc",
retentions=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs(
age=1000,
precision=3,
)],
)],
),
yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs(
name="rollup2",
patterns=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs(
function="func2",
retentions=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs(
age=2000,
precision=5,
)],
)],
),
],
kafka=yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs(
sasl_mechanism="SASL_MECHANISM_GSSAPI",
sasl_password="pass1",
sasl_username="user1",
security_protocol="SECURITY_PROTOCOL_PLAINTEXT",
),
kafka_topics=[
yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs(
name="topic1",
settings=yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs(
sasl_mechanism="SASL_MECHANISM_SCRAM_SHA_256",
sasl_password="pass2",
sasl_username="user2",
security_protocol="SECURITY_PROTOCOL_SSL",
),
),
yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs(
name="topic2",
settings=yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs(
sasl_mechanism="SASL_MECHANISM_PLAIN",
security_protocol="SECURITY_PROTOCOL_SASL_PLAINTEXT",
),
),
],
keep_alive_timeout=3000,
log_level="TRACE",
mark_cache_size=5368709120,
max_concurrent_queries=50,
max_connections=100,
max_partition_size_to_drop=53687091200,
max_table_size_to_drop=53687091200,
merge_tree=yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs(
max_bytes_to_merge_at_min_space_in_pool=1048576,
max_replicated_merges_in_queue=16,
number_of_free_entries_in_pool_to_lower_max_size_of_merge=8,
parts_to_delay_insert=150,
parts_to_throw_insert=300,
replicated_deduplication_window=100,
replicated_deduplication_window_seconds=604800,
),
metric_log_enabled=True,
metric_log_retention_size=536870912,
metric_log_retention_time=2592000,
part_log_retention_size=536870912,
part_log_retention_time=2592000,
query_log_retention_size=1073741824,
query_log_retention_time=2592000,
query_thread_log_enabled=True,
query_thread_log_retention_size=536870912,
query_thread_log_retention_time=2592000,
rabbitmq=yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs(
password="rabbit_pass",
username="rabbit_user",
),
text_log_enabled=True,
text_log_level="TRACE",
text_log_retention_size=536870912,
text_log_retention_time=2592000,
timezone="UTC",
trace_log_enabled=True,
trace_log_retention_size=536870912,
trace_log_retention_time=2592000,
uncompressed_cache_size=8589934592,
),
resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
disk_size=32,
disk_type_id="network-ssd",
resource_preset_id="s2.micro",
),
),
cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
enabled=False,
),
databases=[yandex.MdbClickhouseClusterDatabaseArgs(
name="db_name",
)],
environment="PRESTABLE",
format_schemas=[yandex.MdbClickhouseClusterFormatSchemaArgs(
name="test_schema",
type="FORMAT_SCHEMA_TYPE_CAPNPROTO",
uri="https://storage.yandexcloud.net/ch-data/schema.proto",
)],
hosts=[yandex.MdbClickhouseClusterHostArgs(
subnet_id=foo_vpc_subnet.id,
type="CLICKHOUSE",
zone="ru-central1-a",
)],
maintenance_window=yandex.MdbClickhouseClusterMaintenanceWindowArgs(
type="ANYTIME",
),
ml_models=[yandex.MdbClickhouseClusterMlModelArgs(
name="test_model",
type="ML_MODEL_TYPE_CATBOOST",
uri="https://storage.yandexcloud.net/ch-data/train.csv",
)],
network_id=foo_vpc_network.id,
service_account_id="your_service_account_id",
users=[yandex.MdbClickhouseClusterUserArgs(
name="user",
password="your_password",
permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
database_name="db_name",
)],
quotas=[
yandex.MdbClickhouseClusterUserQuotaArgs(
errors=1000,
interval_duration=3600000,
queries=10000,
),
yandex.MdbClickhouseClusterUserQuotaArgs(
errors=5000,
interval_duration=79800000,
queries=50000,
),
],
settings=yandex.MdbClickhouseClusterUserSettingsArgs(
max_memory_usage_for_user=1000000000,
output_format_json_quote64bit_integers=True,
read_overflow_mode="throw",
),
)])
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.5.0.0/24"],
zone: "ru-central1-a",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
clickhouse: {
config: {
backgroundPoolSize: 16,
backgroundSchedulePoolSize: 16,
compressions: [
{
method: "LZ4",
minPartSize: 1024,
minPartSizeRatio: 0.5,
},
{
method: "ZSTD",
minPartSize: 2048,
minPartSizeRatio: 0.7,
},
],
geobaseUri: "",
graphiteRollups: [
{
name: "rollup1",
patterns: [{
function: "func1",
regexp: "abc",
retentions: [{
age: 1000,
precision: 3,
}],
}],
},
{
name: "rollup2",
patterns: [{
function: "func2",
retentions: [{
age: 2000,
precision: 5,
}],
}],
},
],
kafka: {
saslMechanism: "SASL_MECHANISM_GSSAPI",
saslPassword: "pass1",
saslUsername: "user1",
securityProtocol: "SECURITY_PROTOCOL_PLAINTEXT",
},
kafkaTopics: [
{
name: "topic1",
settings: {
saslMechanism: "SASL_MECHANISM_SCRAM_SHA_256",
saslPassword: "pass2",
saslUsername: "user2",
securityProtocol: "SECURITY_PROTOCOL_SSL",
},
},
{
name: "topic2",
settings: {
saslMechanism: "SASL_MECHANISM_PLAIN",
securityProtocol: "SECURITY_PROTOCOL_SASL_PLAINTEXT",
},
},
],
keepAliveTimeout: 3000,
logLevel: "TRACE",
markCacheSize: 5368709120,
maxConcurrentQueries: 50,
maxConnections: 100,
maxPartitionSizeToDrop: 53687091200,
maxTableSizeToDrop: 53687091200,
mergeTree: {
maxBytesToMergeAtMinSpaceInPool: 1048576,
maxReplicatedMergesInQueue: 16,
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 8,
partsToDelayInsert: 150,
partsToThrowInsert: 300,
replicatedDeduplicationWindow: 100,
replicatedDeduplicationWindowSeconds: 604800,
},
metricLogEnabled: true,
metricLogRetentionSize: 536870912,
metricLogRetentionTime: 2592000,
partLogRetentionSize: 536870912,
partLogRetentionTime: 2592000,
queryLogRetentionSize: 1073741824,
queryLogRetentionTime: 2592000,
queryThreadLogEnabled: true,
queryThreadLogRetentionSize: 536870912,
queryThreadLogRetentionTime: 2592000,
rabbitmq: {
password: "rabbit_pass",
username: "rabbit_user",
},
textLogEnabled: true,
textLogLevel: "TRACE",
textLogRetentionSize: 536870912,
textLogRetentionTime: 2592000,
timezone: "UTC",
traceLogEnabled: true,
traceLogRetentionSize: 536870912,
traceLogRetentionTime: 2592000,
uncompressedCacheSize: 8589934592,
},
resources: {
diskSize: 32,
diskTypeId: "network-ssd",
resourcePresetId: "s2.micro",
},
},
cloudStorage: {
enabled: false,
},
databases: [{
name: "db_name",
}],
environment: "PRESTABLE",
formatSchemas: [{
name: "test_schema",
type: "FORMAT_SCHEMA_TYPE_CAPNPROTO",
uri: "https://storage.yandexcloud.net/ch-data/schema.proto",
}],
hosts: [{
subnetId: fooVpcSubnet.id,
type: "CLICKHOUSE",
zone: "ru-central1-a",
}],
maintenanceWindow: {
type: "ANYTIME",
},
mlModels: [{
name: "test_model",
type: "ML_MODEL_TYPE_CATBOOST",
uri: "https://storage.yandexcloud.net/ch-data/train.csv",
}],
networkId: fooVpcNetwork.id,
serviceAccountId: "your_service_account_id",
users: [{
name: "user",
password: "your_password",
permissions: [{
databaseName: "db_name",
}],
quotas: [
{
errors: 1000,
intervalDuration: 3600000,
queries: 10000,
},
{
errors: 5000,
intervalDuration: 79800000,
queries: 50000,
},
],
settings: {
maxMemoryUsageForUser: 1000000000,
outputFormatJsonQuote64bitIntegers: true,
readOverflowMode: "throw",
},
}],
});
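Once the cluster is up, its output properties can be consumed elsewhere in the program. For instance, a minimal sketch (TypeScript, assuming the single-node program above) that exports the provider-assigned FQDNs of the cluster hosts:
// Each entry in `hosts` gains a provider-assigned `fqdn` once the cluster
// is created; exporting it makes it visible via `pulumi stack output`.
export const clickhouseHostFqdns = fooMdbClickhouseCluster.hosts.apply(
    hosts => hosts.map(host => host.fqdn));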
Example of creating a high-availability (HA) ClickHouse cluster.
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
public MyStack()
{
var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
{
});
var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.1.0.0/24",
},
Zone = "ru-central1-a",
});
var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.2.0.0/24",
},
Zone = "ru-central1-b",
});
var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.3.0.0/24",
},
Zone = "ru-central1-c",
});
var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
{
Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
{
Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
{
DiskSize = 16,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.micro",
},
},
CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
{
Enabled = false,
},
Databases =
{
new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
{
Name = "db_name",
},
},
Environment = "PRESTABLE",
Hosts =
{
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
SubnetId = fooVpcSubnet.Id,
Type = "CLICKHOUSE",
Zone = "ru-central1-a",
},
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
SubnetId = bar.Id,
Type = "CLICKHOUSE",
Zone = "ru-central1-b",
},
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
SubnetId = fooVpcSubnet.Id,
Type = "ZOOKEEPER",
Zone = "ru-central1-a",
},
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
SubnetId = bar.Id,
Type = "ZOOKEEPER",
Zone = "ru-central1-b",
},
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
SubnetId = baz.Id,
Type = "ZOOKEEPER",
Zone = "ru-central1-c",
},
},
NetworkId = fooVpcNetwork.Id,
Users =
{
new Yandex.Inputs.MdbClickhouseClusterUserArgs
{
Name = "user",
Password = "password",
Permissions =
{
new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
{
DatabaseName = "db_name",
},
},
Quotas =
{
new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
{
Errors = 1000,
IntervalDuration = 3600000,
Queries = 10000,
},
new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
{
Errors = 5000,
IntervalDuration = 79800000,
Queries = 50000,
},
},
Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
{
MaxMemoryUsageForUser = 1000000000,
OutputFormatJsonQuote64bitIntegers = true,
ReadOverflowMode = "throw",
},
},
},
Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
{
Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
{
DiskSize = 10,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.micro",
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
if err != nil {
return err
}
fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.1.0.0/24"),
},
Zone: pulumi.String("ru-central1-a"),
})
if err != nil {
return err
}
bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.2.0.0/24"),
},
Zone: pulumi.String("ru-central1-b"),
})
if err != nil {
return err
}
baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.3.0.0/24"),
},
Zone: pulumi.String("ru-central1-c"),
})
if err != nil {
return err
}
_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
DiskSize: pulumi.Int(16),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.micro"),
},
},
CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
Enabled: pulumi.Bool(false),
},
Databases: yandex.MdbClickhouseClusterDatabaseArray{
&yandex.MdbClickhouseClusterDatabaseArgs{
Name: pulumi.String("db_name"),
},
},
Environment: pulumi.String("PRESTABLE"),
Hosts: yandex.MdbClickhouseClusterHostArray{
&yandex.MdbClickhouseClusterHostArgs{
SubnetId: fooVpcSubnet.ID(),
Type: pulumi.String("CLICKHOUSE"),
Zone: pulumi.String("ru-central1-a"),
},
&yandex.MdbClickhouseClusterHostArgs{
SubnetId: bar.ID(),
Type: pulumi.String("CLICKHOUSE"),
Zone: pulumi.String("ru-central1-b"),
},
&yandex.MdbClickhouseClusterHostArgs{
SubnetId: fooVpcSubnet.ID(),
Type: pulumi.String("ZOOKEEPER"),
Zone: pulumi.String("ru-central1-a"),
},
&yandex.MdbClickhouseClusterHostArgs{
SubnetId: bar.ID(),
Type: pulumi.String("ZOOKEEPER"),
Zone: pulumi.String("ru-central1-b"),
},
&yandex.MdbClickhouseClusterHostArgs{
SubnetId: baz.ID(),
Type: pulumi.String("ZOOKEEPER"),
Zone: pulumi.String("ru-central1-c"),
},
},
NetworkId: fooVpcNetwork.ID(),
Users: yandex.MdbClickhouseClusterUserArray{
&yandex.MdbClickhouseClusterUserArgs{
Name: pulumi.String("user"),
Password: pulumi.String("password"),
Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
&yandex.MdbClickhouseClusterUserPermissionArgs{
DatabaseName: pulumi.String("db_name"),
},
},
Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
&yandex.MdbClickhouseClusterUserQuotaArgs{
Errors: pulumi.Int(1000),
IntervalDuration: pulumi.Int(3600000),
Queries: pulumi.Int(10000),
},
&yandex.MdbClickhouseClusterUserQuotaArgs{
Errors: pulumi.Int(5000),
IntervalDuration: pulumi.Int(79800000),
Queries: pulumi.Int(50000),
},
},
Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
MaxMemoryUsageForUser: pulumi.Int(1000000000),
OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
ReadOverflowMode: pulumi.String("throw"),
},
},
},
Zookeeper: &yandex.MdbClickhouseClusterZookeeperArgs{
Resources: &yandex.MdbClickhouseClusterZookeeperResourcesArgs{
DiskSize: pulumi.Int(10),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.micro"),
},
},
})
if err != nil {
return err
}
return nil
})
}
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.1.0.0/24"],
zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.2.0.0/24"],
zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.3.0.0/24"],
zone="ru-central1-c")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
disk_size=16,
disk_type_id="network-ssd",
resource_preset_id="s2.micro",
),
),
cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
enabled=False,
),
databases=[yandex.MdbClickhouseClusterDatabaseArgs(
name="db_name",
)],
environment="PRESTABLE",
hosts=[
yandex.MdbClickhouseClusterHostArgs(
subnet_id=foo_vpc_subnet.id,
type="CLICKHOUSE",
zone="ru-central1-a",
),
yandex.MdbClickhouseClusterHostArgs(
subnet_id=bar.id,
type="CLICKHOUSE",
zone="ru-central1-b",
),
yandex.MdbClickhouseClusterHostArgs(
subnet_id=foo_vpc_subnet.id,
type="ZOOKEEPER",
zone="ru-central1-a",
),
yandex.MdbClickhouseClusterHostArgs(
subnet_id=bar.id,
type="ZOOKEEPER",
zone="ru-central1-b",
),
yandex.MdbClickhouseClusterHostArgs(
subnet_id=baz.id,
type="ZOOKEEPER",
zone="ru-central1-c",
),
],
network_id=foo_vpc_network.id,
users=[yandex.MdbClickhouseClusterUserArgs(
name="user",
password="password",
permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
database_name="db_name",
)],
quotas=[
yandex.MdbClickhouseClusterUserQuotaArgs(
errors=1000,
interval_duration=3600000,
queries=10000,
),
yandex.MdbClickhouseClusterUserQuotaArgs(
errors=5000,
interval_duration=79800000,
queries=50000,
),
],
settings=yandex.MdbClickhouseClusterUserSettingsArgs(
max_memory_usage_for_user=1000000000,
output_format_json_quote64bit_integers=True,
read_overflow_mode="throw",
),
)],
zookeeper=yandex.MdbClickhouseClusterZookeeperArgs(
resources=yandex.MdbClickhouseClusterZookeeperResourcesArgs(
disk_size=10,
disk_type_id="network-ssd",
resource_preset_id="s2.micro",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.1.0.0/24"],
zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.2.0.0/24"],
zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.3.0.0/24"],
zone: "ru-central1-c",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
clickhouse: {
resources: {
diskSize: 16,
diskTypeId: "network-ssd",
resourcePresetId: "s2.micro",
},
},
cloudStorage: {
enabled: false,
},
databases: [{
name: "db_name",
}],
environment: "PRESTABLE",
hosts: [
{
subnetId: fooVpcSubnet.id,
type: "CLICKHOUSE",
zone: "ru-central1-a",
},
{
subnetId: bar.id,
type: "CLICKHOUSE",
zone: "ru-central1-b",
},
{
subnetId: fooVpcSubnet.id,
type: "ZOOKEEPER",
zone: "ru-central1-a",
},
{
subnetId: bar.id,
type: "ZOOKEEPER",
zone: "ru-central1-b",
},
{
subnetId: baz.id,
type: "ZOOKEEPER",
zone: "ru-central1-c",
},
],
networkId: fooVpcNetwork.id,
users: [{
name: "user",
password: "password",
permissions: [{
databaseName: "db_name",
}],
quotas: [
{
errors: 1000,
intervalDuration: 3600000,
queries: 10000,
},
{
errors: 5000,
intervalDuration: 79800000,
queries: 50000,
},
],
settings: {
maxMemoryUsageForUser: 1000000000,
outputFormatJsonQuote64bitIntegers: true,
readOverflowMode: "throw",
},
}],
zookeeper: {
resources: {
diskSize: 10,
diskTypeId: "network-ssd",
resourcePresetId: "s2.micro",
},
},
});
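The host list above repeats one block per subnet and zone. In a real program the ZooKeeper hosts can be derived from the same subnet list instead of being written out by hand. A small TypeScript sketch, assuming the HA program above (the `zkSubnets` helper is illustrative, not part of the provider API):
// Pair each subnet with its availability zone once, then derive the
// ZOOKEEPER host blocks from that list.
const zkSubnets = [
    { subnet: fooVpcSubnet, zone: "ru-central1-a" },
    { subnet: bar, zone: "ru-central1-b" },
    { subnet: baz, zone: "ru-central1-c" },
];
// Produces the same three ZOOKEEPER host entries as the example above.
const zookeeperHosts = zkSubnets.map(({ subnet, zone }) => ({
    type: "ZOOKEEPER",
    zone: zone,
    subnetId: subnet.id,
}));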
Example of creating a sharded ClickHouse cluster.
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
public MyStack()
{
var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
{
});
var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.1.0.0/24",
},
Zone = "ru-central1-a",
});
var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.2.0.0/24",
},
Zone = "ru-central1-b",
});
var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.3.0.0/24",
},
Zone = "ru-central1-c",
});
var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
{
Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
{
Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
{
DiskSize = 16,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.micro",
},
},
CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
{
Enabled = false,
},
Databases =
{
new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
{
Name = "db_name",
},
},
Environment = "PRODUCTION",
Hosts =
{
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
ShardName = "shard1",
SubnetId = fooVpcSubnet.Id,
Type = "CLICKHOUSE",
Zone = "ru-central1-a",
},
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
ShardName = "shard1",
SubnetId = bar.Id,
Type = "CLICKHOUSE",
Zone = "ru-central1-b",
},
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
ShardName = "shard2",
SubnetId = bar.Id,
Type = "CLICKHOUSE",
Zone = "ru-central1-b",
},
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
ShardName = "shard2",
SubnetId = baz.Id,
Type = "CLICKHOUSE",
Zone = "ru-central1-c",
},
},
NetworkId = fooVpcNetwork.Id,
ShardGroups =
{
new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
{
Description = "Cluster configuration that contain only shard1",
Name = "single_shard_group",
ShardNames =
{
"shard1",
},
},
},
Users =
{
new Yandex.Inputs.MdbClickhouseClusterUserArgs
{
Name = "user",
Password = "password",
Permissions =
{
new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
{
DatabaseName = "db_name",
},
},
Quotas =
{
new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
{
Errors = 1000,
IntervalDuration = 3600000,
Queries = 10000,
},
new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
{
Errors = 5000,
IntervalDuration = 79800000,
Queries = 50000,
},
},
Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
{
MaxMemoryUsageForUser = 1000000000,
OutputFormatJsonQuote64bitIntegers = true,
ReadOverflowMode = "throw",
},
},
},
Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
{
Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
{
DiskSize = 10,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.micro",
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
if err != nil {
return err
}
fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.1.0.0/24"),
},
Zone: pulumi.String("ru-central1-a"),
})
if err != nil {
return err
}
bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.2.0.0/24"),
},
Zone: pulumi.String("ru-central1-b"),
})
if err != nil {
return err
}
baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.3.0.0/24"),
},
Zone: pulumi.String("ru-central1-c"),
})
if err != nil {
return err
}
_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
DiskSize: pulumi.Int(16),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.micro"),
},
},
CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
Enabled: pulumi.Bool(false),
},
Databases: yandex.MdbClickhouseClusterDatabaseArray{
&yandex.MdbClickhouseClusterDatabaseArgs{
Name: pulumi.String("db_name"),
},
},
Environment: pulumi.String("PRODUCTION"),
Hosts: yandex.MdbClickhouseClusterHostArray{
&yandex.MdbClickhouseClusterHostArgs{
ShardName: pulumi.String("shard1"),
SubnetId: fooVpcSubnet.ID(),
Type: pulumi.String("CLICKHOUSE"),
Zone: pulumi.String("ru-central1-a"),
},
&yandex.MdbClickhouseClusterHostArgs{
ShardName: pulumi.String("shard1"),
SubnetId: bar.ID(),
Type: pulumi.String("CLICKHOUSE"),
Zone: pulumi.String("ru-central1-b"),
},
&yandex.MdbClickhouseClusterHostArgs{
ShardName: pulumi.String("shard2"),
SubnetId: bar.ID(),
Type: pulumi.String("CLICKHOUSE"),
Zone: pulumi.String("ru-central1-b"),
},
&yandex.MdbClickhouseClusterHostArgs{
ShardName: pulumi.String("shard2"),
SubnetId: baz.ID(),
Type: pulumi.String("CLICKHOUSE"),
Zone: pulumi.String("ru-central1-c"),
},
},
NetworkId: fooVpcNetwork.ID(),
ShardGroups: yandex.MdbClickhouseClusterShardGroupArray{
&yandex.MdbClickhouseClusterShardGroupArgs{
Description: pulumi.String("Cluster configuration that contains only shard1"),
Name: pulumi.String("single_shard_group"),
ShardNames: pulumi.StringArray{
pulumi.String("shard1"),
},
},
},
Users: yandex.MdbClickhouseClusterUserArray{
&yandex.MdbClickhouseClusterUserArgs{
Name: pulumi.String("user"),
Password: pulumi.String("password"),
Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
&yandex.MdbClickhouseClusterUserPermissionArgs{
DatabaseName: pulumi.String("db_name"),
},
},
Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
&yandex.MdbClickhouseClusterUserQuotaArgs{
Errors: pulumi.Int(1000),
IntervalDuration: pulumi.Int(3600000),
Queries: pulumi.Int(10000),
},
&yandex.MdbClickhouseClusterUserQuotaArgs{
Errors: pulumi.Int(5000),
IntervalDuration: pulumi.Int(79800000),
Queries: pulumi.Int(50000),
},
},
Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
MaxMemoryUsageForUser: pulumi.Int(1000000000),
OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
ReadOverflowMode: pulumi.String("throw"),
},
},
},
Zookeeper: &yandex.MdbClickhouseClusterZookeeperArgs{
Resources: &yandex.MdbClickhouseClusterZookeeperResourcesArgs{
DiskSize: pulumi.Int(10),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.micro"),
},
},
})
if err != nil {
return err
}
return nil
})
}
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.1.0.0/24"],
zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.2.0.0/24"],
zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.3.0.0/24"],
zone="ru-central1-c")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
disk_size=16,
disk_type_id="network-ssd",
resource_preset_id="s2.micro",
),
),
cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
enabled=False,
),
databases=[yandex.MdbClickhouseClusterDatabaseArgs(
name="db_name",
)],
environment="PRODUCTION",
hosts=[
yandex.MdbClickhouseClusterHostArgs(
shard_name="shard1",
subnet_id=foo_vpc_subnet.id,
type="CLICKHOUSE",
zone="ru-central1-a",
),
yandex.MdbClickhouseClusterHostArgs(
shard_name="shard1",
subnet_id=bar.id,
type="CLICKHOUSE",
zone="ru-central1-b",
),
yandex.MdbClickhouseClusterHostArgs(
shard_name="shard2",
subnet_id=bar.id,
type="CLICKHOUSE",
zone="ru-central1-b",
),
yandex.MdbClickhouseClusterHostArgs(
shard_name="shard2",
subnet_id=baz.id,
type="CLICKHOUSE",
zone="ru-central1-c",
),
],
network_id=foo_vpc_network.id,
shard_groups=[yandex.MdbClickhouseClusterShardGroupArgs(
description="Cluster configuration that contain only shard1",
name="single_shard_group",
shard_names=["shard1"],
)],
users=[yandex.MdbClickhouseClusterUserArgs(
name="user",
password="password",
permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
database_name="db_name",
)],
quotas=[
yandex.MdbClickhouseClusterUserQuotaArgs(
errors=1000,
interval_duration=3600000,
queries=10000,
),
yandex.MdbClickhouseClusterUserQuotaArgs(
errors=5000,
interval_duration=79800000,
queries=50000,
),
],
settings=yandex.MdbClickhouseClusterUserSettingsArgs(
max_memory_usage_for_user=1000000000,
output_format_json_quote64bit_integers=True,
read_overflow_mode="throw",
),
)],
zookeeper=yandex.MdbClickhouseClusterZookeeperArgs(
resources=yandex.MdbClickhouseClusterZookeeperResourcesArgs(
disk_size=10,
disk_type_id="network-ssd",
resource_preset_id="s2.micro",
),
))
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.1.0.0/24"],
zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.2.0.0/24"],
zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.3.0.0/24"],
zone: "ru-central1-c",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
clickhouse: {
resources: {
diskSize: 16,
diskTypeId: "network-ssd",
resourcePresetId: "s2.micro",
},
},
cloudStorage: {
enabled: false,
},
databases: [{
name: "db_name",
}],
environment: "PRODUCTION",
hosts: [
{
shardName: "shard1",
subnetId: fooVpcSubnet.id,
type: "CLICKHOUSE",
zone: "ru-central1-a",
},
{
shardName: "shard1",
subnetId: bar.id,
type: "CLICKHOUSE",
zone: "ru-central1-b",
},
{
shardName: "shard2",
subnetId: bar.id,
type: "CLICKHOUSE",
zone: "ru-central1-b",
},
{
shardName: "shard2",
subnetId: baz.id,
type: "CLICKHOUSE",
zone: "ru-central1-c",
},
],
networkId: fooVpcNetwork.id,
shardGroups: [{
description: "Cluster configuration that contain only shard1",
name: "single_shard_group",
shardNames: ["shard1"],
}],
users: [{
name: "user",
password: "password",
permissions: [{
databaseName: "db_name",
}],
quotas: [
{
errors: 1000,
intervalDuration: 3600000,
queries: 10000,
},
{
errors: 5000,
intervalDuration: 79800000,
queries: 50000,
},
],
settings: {
maxMemoryUsageForUser: 1000000000,
outputFormatJsonQuote64bitIntegers: true,
readOverflowMode: "throw",
},
}],
zookeeper: {
resources: {
diskSize: 10,
diskTypeId: "network-ssd",
resourcePresetId: "s2.micro",
},
},
});
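The examples above hard-code user passwords for readability. In practice they would usually come from Pulumi configuration secrets, so that they are encrypted in the stack state rather than committed as plain text. A minimal sketch (TypeScript; the clickhouse:userPassword key is an assumed name, set beforehand with `pulumi config set --secret clickhouse:userPassword ...`):
import * as pulumi from "@pulumi/pulumi";

const cfg = new pulumi.Config("clickhouse");
// requireSecret returns an Output<string> that Pulumi keeps encrypted;
// it can be passed directly as a user's `password` input.
const userPassword = cfg.requireSecret("userPassword");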
Create MdbClickhouseCluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MdbClickhouseCluster(name: string, args: MdbClickhouseClusterArgs, opts?: CustomResourceOptions);
@overload
def MdbClickhouseCluster(resource_name: str,
args: MdbClickhouseClusterArgs,
opts: Optional[ResourceOptions] = None)
@overload
def MdbClickhouseCluster(resource_name: str,
opts: Optional[ResourceOptions] = None,
environment: Optional[str] = None,
network_id: Optional[str] = None,
hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
access: Optional[MdbClickhouseClusterAccessArgs] = None,
deletion_protection: Optional[bool] = None,
description: Optional[str] = None,
cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
folder_id: Optional[str] = None,
format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
labels: Optional[Mapping[str, str]] = None,
maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
copy_schema_on_new_hosts: Optional[bool] = None,
name: Optional[str] = None,
admin_password: Optional[str] = None,
security_group_ids: Optional[Sequence[str]] = None,
service_account_id: Optional[str] = None,
shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
sql_database_management: Optional[bool] = None,
sql_user_management: Optional[bool] = None,
users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
version: Optional[str] = None,
zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None)
func NewMdbClickhouseCluster(ctx *Context, name string, args MdbClickhouseClusterArgs, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public MdbClickhouseCluster(string name, MdbClickhouseClusterArgs args, CustomResourceOptions? opts = null)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args, CustomResourceOptions options)
type: yandex:MdbClickhouseCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
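The opts / options parameter accepts the standard Pulumi resource options, which behave the same for every resource type. A short sketch (TypeScript; `clusterArgs` is a hypothetical stand-in for an argument object like the ones in the examples above):
const cluster = new yandex.MdbClickhouseCluster("guarded", clusterArgs, {
    protect: true,              // make `pulumi destroy` fail for this resource
    ignoreChanges: ["users"],   // leave the `users` input unmanaged after creation
});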
Constructor example
The following reference example uses placeholder values for all input properties.
var mdbClickhouseClusterResource = new Yandex.MdbClickhouseCluster("mdbClickhouseClusterResource", new()
{
Environment = "string",
NetworkId = "string",
Hosts = new[]
{
new Yandex.Inputs.MdbClickhouseClusterHostArgs
{
Type = "string",
Zone = "string",
AssignPublicIp = false,
Fqdn = "string",
ShardName = "string",
SubnetId = "string",
},
},
Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
{
Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
{
DiskSize = 0,
DiskTypeId = "string",
ResourcePresetId = "string",
},
Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
{
BackgroundPoolSize = 0,
BackgroundSchedulePoolSize = 0,
Compressions = new[]
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
{
Method = "string",
MinPartSize = 0,
MinPartSizeRatio = 0,
},
},
GeobaseUri = "string",
GraphiteRollups = new[]
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
{
Name = "string",
Patterns = new[]
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
{
Function = "string",
Regexp = "string",
Retentions = new[]
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
{
Age = 0,
Precision = 0,
},
},
},
},
},
},
Kafka = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaArgs
{
SaslMechanism = "string",
SaslPassword = "string",
SaslUsername = "string",
SecurityProtocol = "string",
},
KafkaTopics = new[]
{
new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
{
Name = "string",
Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
{
SaslMechanism = "string",
SaslPassword = "string",
SaslUsername = "string",
SecurityProtocol = "string",
},
},
},
KeepAliveTimeout = 0,
LogLevel = "string",
MarkCacheSize = 0,
MaxConcurrentQueries = 0,
MaxConnections = 0,
MaxPartitionSizeToDrop = 0,
MaxTableSizeToDrop = 0,
MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
{
MaxBytesToMergeAtMinSpaceInPool = 0,
MaxReplicatedMergesInQueue = 0,
NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = 0,
PartsToDelayInsert = 0,
PartsToThrowInsert = 0,
ReplicatedDeduplicationWindow = 0,
ReplicatedDeduplicationWindowSeconds = 0,
},
MetricLogEnabled = false,
MetricLogRetentionSize = 0,
MetricLogRetentionTime = 0,
PartLogRetentionSize = 0,
PartLogRetentionTime = 0,
QueryLogRetentionSize = 0,
QueryLogRetentionTime = 0,
QueryThreadLogEnabled = false,
QueryThreadLogRetentionSize = 0,
QueryThreadLogRetentionTime = 0,
Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
{
Password = "string",
Username = "string",
},
TextLogEnabled = false,
TextLogLevel = "string",
TextLogRetentionSize = 0,
TextLogRetentionTime = 0,
Timezone = "string",
TraceLogEnabled = false,
TraceLogRetentionSize = 0,
TraceLogRetentionTime = 0,
UncompressedCacheSize = 0,
},
},
Databases = new[]
{
new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
{
Name = "string",
},
},
MlModels = new[]
{
new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
{
Name = "string",
Type = "string",
Uri = "string",
},
},
Access = new Yandex.Inputs.MdbClickhouseClusterAccessArgs
{
DataLens = false,
Metrika = false,
Serverless = false,
WebSql = false,
},
DeletionProtection = false,
Description = "string",
CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
{
Enabled = false,
},
FolderId = "string",
FormatSchemas = new[]
{
new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
{
Name = "string",
Type = "string",
Uri = "string",
},
},
BackupWindowStart = new Yandex.Inputs.MdbClickhouseClusterBackupWindowStartArgs
{
Hours = 0,
Minutes = 0,
},
Labels =
{
{ "string", "string" },
},
MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
{
Type = "string",
Day = "string",
Hour = 0,
},
CopySchemaOnNewHosts = false,
Name = "string",
AdminPassword = "string",
SecurityGroupIds = new[]
{
"string",
},
ServiceAccountId = "string",
ShardGroups = new[]
{
new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
{
Name = "string",
ShardNames = new[]
{
"string",
},
Description = "string",
},
},
SqlDatabaseManagement = false,
SqlUserManagement = false,
Users = new[]
{
new Yandex.Inputs.MdbClickhouseClusterUserArgs
{
Name = "string",
Password = "string",
Permissions = new[]
{
new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
{
DatabaseName = "string",
},
},
Quotas = new[]
{
new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
{
IntervalDuration = 0,
Errors = 0,
ExecutionTime = 0,
Queries = 0,
ReadRows = 0,
ResultRows = 0,
},
},
Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
{
AddHttpCorsHeader = false,
AllowDdl = false,
Compile = false,
CompileExpressions = false,
ConnectTimeout = 0,
CountDistinctImplementation = "string",
DistinctOverflowMode = "string",
DistributedAggregationMemoryEfficient = false,
DistributedDdlTaskTimeout = 0,
DistributedProductMode = "string",
EmptyResultForAggregationByEmptySet = false,
EnableHttpCompression = false,
FallbackToStaleReplicasForDistributedQueries = false,
ForceIndexByDate = false,
ForcePrimaryKey = false,
GroupByOverflowMode = "string",
GroupByTwoLevelThreshold = 0,
GroupByTwoLevelThresholdBytes = 0,
HttpConnectionTimeout = 0,
HttpHeadersProgressInterval = 0,
HttpReceiveTimeout = 0,
HttpSendTimeout = 0,
InputFormatDefaultsForOmittedFields = false,
InputFormatValuesInterpretExpressions = false,
InsertQuorum = 0,
InsertQuorumTimeout = 0,
JoinOverflowMode = "string",
JoinUseNulls = false,
JoinedSubqueryRequiresAlias = false,
LowCardinalityAllowInNativeFormat = false,
MaxAstDepth = 0,
MaxAstElements = 0,
MaxBlockSize = 0,
MaxBytesBeforeExternalGroupBy = 0,
MaxBytesBeforeExternalSort = 0,
MaxBytesInDistinct = 0,
MaxBytesInJoin = 0,
MaxBytesInSet = 0,
MaxBytesToRead = 0,
MaxBytesToSort = 0,
MaxBytesToTransfer = 0,
MaxColumnsToRead = 0,
MaxExecutionTime = 0,
MaxExpandedAstElements = 0,
MaxInsertBlockSize = 0,
MaxMemoryUsage = 0,
MaxMemoryUsageForUser = 0,
MaxNetworkBandwidth = 0,
MaxNetworkBandwidthForUser = 0,
MaxQuerySize = 0,
MaxReplicaDelayForDistributedQueries = 0,
MaxResultBytes = 0,
MaxResultRows = 0,
MaxRowsInDistinct = 0,
MaxRowsInJoin = 0,
MaxRowsInSet = 0,
MaxRowsToGroupBy = 0,
MaxRowsToRead = 0,
MaxRowsToSort = 0,
MaxRowsToTransfer = 0,
MaxTemporaryColumns = 0,
MaxTemporaryNonConstColumns = 0,
MaxThreads = 0,
MergeTreeMaxBytesToUseCache = 0,
MergeTreeMaxRowsToUseCache = 0,
MergeTreeMinBytesForConcurrentRead = 0,
MergeTreeMinRowsForConcurrentRead = 0,
MinBytesToUseDirectIo = 0,
MinCountToCompile = 0,
MinCountToCompileExpression = 0,
MinExecutionSpeed = 0,
MinExecutionSpeedBytes = 0,
MinInsertBlockSizeBytes = 0,
MinInsertBlockSizeRows = 0,
OutputFormatJsonQuote64bitIntegers = false,
OutputFormatJsonQuoteDenormals = false,
Priority = 0,
QuotaMode = "string",
ReadOverflowMode = "string",
Readonly = 0,
ReceiveTimeout = 0,
ReplicationAlterPartitionsSync = 0,
ResultOverflowMode = "string",
SelectSequentialConsistency = false,
SendProgressInHttpHeaders = false,
SendTimeout = 0,
SetOverflowMode = "string",
SkipUnavailableShards = false,
SortOverflowMode = "string",
TimeoutOverflowMode = "string",
TransferOverflowMode = "string",
TransformNullIn = false,
UseUncompressedCache = false,
},
},
},
Version = "string",
Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
{
Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
{
DiskSize = 0,
DiskTypeId = "string",
ResourcePresetId = "string",
},
},
});
example, err := yandex.NewMdbClickhouseCluster(ctx, "mdbClickhouseClusterResource", &yandex.MdbClickhouseClusterArgs{
Environment: pulumi.String("string"),
NetworkId: pulumi.String("string"),
Hosts: yandex.MdbClickhouseClusterHostArray{
&yandex.MdbClickhouseClusterHostArgs{
Type: pulumi.String("string"),
Zone: pulumi.String("string"),
AssignPublicIp: pulumi.Bool(false),
Fqdn: pulumi.String("string"),
ShardName: pulumi.String("string"),
SubnetId: pulumi.String("string"),
},
},
Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
DiskSize: pulumi.Int(0),
DiskTypeId: pulumi.String("string"),
ResourcePresetId: pulumi.String("string"),
},
Config: &yandex.MdbClickhouseClusterClickhouseConfigArgs{
BackgroundPoolSize: pulumi.Int(0),
BackgroundSchedulePoolSize: pulumi.Int(0),
Compressions: yandex.MdbClickhouseClusterClickhouseConfigCompressionArray{
&yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs{
Method: pulumi.String("string"),
MinPartSize: pulumi.Int(0),
MinPartSizeRatio: pulumi.Float64(0),
},
},
GeobaseUri: pulumi.String("string"),
GraphiteRollups: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs{
Name: pulumi.String("string"),
Patterns: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs{
Function: pulumi.String("string"),
Regexp: pulumi.String("string"),
Retentions: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArray{
&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs{
Age: pulumi.Int(0),
Precision: pulumi.Int(0),
},
},
},
},
},
},
Kafka: &yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs{
SaslMechanism: pulumi.String("string"),
SaslPassword: pulumi.String("string"),
SaslUsername: pulumi.String("string"),
SecurityProtocol: pulumi.String("string"),
},
KafkaTopics: yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArray{
&yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs{
Name: pulumi.String("string"),
Settings: &yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs{
SaslMechanism: pulumi.String("string"),
SaslPassword: pulumi.String("string"),
SaslUsername: pulumi.String("string"),
SecurityProtocol: pulumi.String("string"),
},
},
},
KeepAliveTimeout: pulumi.Int(0),
LogLevel: pulumi.String("string"),
MarkCacheSize: pulumi.Int(0),
MaxConcurrentQueries: pulumi.Int(0),
MaxConnections: pulumi.Int(0),
MaxPartitionSizeToDrop: pulumi.Int(0),
MaxTableSizeToDrop: pulumi.Int(0),
MergeTree: &yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs{
MaxBytesToMergeAtMinSpaceInPool: pulumi.Int(0),
MaxReplicatedMergesInQueue: pulumi.Int(0),
NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: pulumi.Int(0),
PartsToDelayInsert: pulumi.Int(0),
PartsToThrowInsert: pulumi.Int(0),
ReplicatedDeduplicationWindow: pulumi.Int(0),
ReplicatedDeduplicationWindowSeconds: pulumi.Int(0),
},
MetricLogEnabled: pulumi.Bool(false),
MetricLogRetentionSize: pulumi.Int(0),
MetricLogRetentionTime: pulumi.Int(0),
PartLogRetentionSize: pulumi.Int(0),
PartLogRetentionTime: pulumi.Int(0),
QueryLogRetentionSize: pulumi.Int(0),
QueryLogRetentionTime: pulumi.Int(0),
QueryThreadLogEnabled: pulumi.Bool(false),
QueryThreadLogRetentionSize: pulumi.Int(0),
QueryThreadLogRetentionTime: pulumi.Int(0),
Rabbitmq: &yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs{
Password: pulumi.String("string"),
Username: pulumi.String("string"),
},
TextLogEnabled: pulumi.Bool(false),
TextLogLevel: pulumi.String("string"),
TextLogRetentionSize: pulumi.Int(0),
TextLogRetentionTime: pulumi.Int(0),
Timezone: pulumi.String("string"),
TraceLogEnabled: pulumi.Bool(false),
TraceLogRetentionSize: pulumi.Int(0),
TraceLogRetentionTime: pulumi.Int(0),
UncompressedCacheSize: pulumi.Int(0),
},
},
Databases: yandex.MdbClickhouseClusterDatabaseArray{
&yandex.MdbClickhouseClusterDatabaseArgs{
Name: pulumi.String("string"),
},
},
MlModels: yandex.MdbClickhouseClusterMlModelArray{
&yandex.MdbClickhouseClusterMlModelArgs{
Name: pulumi.String("string"),
Type: pulumi.String("string"),
Uri: pulumi.String("string"),
},
},
Access: &yandex.MdbClickhouseClusterAccessArgs{
DataLens: pulumi.Bool(false),
Metrika: pulumi.Bool(false),
Serverless: pulumi.Bool(false),
WebSql: pulumi.Bool(false),
},
DeletionProtection: pulumi.Bool(false),
Description: pulumi.String("string"),
CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
Enabled: pulumi.Bool(false),
},
FolderId: pulumi.String("string"),
FormatSchemas: yandex.MdbClickhouseClusterFormatSchemaArray{
&yandex.MdbClickhouseClusterFormatSchemaArgs{
Name: pulumi.String("string"),
Type: pulumi.String("string"),
Uri: pulumi.String("string"),
},
},
BackupWindowStart: &yandex.MdbClickhouseClusterBackupWindowStartArgs{
Hours: pulumi.Int(0),
Minutes: pulumi.Int(0),
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
MaintenanceWindow: &yandex.MdbClickhouseClusterMaintenanceWindowArgs{
Type: pulumi.String("string"),
Day: pulumi.String("string"),
Hour: pulumi.Int(0),
},
CopySchemaOnNewHosts: pulumi.Bool(false),
Name: pulumi.String("string"),
AdminPassword: pulumi.String("string"),
SecurityGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
ServiceAccountId: pulumi.String("string"),
ShardGroups: yandex.MdbClickhouseClusterShardGroupArray{
&yandex.MdbClickhouseClusterShardGroupArgs{
Name: pulumi.String("string"),
ShardNames: pulumi.StringArray{
pulumi.String("string"),
},
Description: pulumi.String("string"),
},
},
SqlDatabaseManagement: pulumi.Bool(false),
SqlUserManagement: pulumi.Bool(false),
Users: yandex.MdbClickhouseClusterUserArray{
&yandex.MdbClickhouseClusterUserArgs{
Name: pulumi.String("string"),
Password: pulumi.String("string"),
Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
&yandex.MdbClickhouseClusterUserPermissionArgs{
DatabaseName: pulumi.String("string"),
},
},
Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
&yandex.MdbClickhouseClusterUserQuotaArgs{
IntervalDuration: pulumi.Int(0),
Errors: pulumi.Int(0),
ExecutionTime: pulumi.Int(0),
Queries: pulumi.Int(0),
ReadRows: pulumi.Int(0),
ResultRows: pulumi.Int(0),
},
},
Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
AddHttpCorsHeader: pulumi.Bool(false),
AllowDdl: pulumi.Bool(false),
Compile: pulumi.Bool(false),
CompileExpressions: pulumi.Bool(false),
ConnectTimeout: pulumi.Int(0),
CountDistinctImplementation: pulumi.String("string"),
DistinctOverflowMode: pulumi.String("string"),
DistributedAggregationMemoryEfficient: pulumi.Bool(false),
DistributedDdlTaskTimeout: pulumi.Int(0),
DistributedProductMode: pulumi.String("string"),
EmptyResultForAggregationByEmptySet: pulumi.Bool(false),
EnableHttpCompression: pulumi.Bool(false),
FallbackToStaleReplicasForDistributedQueries: pulumi.Bool(false),
ForceIndexByDate: pulumi.Bool(false),
ForcePrimaryKey: pulumi.Bool(false),
GroupByOverflowMode: pulumi.String("string"),
GroupByTwoLevelThreshold: pulumi.Int(0),
GroupByTwoLevelThresholdBytes: pulumi.Int(0),
HttpConnectionTimeout: pulumi.Int(0),
HttpHeadersProgressInterval: pulumi.Int(0),
HttpReceiveTimeout: pulumi.Int(0),
HttpSendTimeout: pulumi.Int(0),
InputFormatDefaultsForOmittedFields: pulumi.Bool(false),
InputFormatValuesInterpretExpressions: pulumi.Bool(false),
InsertQuorum: pulumi.Int(0),
InsertQuorumTimeout: pulumi.Int(0),
JoinOverflowMode: pulumi.String("string"),
JoinUseNulls: pulumi.Bool(false),
JoinedSubqueryRequiresAlias: pulumi.Bool(false),
LowCardinalityAllowInNativeFormat: pulumi.Bool(false),
MaxAstDepth: pulumi.Int(0),
MaxAstElements: pulumi.Int(0),
MaxBlockSize: pulumi.Int(0),
MaxBytesBeforeExternalGroupBy: pulumi.Int(0),
MaxBytesBeforeExternalSort: pulumi.Int(0),
MaxBytesInDistinct: pulumi.Int(0),
MaxBytesInJoin: pulumi.Int(0),
MaxBytesInSet: pulumi.Int(0),
MaxBytesToRead: pulumi.Int(0),
MaxBytesToSort: pulumi.Int(0),
MaxBytesToTransfer: pulumi.Int(0),
MaxColumnsToRead: pulumi.Int(0),
MaxExecutionTime: pulumi.Int(0),
MaxExpandedAstElements: pulumi.Int(0),
MaxInsertBlockSize: pulumi.Int(0),
MaxMemoryUsage: pulumi.Int(0),
MaxMemoryUsageForUser: pulumi.Int(0),
MaxNetworkBandwidth: pulumi.Int(0),
MaxNetworkBandwidthForUser: pulumi.Int(0),
MaxQuerySize: pulumi.Int(0),
MaxReplicaDelayForDistributedQueries: pulumi.Int(0),
MaxResultBytes: pulumi.Int(0),
MaxResultRows: pulumi.Int(0),
MaxRowsInDistinct: pulumi.Int(0),
MaxRowsInJoin: pulumi.Int(0),
MaxRowsInSet: pulumi.Int(0),
MaxRowsToGroupBy: pulumi.Int(0),
MaxRowsToRead: pulumi.Int(0),
MaxRowsToSort: pulumi.Int(0),
MaxRowsToTransfer: pulumi.Int(0),
MaxTemporaryColumns: pulumi.Int(0),
MaxTemporaryNonConstColumns: pulumi.Int(0),
MaxThreads: pulumi.Int(0),
MergeTreeMaxBytesToUseCache: pulumi.Int(0),
MergeTreeMaxRowsToUseCache: pulumi.Int(0),
MergeTreeMinBytesForConcurrentRead: pulumi.Int(0),
MergeTreeMinRowsForConcurrentRead: pulumi.Int(0),
MinBytesToUseDirectIo: pulumi.Int(0),
MinCountToCompile: pulumi.Int(0),
MinCountToCompileExpression: pulumi.Int(0),
MinExecutionSpeed: pulumi.Int(0),
MinExecutionSpeedBytes: pulumi.Int(0),
MinInsertBlockSizeBytes: pulumi.Int(0),
MinInsertBlockSizeRows: pulumi.Int(0),
OutputFormatJsonQuote64bitIntegers: pulumi.Bool(false),
OutputFormatJsonQuoteDenormals: pulumi.Bool(false),
Priority: pulumi.Int(0),
QuotaMode: pulumi.String("string"),
ReadOverflowMode: pulumi.String("string"),
Readonly: pulumi.Int(0),
ReceiveTimeout: pulumi.Int(0),
ReplicationAlterPartitionsSync: pulumi.Int(0),
ResultOverflowMode: pulumi.String("string"),
SelectSequentialConsistency: pulumi.Bool(false),
SendProgressInHttpHeaders: pulumi.Bool(false),
SendTimeout: pulumi.Int(0),
SetOverflowMode: pulumi.String("string"),
SkipUnavailableShards: pulumi.Bool(false),
SortOverflowMode: pulumi.String("string"),
TimeoutOverflowMode: pulumi.String("string"),
TransferOverflowMode: pulumi.String("string"),
TransformNullIn: pulumi.Bool(false),
UseUncompressedCache: pulumi.Bool(false),
},
},
},
Version: pulumi.String("string"),
Zookeeper: &yandex.MdbClickhouseClusterZookeeperArgs{
Resources: &yandex.MdbClickhouseClusterZookeeperResourcesArgs{
DiskSize: pulumi.Int(0),
DiskTypeId: pulumi.String("string"),
ResourcePresetId: pulumi.String("string"),
},
},
})
var mdbClickhouseClusterResource = new MdbClickhouseCluster("mdbClickhouseClusterResource", MdbClickhouseClusterArgs.builder()
.environment("string")
.networkId("string")
.hosts(MdbClickhouseClusterHostArgs.builder()
.type("string")
.zone("string")
.assignPublicIp(false)
.fqdn("string")
.shardName("string")
.subnetId("string")
.build())
.clickhouse(MdbClickhouseClusterClickhouseArgs.builder()
.resources(MdbClickhouseClusterClickhouseResourcesArgs.builder()
.diskSize(0)
.diskTypeId("string")
.resourcePresetId("string")
.build())
.config(MdbClickhouseClusterClickhouseConfigArgs.builder()
.backgroundPoolSize(0)
.backgroundSchedulePoolSize(0)
.compressions(MdbClickhouseClusterClickhouseConfigCompressionArgs.builder()
.method("string")
.minPartSize(0)
.minPartSizeRatio(0)
.build())
.geobaseUri("string")
.graphiteRollups(MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs.builder()
.name("string")
.patterns(MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs.builder()
.function("string")
.regexp("string")
.retentions(MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs.builder()
.age(0)
.precision(0)
.build())
.build())
.build())
.kafka(MdbClickhouseClusterClickhouseConfigKafkaArgs.builder()
.saslMechanism("string")
.saslPassword("string")
.saslUsername("string")
.securityProtocol("string")
.build())
.kafkaTopics(MdbClickhouseClusterClickhouseConfigKafkaTopicArgs.builder()
.name("string")
.settings(MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs.builder()
.saslMechanism("string")
.saslPassword("string")
.saslUsername("string")
.securityProtocol("string")
.build())
.build())
.keepAliveTimeout(0)
.logLevel("string")
.markCacheSize(0)
.maxConcurrentQueries(0)
.maxConnections(0)
.maxPartitionSizeToDrop(0)
.maxTableSizeToDrop(0)
.mergeTree(MdbClickhouseClusterClickhouseConfigMergeTreeArgs.builder()
.maxBytesToMergeAtMinSpaceInPool(0)
.maxReplicatedMergesInQueue(0)
.numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge(0)
.partsToDelayInsert(0)
.partsToThrowInsert(0)
.replicatedDeduplicationWindow(0)
.replicatedDeduplicationWindowSeconds(0)
.build())
.metricLogEnabled(false)
.metricLogRetentionSize(0)
.metricLogRetentionTime(0)
.partLogRetentionSize(0)
.partLogRetentionTime(0)
.queryLogRetentionSize(0)
.queryLogRetentionTime(0)
.queryThreadLogEnabled(false)
.queryThreadLogRetentionSize(0)
.queryThreadLogRetentionTime(0)
.rabbitmq(MdbClickhouseClusterClickhouseConfigRabbitmqArgs.builder()
.password("string")
.username("string")
.build())
.textLogEnabled(false)
.textLogLevel("string")
.textLogRetentionSize(0)
.textLogRetentionTime(0)
.timezone("string")
.traceLogEnabled(false)
.traceLogRetentionSize(0)
.traceLogRetentionTime(0)
.uncompressedCacheSize(0)
.build())
.build())
.databases(MdbClickhouseClusterDatabaseArgs.builder()
.name("string")
.build())
.mlModels(MdbClickhouseClusterMlModelArgs.builder()
.name("string")
.type("string")
.uri("string")
.build())
.access(MdbClickhouseClusterAccessArgs.builder()
.dataLens(false)
.metrika(false)
.serverless(false)
.webSql(false)
.build())
.deletionProtection(false)
.description("string")
.cloudStorage(MdbClickhouseClusterCloudStorageArgs.builder()
.enabled(false)
.build())
.folderId("string")
.formatSchemas(MdbClickhouseClusterFormatSchemaArgs.builder()
.name("string")
.type("string")
.uri("string")
.build())
.backupWindowStart(MdbClickhouseClusterBackupWindowStartArgs.builder()
.hours(0)
.minutes(0)
.build())
.labels(Map.of("string", "string"))
.maintenanceWindow(MdbClickhouseClusterMaintenanceWindowArgs.builder()
.type("string")
.day("string")
.hour(0)
.build())
.copySchemaOnNewHosts(false)
.name("string")
.adminPassword("string")
.securityGroupIds("string")
.serviceAccountId("string")
.shardGroups(MdbClickhouseClusterShardGroupArgs.builder()
.name("string")
.shardNames("string")
.description("string")
.build())
.sqlDatabaseManagement(false)
.sqlUserManagement(false)
.users(MdbClickhouseClusterUserArgs.builder()
.name("string")
.password("string")
.permissions(MdbClickhouseClusterUserPermissionArgs.builder()
.databaseName("string")
.build())
.quotas(MdbClickhouseClusterUserQuotaArgs.builder()
.intervalDuration(0)
.errors(0)
.executionTime(0)
.queries(0)
.readRows(0)
.resultRows(0)
.build())
.settings(MdbClickhouseClusterUserSettingsArgs.builder()
.addHttpCorsHeader(false)
.allowDdl(false)
.compile(false)
.compileExpressions(false)
.connectTimeout(0)
.countDistinctImplementation("string")
.distinctOverflowMode("string")
.distributedAggregationMemoryEfficient(false)
.distributedDdlTaskTimeout(0)
.distributedProductMode("string")
.emptyResultForAggregationByEmptySet(false)
.enableHttpCompression(false)
.fallbackToStaleReplicasForDistributedQueries(false)
.forceIndexByDate(false)
.forcePrimaryKey(false)
.groupByOverflowMode("string")
.groupByTwoLevelThreshold(0)
.groupByTwoLevelThresholdBytes(0)
.httpConnectionTimeout(0)
.httpHeadersProgressInterval(0)
.httpReceiveTimeout(0)
.httpSendTimeout(0)
.inputFormatDefaultsForOmittedFields(false)
.inputFormatValuesInterpretExpressions(false)
.insertQuorum(0)
.insertQuorumTimeout(0)
.joinOverflowMode("string")
.joinUseNulls(false)
.joinedSubqueryRequiresAlias(false)
.lowCardinalityAllowInNativeFormat(false)
.maxAstDepth(0)
.maxAstElements(0)
.maxBlockSize(0)
.maxBytesBeforeExternalGroupBy(0)
.maxBytesBeforeExternalSort(0)
.maxBytesInDistinct(0)
.maxBytesInJoin(0)
.maxBytesInSet(0)
.maxBytesToRead(0)
.maxBytesToSort(0)
.maxBytesToTransfer(0)
.maxColumnsToRead(0)
.maxExecutionTime(0)
.maxExpandedAstElements(0)
.maxInsertBlockSize(0)
.maxMemoryUsage(0)
.maxMemoryUsageForUser(0)
.maxNetworkBandwidth(0)
.maxNetworkBandwidthForUser(0)
.maxQuerySize(0)
.maxReplicaDelayForDistributedQueries(0)
.maxResultBytes(0)
.maxResultRows(0)
.maxRowsInDistinct(0)
.maxRowsInJoin(0)
.maxRowsInSet(0)
.maxRowsToGroupBy(0)
.maxRowsToRead(0)
.maxRowsToSort(0)
.maxRowsToTransfer(0)
.maxTemporaryColumns(0)
.maxTemporaryNonConstColumns(0)
.maxThreads(0)
.mergeTreeMaxBytesToUseCache(0)
.mergeTreeMaxRowsToUseCache(0)
.mergeTreeMinBytesForConcurrentRead(0)
.mergeTreeMinRowsForConcurrentRead(0)
.minBytesToUseDirectIo(0)
.minCountToCompile(0)
.minCountToCompileExpression(0)
.minExecutionSpeed(0)
.minExecutionSpeedBytes(0)
.minInsertBlockSizeBytes(0)
.minInsertBlockSizeRows(0)
.outputFormatJsonQuote64bitIntegers(false)
.outputFormatJsonQuoteDenormals(false)
.priority(0)
.quotaMode("string")
.readOverflowMode("string")
.readonly(0)
.receiveTimeout(0)
.replicationAlterPartitionsSync(0)
.resultOverflowMode("string")
.selectSequentialConsistency(false)
.sendProgressInHttpHeaders(false)
.sendTimeout(0)
.setOverflowMode("string")
.skipUnavailableShards(false)
.sortOverflowMode("string")
.timeoutOverflowMode("string")
.transferOverflowMode("string")
.transformNullIn(false)
.useUncompressedCache(false)
.build())
.build())
.version("string")
.zookeeper(MdbClickhouseClusterZookeeperArgs.builder()
.resources(MdbClickhouseClusterZookeeperResourcesArgs.builder()
.diskSize(0)
.diskTypeId("string")
.resourcePresetId("string")
.build())
.build())
.build());
mdb_clickhouse_cluster_resource = yandex.MdbClickhouseCluster("mdbClickhouseClusterResource",
environment="string",
network_id="string",
hosts=[{
"type": "string",
"zone": "string",
"assign_public_ip": False,
"fqdn": "string",
"shard_name": "string",
"subnet_id": "string",
}],
clickhouse={
"resources": {
"disk_size": 0,
"disk_type_id": "string",
"resource_preset_id": "string",
},
"config": {
"background_pool_size": 0,
"background_schedule_pool_size": 0,
"compressions": [{
"method": "string",
"min_part_size": 0,
"min_part_size_ratio": 0,
}],
"geobase_uri": "string",
"graphite_rollups": [{
"name": "string",
"patterns": [{
"function": "string",
"regexp": "string",
"retentions": [{
"age": 0,
"precision": 0,
}],
}],
}],
"kafka": {
"sasl_mechanism": "string",
"sasl_password": "string",
"sasl_username": "string",
"security_protocol": "string",
},
"kafka_topics": [{
"name": "string",
"settings": {
"sasl_mechanism": "string",
"sasl_password": "string",
"sasl_username": "string",
"security_protocol": "string",
},
}],
"keep_alive_timeout": 0,
"log_level": "string",
"mark_cache_size": 0,
"max_concurrent_queries": 0,
"max_connections": 0,
"max_partition_size_to_drop": 0,
"max_table_size_to_drop": 0,
"merge_tree": {
"max_bytes_to_merge_at_min_space_in_pool": 0,
"max_replicated_merges_in_queue": 0,
"number_of_free_entries_in_pool_to_lower_max_size_of_merge": 0,
"parts_to_delay_insert": 0,
"parts_to_throw_insert": 0,
"replicated_deduplication_window": 0,
"replicated_deduplication_window_seconds": 0,
},
"metric_log_enabled": False,
"metric_log_retention_size": 0,
"metric_log_retention_time": 0,
"part_log_retention_size": 0,
"part_log_retention_time": 0,
"query_log_retention_size": 0,
"query_log_retention_time": 0,
"query_thread_log_enabled": False,
"query_thread_log_retention_size": 0,
"query_thread_log_retention_time": 0,
"rabbitmq": {
"password": "string",
"username": "string",
},
"text_log_enabled": False,
"text_log_level": "string",
"text_log_retention_size": 0,
"text_log_retention_time": 0,
"timezone": "string",
"trace_log_enabled": False,
"trace_log_retention_size": 0,
"trace_log_retention_time": 0,
"uncompressed_cache_size": 0,
},
},
databases=[{
"name": "string",
}],
ml_models=[{
"name": "string",
"type": "string",
"uri": "string",
}],
access={
"data_lens": False,
"metrika": False,
"serverless": False,
"web_sql": False,
},
deletion_protection=False,
description="string",
cloud_storage={
"enabled": False,
},
folder_id="string",
format_schemas=[{
"name": "string",
"type": "string",
"uri": "string",
}],
backup_window_start={
"hours": 0,
"minutes": 0,
},
labels={
"string": "string",
},
maintenance_window={
"type": "string",
"day": "string",
"hour": 0,
},
copy_schema_on_new_hosts=False,
name="string",
admin_password="string",
security_group_ids=["string"],
service_account_id="string",
shard_groups=[{
"name": "string",
"shard_names": ["string"],
"description": "string",
}],
sql_database_management=False,
sql_user_management=False,
users=[{
"name": "string",
"password": "string",
"permissions": [{
"database_name": "string",
}],
"quotas": [{
"interval_duration": 0,
"errors": 0,
"execution_time": 0,
"queries": 0,
"read_rows": 0,
"result_rows": 0,
}],
"settings": {
"add_http_cors_header": False,
"allow_ddl": False,
"compile": False,
"compile_expressions": False,
"connect_timeout": 0,
"count_distinct_implementation": "string",
"distinct_overflow_mode": "string",
"distributed_aggregation_memory_efficient": False,
"distributed_ddl_task_timeout": 0,
"distributed_product_mode": "string",
"empty_result_for_aggregation_by_empty_set": False,
"enable_http_compression": False,
"fallback_to_stale_replicas_for_distributed_queries": False,
"force_index_by_date": False,
"force_primary_key": False,
"group_by_overflow_mode": "string",
"group_by_two_level_threshold": 0,
"group_by_two_level_threshold_bytes": 0,
"http_connection_timeout": 0,
"http_headers_progress_interval": 0,
"http_receive_timeout": 0,
"http_send_timeout": 0,
"input_format_defaults_for_omitted_fields": False,
"input_format_values_interpret_expressions": False,
"insert_quorum": 0,
"insert_quorum_timeout": 0,
"join_overflow_mode": "string",
"join_use_nulls": False,
"joined_subquery_requires_alias": False,
"low_cardinality_allow_in_native_format": False,
"max_ast_depth": 0,
"max_ast_elements": 0,
"max_block_size": 0,
"max_bytes_before_external_group_by": 0,
"max_bytes_before_external_sort": 0,
"max_bytes_in_distinct": 0,
"max_bytes_in_join": 0,
"max_bytes_in_set": 0,
"max_bytes_to_read": 0,
"max_bytes_to_sort": 0,
"max_bytes_to_transfer": 0,
"max_columns_to_read": 0,
"max_execution_time": 0,
"max_expanded_ast_elements": 0,
"max_insert_block_size": 0,
"max_memory_usage": 0,
"max_memory_usage_for_user": 0,
"max_network_bandwidth": 0,
"max_network_bandwidth_for_user": 0,
"max_query_size": 0,
"max_replica_delay_for_distributed_queries": 0,
"max_result_bytes": 0,
"max_result_rows": 0,
"max_rows_in_distinct": 0,
"max_rows_in_join": 0,
"max_rows_in_set": 0,
"max_rows_to_group_by": 0,
"max_rows_to_read": 0,
"max_rows_to_sort": 0,
"max_rows_to_transfer": 0,
"max_temporary_columns": 0,
"max_temporary_non_const_columns": 0,
"max_threads": 0,
"merge_tree_max_bytes_to_use_cache": 0,
"merge_tree_max_rows_to_use_cache": 0,
"merge_tree_min_bytes_for_concurrent_read": 0,
"merge_tree_min_rows_for_concurrent_read": 0,
"min_bytes_to_use_direct_io": 0,
"min_count_to_compile": 0,
"min_count_to_compile_expression": 0,
"min_execution_speed": 0,
"min_execution_speed_bytes": 0,
"min_insert_block_size_bytes": 0,
"min_insert_block_size_rows": 0,
"output_format_json_quote64bit_integers": False,
"output_format_json_quote_denormals": False,
"priority": 0,
"quota_mode": "string",
"read_overflow_mode": "string",
"readonly": 0,
"receive_timeout": 0,
"replication_alter_partitions_sync": 0,
"result_overflow_mode": "string",
"select_sequential_consistency": False,
"send_progress_in_http_headers": False,
"send_timeout": 0,
"set_overflow_mode": "string",
"skip_unavailable_shards": False,
"sort_overflow_mode": "string",
"timeout_overflow_mode": "string",
"transfer_overflow_mode": "string",
"transform_null_in": False,
"use_uncompressed_cache": False,
},
}],
version="string",
zookeeper={
"resources": {
"disk_size": 0,
"disk_type_id": "string",
"resource_preset_id": "string",
},
})
const mdbClickhouseClusterResource = new yandex.MdbClickhouseCluster("mdbClickhouseClusterResource", {
environment: "string",
networkId: "string",
hosts: [{
type: "string",
zone: "string",
assignPublicIp: false,
fqdn: "string",
shardName: "string",
subnetId: "string",
}],
clickhouse: {
resources: {
diskSize: 0,
diskTypeId: "string",
resourcePresetId: "string",
},
config: {
backgroundPoolSize: 0,
backgroundSchedulePoolSize: 0,
compressions: [{
method: "string",
minPartSize: 0,
minPartSizeRatio: 0,
}],
geobaseUri: "string",
graphiteRollups: [{
name: "string",
patterns: [{
"function": "string",
regexp: "string",
retentions: [{
age: 0,
precision: 0,
}],
}],
}],
kafka: {
saslMechanism: "string",
saslPassword: "string",
saslUsername: "string",
securityProtocol: "string",
},
kafkaTopics: [{
name: "string",
settings: {
saslMechanism: "string",
saslPassword: "string",
saslUsername: "string",
securityProtocol: "string",
},
}],
keepAliveTimeout: 0,
logLevel: "string",
markCacheSize: 0,
maxConcurrentQueries: 0,
maxConnections: 0,
maxPartitionSizeToDrop: 0,
maxTableSizeToDrop: 0,
mergeTree: {
maxBytesToMergeAtMinSpaceInPool: 0,
maxReplicatedMergesInQueue: 0,
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 0,
partsToDelayInsert: 0,
partsToThrowInsert: 0,
replicatedDeduplicationWindow: 0,
replicatedDeduplicationWindowSeconds: 0,
},
metricLogEnabled: false,
metricLogRetentionSize: 0,
metricLogRetentionTime: 0,
partLogRetentionSize: 0,
partLogRetentionTime: 0,
queryLogRetentionSize: 0,
queryLogRetentionTime: 0,
queryThreadLogEnabled: false,
queryThreadLogRetentionSize: 0,
queryThreadLogRetentionTime: 0,
rabbitmq: {
password: "string",
username: "string",
},
textLogEnabled: false,
textLogLevel: "string",
textLogRetentionSize: 0,
textLogRetentionTime: 0,
timezone: "string",
traceLogEnabled: false,
traceLogRetentionSize: 0,
traceLogRetentionTime: 0,
uncompressedCacheSize: 0,
},
},
databases: [{
name: "string",
}],
mlModels: [{
name: "string",
type: "string",
uri: "string",
}],
access: {
dataLens: false,
metrika: false,
serverless: false,
webSql: false,
},
deletionProtection: false,
description: "string",
cloudStorage: {
enabled: false,
},
folderId: "string",
formatSchemas: [{
name: "string",
type: "string",
uri: "string",
}],
backupWindowStart: {
hours: 0,
minutes: 0,
},
labels: {
string: "string",
},
maintenanceWindow: {
type: "string",
day: "string",
hour: 0,
},
copySchemaOnNewHosts: false,
name: "string",
adminPassword: "string",
securityGroupIds: ["string"],
serviceAccountId: "string",
shardGroups: [{
name: "string",
shardNames: ["string"],
description: "string",
}],
sqlDatabaseManagement: false,
sqlUserManagement: false,
users: [{
name: "string",
password: "string",
permissions: [{
databaseName: "string",
}],
quotas: [{
intervalDuration: 0,
errors: 0,
executionTime: 0,
queries: 0,
readRows: 0,
resultRows: 0,
}],
settings: {
addHttpCorsHeader: false,
allowDdl: false,
compile: false,
compileExpressions: false,
connectTimeout: 0,
countDistinctImplementation: "string",
distinctOverflowMode: "string",
distributedAggregationMemoryEfficient: false,
distributedDdlTaskTimeout: 0,
distributedProductMode: "string",
emptyResultForAggregationByEmptySet: false,
enableHttpCompression: false,
fallbackToStaleReplicasForDistributedQueries: false,
forceIndexByDate: false,
forcePrimaryKey: false,
groupByOverflowMode: "string",
groupByTwoLevelThreshold: 0,
groupByTwoLevelThresholdBytes: 0,
httpConnectionTimeout: 0,
httpHeadersProgressInterval: 0,
httpReceiveTimeout: 0,
httpSendTimeout: 0,
inputFormatDefaultsForOmittedFields: false,
inputFormatValuesInterpretExpressions: false,
insertQuorum: 0,
insertQuorumTimeout: 0,
joinOverflowMode: "string",
joinUseNulls: false,
joinedSubqueryRequiresAlias: false,
lowCardinalityAllowInNativeFormat: false,
maxAstDepth: 0,
maxAstElements: 0,
maxBlockSize: 0,
maxBytesBeforeExternalGroupBy: 0,
maxBytesBeforeExternalSort: 0,
maxBytesInDistinct: 0,
maxBytesInJoin: 0,
maxBytesInSet: 0,
maxBytesToRead: 0,
maxBytesToSort: 0,
maxBytesToTransfer: 0,
maxColumnsToRead: 0,
maxExecutionTime: 0,
maxExpandedAstElements: 0,
maxInsertBlockSize: 0,
maxMemoryUsage: 0,
maxMemoryUsageForUser: 0,
maxNetworkBandwidth: 0,
maxNetworkBandwidthForUser: 0,
maxQuerySize: 0,
maxReplicaDelayForDistributedQueries: 0,
maxResultBytes: 0,
maxResultRows: 0,
maxRowsInDistinct: 0,
maxRowsInJoin: 0,
maxRowsInSet: 0,
maxRowsToGroupBy: 0,
maxRowsToRead: 0,
maxRowsToSort: 0,
maxRowsToTransfer: 0,
maxTemporaryColumns: 0,
maxTemporaryNonConstColumns: 0,
maxThreads: 0,
mergeTreeMaxBytesToUseCache: 0,
mergeTreeMaxRowsToUseCache: 0,
mergeTreeMinBytesForConcurrentRead: 0,
mergeTreeMinRowsForConcurrentRead: 0,
minBytesToUseDirectIo: 0,
minCountToCompile: 0,
minCountToCompileExpression: 0,
minExecutionSpeed: 0,
minExecutionSpeedBytes: 0,
minInsertBlockSizeBytes: 0,
minInsertBlockSizeRows: 0,
outputFormatJsonQuote64bitIntegers: false,
outputFormatJsonQuoteDenormals: false,
priority: 0,
quotaMode: "string",
readOverflowMode: "string",
readonly: 0,
receiveTimeout: 0,
replicationAlterPartitionsSync: 0,
resultOverflowMode: "string",
selectSequentialConsistency: false,
sendProgressInHttpHeaders: false,
sendTimeout: 0,
setOverflowMode: "string",
skipUnavailableShards: false,
sortOverflowMode: "string",
timeoutOverflowMode: "string",
transferOverflowMode: "string",
transformNullIn: false,
useUncompressedCache: false,
},
}],
version: "string",
zookeeper: {
resources: {
diskSize: 0,
diskTypeId: "string",
resourcePresetId: "string",
},
},
});
type: yandex:MdbClickhouseCluster
properties:
access:
dataLens: false
metrika: false
serverless: false
webSql: false
adminPassword: string
backupWindowStart:
hours: 0
minutes: 0
clickhouse:
config:
backgroundPoolSize: 0
backgroundSchedulePoolSize: 0
compressions:
- method: string
minPartSize: 0
minPartSizeRatio: 0
geobaseUri: string
graphiteRollups:
- name: string
patterns:
- function: string
regexp: string
retentions:
- age: 0
precision: 0
kafka:
saslMechanism: string
saslPassword: string
saslUsername: string
securityProtocol: string
kafkaTopics:
- name: string
settings:
saslMechanism: string
saslPassword: string
saslUsername: string
securityProtocol: string
keepAliveTimeout: 0
logLevel: string
markCacheSize: 0
maxConcurrentQueries: 0
maxConnections: 0
maxPartitionSizeToDrop: 0
maxTableSizeToDrop: 0
mergeTree:
maxBytesToMergeAtMinSpaceInPool: 0
maxReplicatedMergesInQueue: 0
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 0
partsToDelayInsert: 0
partsToThrowInsert: 0
replicatedDeduplicationWindow: 0
replicatedDeduplicationWindowSeconds: 0
metricLogEnabled: false
metricLogRetentionSize: 0
metricLogRetentionTime: 0
partLogRetentionSize: 0
partLogRetentionTime: 0
queryLogRetentionSize: 0
queryLogRetentionTime: 0
queryThreadLogEnabled: false
queryThreadLogRetentionSize: 0
queryThreadLogRetentionTime: 0
rabbitmq:
password: string
username: string
textLogEnabled: false
textLogLevel: string
textLogRetentionSize: 0
textLogRetentionTime: 0
timezone: string
traceLogEnabled: false
traceLogRetentionSize: 0
traceLogRetentionTime: 0
uncompressedCacheSize: 0
resources:
diskSize: 0
diskTypeId: string
resourcePresetId: string
cloudStorage:
enabled: false
copySchemaOnNewHosts: false
databases:
- name: string
deletionProtection: false
description: string
environment: string
folderId: string
formatSchemas:
- name: string
type: string
uri: string
hosts:
- assignPublicIp: false
fqdn: string
shardName: string
subnetId: string
type: string
zone: string
labels:
string: string
maintenanceWindow:
day: string
hour: 0
type: string
mlModels:
- name: string
type: string
uri: string
name: string
networkId: string
securityGroupIds:
- string
serviceAccountId: string
shardGroups:
- description: string
name: string
shardNames:
- string
sqlDatabaseManagement: false
sqlUserManagement: false
users:
- name: string
password: string
permissions:
- databaseName: string
quotas:
- errors: 0
executionTime: 0
intervalDuration: 0
queries: 0
readRows: 0
resultRows: 0
settings:
addHttpCorsHeader: false
allowDdl: false
compile: false
compileExpressions: false
connectTimeout: 0
countDistinctImplementation: string
distinctOverflowMode: string
distributedAggregationMemoryEfficient: false
distributedDdlTaskTimeout: 0
distributedProductMode: string
emptyResultForAggregationByEmptySet: false
enableHttpCompression: false
fallbackToStaleReplicasForDistributedQueries: false
forceIndexByDate: false
forcePrimaryKey: false
groupByOverflowMode: string
groupByTwoLevelThreshold: 0
groupByTwoLevelThresholdBytes: 0
httpConnectionTimeout: 0
httpHeadersProgressInterval: 0
httpReceiveTimeout: 0
httpSendTimeout: 0
inputFormatDefaultsForOmittedFields: false
inputFormatValuesInterpretExpressions: false
insertQuorum: 0
insertQuorumTimeout: 0
joinOverflowMode: string
joinUseNulls: false
joinedSubqueryRequiresAlias: false
lowCardinalityAllowInNativeFormat: false
maxAstDepth: 0
maxAstElements: 0
maxBlockSize: 0
maxBytesBeforeExternalGroupBy: 0
maxBytesBeforeExternalSort: 0
maxBytesInDistinct: 0
maxBytesInJoin: 0
maxBytesInSet: 0
maxBytesToRead: 0
maxBytesToSort: 0
maxBytesToTransfer: 0
maxColumnsToRead: 0
maxExecutionTime: 0
maxExpandedAstElements: 0
maxInsertBlockSize: 0
maxMemoryUsage: 0
maxMemoryUsageForUser: 0
maxNetworkBandwidth: 0
maxNetworkBandwidthForUser: 0
maxQuerySize: 0
maxReplicaDelayForDistributedQueries: 0
maxResultBytes: 0
maxResultRows: 0
maxRowsInDistinct: 0
maxRowsInJoin: 0
maxRowsInSet: 0
maxRowsToGroupBy: 0
maxRowsToRead: 0
maxRowsToSort: 0
maxRowsToTransfer: 0
maxTemporaryColumns: 0
maxTemporaryNonConstColumns: 0
maxThreads: 0
mergeTreeMaxBytesToUseCache: 0
mergeTreeMaxRowsToUseCache: 0
mergeTreeMinBytesForConcurrentRead: 0
mergeTreeMinRowsForConcurrentRead: 0
minBytesToUseDirectIo: 0
minCountToCompile: 0
minCountToCompileExpression: 0
minExecutionSpeed: 0
minExecutionSpeedBytes: 0
minInsertBlockSizeBytes: 0
minInsertBlockSizeRows: 0
outputFormatJsonQuote64bitIntegers: false
outputFormatJsonQuoteDenormals: false
priority: 0
quotaMode: string
readOverflowMode: string
readonly: 0
receiveTimeout: 0
replicationAlterPartitionsSync: 0
resultOverflowMode: string
selectSequentialConsistency: false
sendProgressInHttpHeaders: false
sendTimeout: 0
setOverflowMode: string
skipUnavailableShards: false
sortOverflowMode: string
timeoutOverflowMode: string
transferOverflowMode: string
transformNullIn: false
useUncompressedCache: false
version: string
zookeeper:
resources:
diskSize: 0
diskTypeId: string
resourcePresetId: string
MdbClickhouseCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
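For example, the same clickhouse input can be written either way; the sketch below is illustrative (the resource names, placeholder IDs, and the s2.micro preset are assumptions, with class names following the pulumi_yandex conventions shown on this page):

import pulumi_yandex as yandex

# Variant 1: typed argument classes.
with_args = yandex.MdbClickhouseCluster("withArgs",
    environment="PRESTABLE",
    network_id="<network-id>",  # placeholder
    hosts=[yandex.MdbClickhouseClusterHostArgs(
        type="CLICKHOUSE",
        zone="ru-central1-a",
        subnet_id="<subnet-id>",  # placeholder
    )],
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=32,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ))

# Variant 2: the same inputs as dictionary literals.
with_dicts = yandex.MdbClickhouseCluster("withDicts",
    environment="PRESTABLE",
    network_id="<network-id>",
    hosts=[{
        "type": "CLICKHOUSE",
        "zone": "ru-central1-a",
        "subnet_id": "<subnet-id>",
    }],
    clickhouse={"resources": {
        "disk_size": 32,
        "disk_type_id": "network-ssd",
        "resource_preset_id": "s2.micro",
    }})

Both forms produce the same resource; dictionary literals trade static type checking for brevity.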
The MdbClickhouseCluster resource accepts the following input properties:
- Clickhouse MdbClickhouseClusterClickhouse - Configuration of the ClickHouse subcluster. The structure is documented below.
- Environment string - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- Hosts List<MdbClickhouseClusterHost> - A host of the ClickHouse cluster. The structure is documented below.
- NetworkId string - ID of the network, to which the ClickHouse cluster belongs.
- Access MdbClickhouseClusterAccess - Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string - A password used to authorize as the admin user when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStart - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorage
- CopySchemaOnNewHosts bool - Whether to copy schema on new ClickHouse hosts.
- Databases List<MdbClickhouseClusterDatabase> - A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool - Inhibits deletion of the cluster. Can be either true or false.
- Description string - Description of the ClickHouse cluster.
- FolderId string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas List<MdbClickhouseClusterFormatSchema> - A set of protobuf or capnproto format schemas. The structure is documented below.
- Labels Dictionary<string, string> - A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindow
- MlModels List<MdbClickhouseClusterMlModel> - A group of machine learning models. The structure is documented below.
- Name string - Name of the ClickHouse cluster.
- SecurityGroupIds List<string> - A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string - ID of the service account used for access to Yandex Object Storage.
- ShardGroups List<MdbClickhouseClusterShardGroup> - A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool - Grants the admin user database management permissions.
- SqlUserManagement bool - Enables the admin user with user management permissions.
- Users List<MdbClickhouseClusterUser> - A user of the ClickHouse cluster. The structure is documented below.
- Version string - Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- Clickhouse MdbClickhouseClusterClickhouseArgs - Configuration of the ClickHouse subcluster. The structure is documented below.
- Environment string - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- Hosts []MdbClickhouseClusterHostArgs - A host of the ClickHouse cluster. The structure is documented below.
- NetworkId string - ID of the network, to which the ClickHouse cluster belongs.
- Access MdbClickhouseClusterAccessArgs - Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string - A password used to authorize as the admin user when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorageArgs
- CopySchemaOnNewHosts bool - Whether to copy schema on new ClickHouse hosts.
- Databases []MdbClickhouseClusterDatabaseArgs - A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool - Inhibits deletion of the cluster. Can be either true or false.
- Description string - Description of the ClickHouse cluster.
- FolderId string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas []MdbClickhouseClusterFormatSchemaArgs - A set of protobuf or capnproto format schemas. The structure is documented below.
- Labels map[string]string - A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
- MlModels []MdbClickhouseClusterMlModelArgs - A group of machine learning models. The structure is documented below.
- Name string - Name of the ClickHouse cluster.
- SecurityGroupIds []string - A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string - ID of the service account used for access to Yandex Object Storage.
- ShardGroups []MdbClickhouseClusterShardGroupArgs - A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool - Grants the admin user database management permissions.
- SqlUserManagement bool - Enables the admin user with user management permissions.
- Users []MdbClickhouseClusterUserArgs - A user of the ClickHouse cluster. The structure is documented below.
- Version string - Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeperArgs - Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouse - Configuration of the ClickHouse subcluster. The structure is documented below.
- environment String - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts List<MdbClickhouseClusterHost> - A host of the ClickHouse cluster. The structure is documented below.
- networkId String - ID of the network, to which the ClickHouse cluster belongs.
- access MdbClickhouseClusterAccess - Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword String - A password used to authorize as the admin user when sql_user_management is enabled.
- backupWindowStart MdbClickhouseClusterBackupWindowStart - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloudStorage MdbClickhouseClusterCloudStorage
- copySchemaOnNewHosts Boolean - Whether to copy schema on new ClickHouse hosts.
- databases List<MdbClickhouseClusterDatabase> - A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection Boolean - Inhibits deletion of the cluster. Can be either true or false.
- description String - Description of the ClickHouse cluster.
- folderId String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas List<MdbClickhouseClusterFormatSchema> - A set of protobuf or capnproto format schemas. The structure is documented below.
- labels Map<String,String> - A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow MdbClickhouseClusterMaintenanceWindow
- mlModels List<MdbClickhouseClusterMlModel> - A group of machine learning models. The structure is documented below.
- name String - Name of the ClickHouse cluster.
- securityGroupIds List<String> - A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId String - ID of the service account used for access to Yandex Object Storage.
- shardGroups List<MdbClickhouseClusterShardGroup> - A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement Boolean - Grants the admin user database management permissions.
- sqlUserManagement Boolean - Enables the admin user with user management permissions.
- users List<MdbClickhouseClusterUser> - A user of the ClickHouse cluster. The structure is documented below.
- version String - Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouse - Configuration of the ClickHouse subcluster. The structure is documented below.
- environment string - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts MdbClickhouseClusterHost[] - A host of the ClickHouse cluster. The structure is documented below.
- networkId string - ID of the network, to which the ClickHouse cluster belongs.
- access MdbClickhouseClusterAccess - Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword string - A password used to authorize as the admin user when sql_user_management is enabled.
- backupWindowStart MdbClickhouseClusterBackupWindowStart - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloudStorage MdbClickhouseClusterCloudStorage
- copySchemaOnNewHosts boolean - Whether to copy schema on new ClickHouse hosts.
- databases MdbClickhouseClusterDatabase[] - A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection boolean - Inhibits deletion of the cluster. Can be either true or false.
- description string - Description of the ClickHouse cluster.
- folderId string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas MdbClickhouseClusterFormatSchema[] - A set of protobuf or capnproto format schemas. The structure is documented below.
- labels {[key: string]: string} - A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow MdbClickhouseClusterMaintenanceWindow
- mlModels MdbClickhouseClusterMlModel[] - A group of machine learning models. The structure is documented below.
- name string - Name of the ClickHouse cluster.
- securityGroupIds string[] - A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId string - ID of the service account used for access to Yandex Object Storage.
- shardGroups MdbClickhouseClusterShardGroup[] - A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement boolean - Grants the admin user database management permissions.
- sqlUserManagement boolean - Enables the admin user with user management permissions.
- users MdbClickhouseClusterUser[] - A user of the ClickHouse cluster. The structure is documented below.
- version string - Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouseArgs - Configuration of the ClickHouse subcluster. The structure is documented below.
- environment str - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts Sequence[MdbClickhouseClusterHostArgs] - A host of the ClickHouse cluster. The structure is documented below.
- network_id str - ID of the network, to which the ClickHouse cluster belongs.
- access MdbClickhouseClusterAccessArgs - Access policy to the ClickHouse cluster. The structure is documented below.
- admin_password str - A password used to authorize as the admin user when sql_user_management is enabled.
- backup_window_start MdbClickhouseClusterBackupWindowStartArgs - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloud_storage MdbClickhouseClusterCloudStorageArgs
- copy_schema_on_new_hosts bool - Whether to copy schema on new ClickHouse hosts.
- databases Sequence[MdbClickhouseClusterDatabaseArgs] - A database of the ClickHouse cluster. The structure is documented below.
- deletion_protection bool - Inhibits deletion of the cluster. Can be either true or false.
- description str - Description of the ClickHouse cluster.
- folder_id str - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- format_schemas Sequence[MdbClickhouseClusterFormatSchemaArgs] - A set of protobuf or capnproto format schemas. The structure is documented below.
- labels Mapping[str, str] - A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenance_window MdbClickhouseClusterMaintenanceWindowArgs
- ml_models Sequence[MdbClickhouseClusterMlModelArgs] - A group of machine learning models. The structure is documented below.
- name str - Name of the ClickHouse cluster.
- security_group_ids Sequence[str] - A set of IDs of security groups assigned to hosts of the cluster.
- service_account_id str - ID of the service account used for access to Yandex Object Storage.
- shard_groups Sequence[MdbClickhouseClusterShardGroupArgs] - A group of ClickHouse shards. The structure is documented below.
- sql_database_management bool - Grants the admin user database management permissions.
- sql_user_management bool - Enables the admin user with user management permissions.
- users Sequence[MdbClickhouseClusterUserArgs] - A user of the ClickHouse cluster. The structure is documented below.
- version str - Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeperArgs - Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse Property Map - Configuration of the ClickHouse subcluster. The structure is documented below.
- environment String - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts List<Property Map> - A host of the ClickHouse cluster. The structure is documented below.
- networkId String - ID of the network, to which the ClickHouse cluster belongs.
- access Property Map - Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword String - A password used to authorize as the admin user when sql_user_management is enabled.
- backupWindowStart Property Map - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloudStorage Property Map
- copySchemaOnNewHosts Boolean - Whether to copy schema on new ClickHouse hosts.
- databases List<Property Map> - A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection Boolean - Inhibits deletion of the cluster. Can be either true or false.
- description String - Description of the ClickHouse cluster.
- folderId String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas List<Property Map> - A set of protobuf or capnproto format schemas. The structure is documented below.
- labels Map<String> - A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow Property Map
- mlModels List<Property Map> - A group of machine learning models. The structure is documented below.
- name String - Name of the ClickHouse cluster.
- securityGroupIds List<String> - A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId String - ID of the service account used for access to Yandex Object Storage.
- shardGroups List<Property Map> - A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement Boolean - Grants the admin user database management permissions.
- sqlUserManagement Boolean - Enables the admin user with user management permissions.
- users List<Property Map> - A user of the ClickHouse cluster. The structure is documented below.
- version String - Version of the ClickHouse server software.
- zookeeper Property Map - Configuration of the ZooKeeper subcluster. The structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the MdbClickhouseCluster resource produces the following output properties:
- CreatedAt string - Timestamp of cluster creation.
- Health string - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- Id string - The provider-assigned unique ID for this managed resource.
- Status string - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- CreatedAt string - Timestamp of cluster creation.
- Health string - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- Id string - The provider-assigned unique ID for this managed resource.
- Status string - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- createdAt String - Timestamp of cluster creation.
- health String - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- id String - The provider-assigned unique ID for this managed resource.
- status String - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- createdAt string - Timestamp of cluster creation.
- health string - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- id string - The provider-assigned unique ID for this managed resource.
- status string - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- created_at str - Timestamp of cluster creation.
- health str - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- id str - The provider-assigned unique ID for this managed resource.
- status str - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- createdAt String - Timestamp of cluster creation.
- health String - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- id String - The provider-assigned unique ID for this managed resource.
- status String - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
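As a quick illustration, these computed properties can be exported from the Python constructor example above (a minimal sketch; mdb_clickhouse_cluster_resource is the cluster defined earlier on this page):

import pulumi

# Computed outputs become known once the deployment completes.
pulumi.export("clickhouse_cluster_id", mdb_clickhouse_cluster_resource.id)
pulumi.export("clickhouse_cluster_health", mdb_clickhouse_cluster_resource.health)
pulumi.export("clickhouse_cluster_status", mdb_clickhouse_cluster_resource.status)
pulumi.export("clickhouse_cluster_created_at", mdb_clickhouse_cluster_resource.created_at)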
Look up Existing MdbClickhouseCluster Resource
Get an existing MdbClickhouseCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MdbClickhouseClusterState, opts?: CustomResourceOptions): MdbClickhouseCluster
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
access: Optional[MdbClickhouseClusterAccessArgs] = None,
admin_password: Optional[str] = None,
backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
copy_schema_on_new_hosts: Optional[bool] = None,
created_at: Optional[str] = None,
databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
deletion_protection: Optional[bool] = None,
description: Optional[str] = None,
environment: Optional[str] = None,
folder_id: Optional[str] = None,
format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
health: Optional[str] = None,
hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
labels: Optional[Mapping[str, str]] = None,
maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
name: Optional[str] = None,
network_id: Optional[str] = None,
security_group_ids: Optional[Sequence[str]] = None,
service_account_id: Optional[str] = None,
shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
sql_database_management: Optional[bool] = None,
sql_user_management: Optional[bool] = None,
status: Optional[str] = None,
users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
version: Optional[str] = None,
zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None) -> MdbClickhouseCluster
func GetMdbClickhouseCluster(ctx *Context, name string, id IDInput, state *MdbClickhouseClusterState, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public static MdbClickhouseCluster Get(string name, Input<string> id, MdbClickhouseClusterState? state, CustomResourceOptions? opts = null)
public static MdbClickhouseCluster get(String name, Output<String> id, MdbClickhouseClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
- resource_name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
- name - The unique name of the resulting resource.
- id - The unique provider ID of the resource to lookup.
- state - Any extra arguments used during the lookup.
- opts - A bag of options that control this resource's behavior.
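In Python, for example, a cluster provisioned outside the current program can be looked up by its provider-assigned ID (a minimal sketch; the ID below is a placeholder):

import pulumi
import pulumi_yandex as yandex

# Look up an existing cluster by ID; no new resource is created.
existing = yandex.MdbClickhouseCluster.get("existing-clickhouse", "<cluster-id>")
pulumi.export("existing_cluster_version", existing.version)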
- Access MdbClickhouseClusterAccess - Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string - A password used to authorize as the admin user when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStart - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- Clickhouse MdbClickhouseClusterClickhouse - Configuration of the ClickHouse subcluster. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorage
- CopySchemaOnNewHosts bool - Whether to copy schema on new ClickHouse hosts.
- CreatedAt string - Timestamp of cluster creation.
- Databases List<MdbClickhouseClusterDatabase> - A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool - Inhibits deletion of the cluster. Can be either true or false.
- Description string - Description of the ClickHouse cluster.
- Environment string - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- FolderId string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas List<MdbClickhouseClusterFormatSchema> - A set of protobuf or capnproto format schemas. The structure is documented below.
- Health string - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- Hosts List<MdbClickhouseClusterHost> - A host of the ClickHouse cluster. The structure is documented below.
- Labels Dictionary<string, string> - A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindow
- MlModels List<MdbClickhouseClusterMlModel> - A group of machine learning models. The structure is documented below.
- Name string - Name of the ClickHouse cluster.
- NetworkId string - ID of the network, to which the ClickHouse cluster belongs.
- SecurityGroupIds List<string> - A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string - ID of the service account used for access to Yandex Object Storage.
- ShardGroups List<MdbClickhouseClusterShardGroup> - A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool - Grants the admin user database management permissions.
- SqlUserManagement bool - Enables the admin user with user management permissions.
- Status string - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- Users List<MdbClickhouseClusterUser> - A user of the ClickHouse cluster. The structure is documented below.
- Version string - Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- Access MdbClickhouseClusterAccessArgs - Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string - A password used to authorize as the admin user when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- Clickhouse MdbClickhouseClusterClickhouseArgs - Configuration of the ClickHouse subcluster. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorageArgs
- CopySchemaOnNewHosts bool - Whether to copy schema on new ClickHouse hosts.
- CreatedAt string - Timestamp of cluster creation.
- Databases []MdbClickhouseClusterDatabaseArgs - A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool - Inhibits deletion of the cluster. Can be either true or false.
- Description string - Description of the ClickHouse cluster.
- Environment string - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- FolderId string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas []MdbClickhouseClusterFormatSchemaArgs - A set of protobuf or capnproto format schemas. The structure is documented below.
- Health string - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- Hosts []MdbClickhouseClusterHostArgs - A host of the ClickHouse cluster. The structure is documented below.
- Labels map[string]string - A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
- MlModels []MdbClickhouseClusterMlModelArgs - A group of machine learning models. The structure is documented below.
- Name string - Name of the ClickHouse cluster.
- NetworkId string - ID of the network, to which the ClickHouse cluster belongs.
- SecurityGroupIds []string - A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string - ID of the service account used for access to Yandex Object Storage.
- ShardGroups []MdbClickhouseClusterShardGroupArgs - A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool - Grants the admin user database management permissions.
- SqlUserManagement bool - Enables the admin user with user management permissions.
- Status string - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- Users []MdbClickhouseClusterUserArgs - A user of the ClickHouse cluster. The structure is documented below.
- Version string - Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeperArgs - Configuration of the ZooKeeper subcluster. The structure is documented below.
- access MdbClickhouseClusterAccess - Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword String - A password used to authorize as the admin user when sql_user_management is enabled.
- backupWindowStart MdbClickhouseClusterBackupWindowStart - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouse - Configuration of the ClickHouse subcluster. The structure is documented below.
- cloudStorage MdbClickhouseClusterCloudStorage
- copySchemaOnNewHosts Boolean - Whether to copy schema on new ClickHouse hosts.
- createdAt String - Timestamp of cluster creation.
- databases List<MdbClickhouseClusterDatabase> - A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection Boolean - Inhibits deletion of the cluster. Can be either true or false.
- description String - Description of the ClickHouse cluster.
- environment String - Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- folderId String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas List<MdbClickhouseClusterFormatSchema> - A set of protobuf or capnproto format schemas. The structure is documented below.
- health String - Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see the health field of the JSON representation in the official documentation.
- hosts List<MdbClickhouseClusterHost> - A host of the ClickHouse cluster. The structure is documented below.
- labels Map<String,String> - A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow MdbClickhouseClusterMaintenanceWindow
- mlModels List<MdbClickhouseClusterMlModel> - A group of machine learning models. The structure is documented below.
- name String - Name of the ClickHouse cluster.
- networkId String - ID of the network, to which the ClickHouse cluster belongs.
- securityGroupIds List<String> - A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId String - ID of the service account used for access to Yandex Object Storage.
- shardGroups List<MdbClickhouseClusterShardGroup> - A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement Boolean - Grants the admin user database management permissions.
- sqlUserManagement Boolean - Enables the admin user with user management permissions.
- status String - Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- users List<MdbClickhouseClusterUser> - A user of the ClickHouse cluster. The structure is documented below.
- version String - Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- access
Mdb
Clickhouse Cluster Access - Access policy to the ClickHouse cluster. The structure is documented below.
- admin
Password string - A password used to authorize as user
admin
whensql_user_management
enabled. - backup
Window MdbStart Clickhouse Cluster Backup Window Start - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse
Mdb
Clickhouse Cluster Clickhouse - Configuration of the ClickHouse subcluster. The structure is documented below.
- cloud
Storage MdbClickhouse Cluster Cloud Storage - copy
Schema booleanOn New Hosts - Whether to copy schema on new ClickHouse hosts.
- created
At string - Timestamp of cluster creation.
- databases
Mdb
Clickhouse Cluster Database[] - A database of the ClickHouse cluster. The structure is documented below.
- deletion
Protection boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description string
- Description of the shard group.
- environment string
- Deployment environment of the ClickHouse cluster. Can be either
PRESTABLE
orPRODUCTION
. - folder
Id string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- format
Schemas MdbClickhouse Cluster Format Schema[] - A set of protobuf or capnproto format schemas. The structure is documented below.
- health string
- Aggregated health of the cluster. Can be
ALIVE
,DEGRADED
,DEAD
orHEALTH_UNKNOWN
. For more information seehealth
field of JSON representation in the official documentation. - hosts
Mdb
Clickhouse Cluster Host[] - A host of the ClickHouse cluster. The structure is documented below.
- labels {[key: string]: string}
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenance
Window MdbClickhouse Cluster Maintenance Window - ml
Models MdbClickhouse Cluster Ml Model[] - A group of machine learning models. The structure is documented below
- name string
- Graphite rollup configuration name.
- network
Id string - ID of the network, to which the ClickHouse cluster belongs.
- security
Group string[]Ids - A set of ids of security groups assigned to hosts of the cluster.
- service
Account stringId - ID of the service account used for access to Yandex Object Storage.
- shard
Groups MdbClickhouse Cluster Shard Group[] - A group of clickhouse shards. The structure is documented below.
- sql
Database booleanManagement - Grants
admin
user database management permission. - sql
User booleanManagement - Enables
admin
user with user management permission. - status string
- Status of the cluster. Can be
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - users
Mdb
Clickhouse Cluster User[] - A user of the ClickHouse cluster. The structure is documented below.
- version string
- Version of the ClickHouse server software.
- zookeeper
Mdb
Clickhouse Cluster Zookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- access
Mdb
Clickhouse Cluster Access Args - Access policy to the ClickHouse cluster. The structure is documented below.
- admin_
password str - A password used to authorize as user
admin
whensql_user_management
enabled. - backup_
window_ Mdbstart Clickhouse Cluster Backup Window Start Args - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse
Mdb
Clickhouse Cluster Clickhouse Args - Configuration of the ClickHouse subcluster. The structure is documented below.
- cloud_
storage MdbClickhouse Cluster Cloud Storage Args - copy_
schema_ boolon_ new_ hosts - Whether to copy schema on new ClickHouse hosts.
- created_
at str - Timestamp of cluster creation.
- databases
Sequence[Mdb
Clickhouse Cluster Database Args] - A database of the ClickHouse cluster. The structure is documented below.
- deletion_
protection bool - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description str
- Description of the shard group.
- environment str
- Deployment environment of the ClickHouse cluster. Can be either
PRESTABLE
orPRODUCTION
. - folder_
id str - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- format_
schemas Sequence[MdbClickhouse Cluster Format Schema Args] - A set of protobuf or capnproto format schemas. The structure is documented below.
- health str
- Aggregated health of the cluster. Can be
ALIVE
,DEGRADED
,DEAD
orHEALTH_UNKNOWN
. For more information seehealth
field of JSON representation in the official documentation. - hosts
Sequence[Mdb
Clickhouse Cluster Host Args] - A host of the ClickHouse cluster. The structure is documented below.
- labels Mapping[str, str]
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenance_
window MdbClickhouse Cluster Maintenance Window Args - ml_
models Sequence[MdbClickhouse Cluster Ml Model Args] - A group of machine learning models. The structure is documented below
- name str
- Graphite rollup configuration name.
- network_
id str - ID of the network, to which the ClickHouse cluster belongs.
- security_
group_ Sequence[str]ids - A set of ids of security groups assigned to hosts of the cluster.
- service_
account_ strid - ID of the service account used for access to Yandex Object Storage.
- shard_
groups Sequence[MdbClickhouse Cluster Shard Group Args] - A group of clickhouse shards. The structure is documented below.
- sql_
database_ boolmanagement - Grants
admin
user database management permission. - sql_
user_ boolmanagement - Enables
admin
user with user management permission. - status str
- Status of the cluster. Can be
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - users
Sequence[Mdb
Clickhouse Cluster User Args] - A user of the ClickHouse cluster. The structure is documented below.
- version str
- Version of the ClickHouse server software.
- zookeeper
Mdb
Clickhouse Cluster Zookeeper Args - Configuration of the ZooKeeper subcluster. The structure is documented below.
- access Property Map
- Access policy to the ClickHouse cluster. The structure is documented below.
- admin
Password String - A password used to authorize as user
admin
whensql_user_management
enabled. - backup
Window Property MapStart - Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse Property Map
- Configuration of the ClickHouse subcluster. The structure is documented below.
- cloud
Storage Property Map - copy
Schema BooleanOn New Hosts - Whether to copy schema on new ClickHouse hosts.
- created
At String - Timestamp of cluster creation.
- databases List<Property Map>
- A database of the ClickHouse cluster. The structure is documented below.
- deletion
Protection Boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description String
- Description of the shard group.
- environment String
- Deployment environment of the ClickHouse cluster. Can be either
PRESTABLE
orPRODUCTION
. - folder
Id String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- format
Schemas List<Property Map> - A set of protobuf or capnproto format schemas. The structure is documented below.
- health String
- Aggregated health of the cluster. Can be
ALIVE
,DEGRADED
,DEAD
orHEALTH_UNKNOWN
. For more information seehealth
field of JSON representation in the official documentation. - hosts List<Property Map>
- A host of the ClickHouse cluster. The structure is documented below.
- labels Map<String>
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenance
Window Property Map - ml
Models List<Property Map> - A group of machine learning models. The structure is documented below
- name String
- Graphite rollup configuration name.
- network
Id String - ID of the network, to which the ClickHouse cluster belongs.
- security
Group List<String>Ids - A set of ids of security groups assigned to hosts of the cluster.
- service
Account StringId - ID of the service account used for access to Yandex Object Storage.
- shard
Groups List<Property Map> - A group of clickhouse shards. The structure is documented below.
- sql
Database BooleanManagement - Grants
admin
user database management permission. - sql
User BooleanManagement - Enables
admin
user with user management permission. - status String
- Status of the cluster. Can be
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - users List<Property Map>
- A user of the ClickHouse cluster. The structure is documented below.
- version String
- Version of the ClickHouse server software.
- zookeeper Property Map
- Configuration of the ZooKeeper subcluster. The structure is documented below.
Supporting Types
MdbClickhouseClusterAccess, MdbClickhouseClusterAccessArgs
- DataLens bool - Allow access for DataLens. Can be either true or false.
- Metrika bool - Allow access for Yandex.Metrika. Can be either true or false.
- Serverless bool - Allow access for Serverless. Can be either true or false.
- WebSql bool - Allow access for Web SQL. Can be either true or false.
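For example, to open the cluster to DataLens and Web SQL while leaving Metrika and Serverless access off, the block could be sketched in C# in the style of the example at the top of this page (values are illustrative):

Access = new Yandex.Inputs.MdbClickhouseClusterAccessArgs
{
    DataLens = true,   // allow DataLens connections
    WebSql = true,     // allow Web SQL queries from the console
    Metrika = false,
    Serverless = false,
},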
MdbClickhouseClusterBackupWindowStart, MdbClickhouseClusterBackupWindowStartArgs
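The fields of this block did not survive extraction. Assuming the provider's backup_window_start block, which takes the hour and minute (UTC) at which the daily backup starts, a minimal sketch:

BackupWindowStart = new Yandex.Inputs.MdbClickhouseClusterBackupWindowStartArgs
{
    Hours = 3,    // assumed field: hour of day, UTC
    Minutes = 30, // assumed field: minute of the hour
},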
MdbClickhouseClusterClickhouse, MdbClickhouseClusterClickhouseArgs
- Resources MdbClickhouseClusterClickhouseResources - Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
- Config MdbClickhouseClusterClickhouseConfig - Main ClickHouse cluster configuration.
MdbClickhouseClusterClickhouseConfig, MdbClickhouseClusterClickhouseConfigArgs
- BackgroundPoolSize int
- BackgroundSchedulePoolSize int
- Compressions List<MdbClickhouseClusterClickhouseConfigCompression> - Data compression configuration. The structure is documented below.
- GeobaseUri string
- GraphiteRollups List<MdbClickhouseClusterClickhouseConfigGraphiteRollup> - Graphite rollup configuration. The structure is documented below.
- Kafka MdbClickhouseClusterClickhouseConfigKafka - Kafka connection configuration. The structure is documented below.
- KafkaTopics List<MdbClickhouseClusterClickhouseConfigKafkaTopic> - Kafka topic connection configuration. The structure is documented below.
- KeepAliveTimeout int
- LogLevel string
- MarkCacheSize int
- MaxConcurrentQueries int
- MaxConnections int
- MaxPartitionSizeToDrop int
- MaxTableSizeToDrop int
- MergeTree MdbClickhouseClusterClickhouseConfigMergeTree - MergeTree engine configuration. The structure is documented below.
- MetricLogEnabled bool
- MetricLogRetentionSize int
- MetricLogRetentionTime int
- PartLogRetentionSize int
- PartLogRetentionTime int
- QueryLogRetentionSize int
- QueryLogRetentionTime int
- QueryThreadLogEnabled bool
- QueryThreadLogRetentionSize int
- QueryThreadLogRetentionTime int
- Rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq - RabbitMQ connection configuration. The structure is documented below.
- TextLogEnabled bool
- TextLogLevel string
- TextLogRetentionSize int
- TextLogRetentionTime int
- Timezone string
- TraceLogEnabled bool
- TraceLogRetentionSize int
- TraceLogRetentionTime int
- UncompressedCacheSize int
MdbClickhouseClusterClickhouseConfigCompression, MdbClickhouseClusterClickhouseConfigCompressionArgs
- Method string - Compression method. Two methods are available: LZ4 and zstd.
- MinPartSize int - Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the min part size value.
- MinPartSizeRatio double - Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the min part size ratio value.
MdbClickhouseClusterClickhouseConfigGraphiteRollup, MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
- Name string - Graphite rollup configuration name.
- Patterns List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern> - Set of thinning rules.
MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern, MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
- Function string - Aggregation function name.
- Regexp string - Regular expression that the metric name must match.
- Retentions List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention> - Retention parameters.
MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention, MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
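The fields of this block were elided here; judging by the age and precision values used in the example at the top of this page, a single retention rule could be sketched as:

new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
{
    Age = 1000,    // minimum age of the data, in seconds
    Precision = 3, // precision of determining the age of the data, in seconds
},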
MdbClickhouseClusterClickhouseConfigKafka, MdbClickhouseClusterClickhouseConfigKafkaArgs
- SaslMechanism string - SASL mechanism used in Kafka authentication.
- SaslPassword string - User password on the Kafka server.
- SaslUsername string - Username on the Kafka server.
- SecurityProtocol string - Security protocol used to connect to the Kafka server.
MdbClickhouseClusterClickhouseConfigKafkaTopic, MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
- Name string - Kafka topic name.
- Settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings - Kafka connection settings, in the same format as the kafka block above.
MdbClickhouseClusterClickhouseConfigKafkaTopicSettings, MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
- SaslMechanism string - SASL mechanism used in Kafka authentication.
- SaslPassword string - User password on the Kafka server.
- SaslUsername string - Username on the Kafka server.
- SecurityProtocol string - Security protocol used to connect to the Kafka server.
MdbClickhouseClusterClickhouseConfigMergeTree, MdbClickhouseClusterClickhouseConfigMergeTreeArgs
- MaxBytesToMergeAtMinSpaceInPool int - Maximum total size of a data part to merge when the number of free threads in the background pool is at its minimum.
- MaxReplicatedMergesInQueue int - Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
- NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge int - Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
- PartsToDelayInsert int - Number of active data parts in a table; on exceeding it, ClickHouse starts to artificially reduce the rate of inserting data into the table.
- PartsToThrowInsert int - Threshold value of active data parts in a table; on exceeding it, ClickHouse throws the 'Too many parts ...' exception.
- ReplicatedDeduplicationWindow int - Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
- ReplicatedDeduplicationWindowSeconds int - Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
MdbClickhouseClusterClickhouseConfigRabbitmq, MdbClickhouseClusterClickhouseConfigRabbitmqArgs
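This block's fields were elided here; the example at the top of the page supplies RabbitMQ credentials, so a minimal sketch would be:

Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
{
    Username = "rabbit_user", // RabbitMQ username
    Password = "rabbit_pass", // RabbitMQ user password
},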
MdbClickhouseClusterClickhouseResources, MdbClickhouseClusterClickhouseResourcesArgs
- DiskSize int - Volume of the storage available to a ClickHouse host, in gigabytes.
- DiskTypeId string - Type of the storage of ClickHouse hosts. For more information see the official documentation.
- ResourcePresetId string
MdbClickhouseClusterCloudStorage, MdbClickhouseClusterCloudStorageArgs
- Enabled bool - Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
MdbClickhouseClusterDatabase, MdbClickhouseClusterDatabaseArgs
- Name string - The name of the database.
MdbClickhouseClusterFormatSchema, MdbClickhouseClusterFormatSchemaArgs
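The fields of this block were elided here. Assuming the provider's format_schema block, which names a schema, declares its type (protobuf or capnproto) and points at the schema file, a sketch:

new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
{
    Name = "test_schema",                                        // assumed field: schema name
    Type = "FORMAT_SCHEMA_TYPE_CAPNPROTO",                       // assumed field: schema type
    Uri = "https://storage.yandexcloud.net/bucket/schema.capnp", // assumed field: schema file location
},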
MdbClickhouseClusterHost, MdbClickhouseClusterHostArgs
- Type string - The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
- Zone string - The availability zone where the ClickHouse host will be created. For more information see the official documentation.
- AssignPublicIp bool - Sets whether the host should get a public IP address on creation. Can be either true or false.
- Fqdn string - The fully qualified domain name of the host.
- ShardName string - The name of the shard to which the host belongs.
- SubnetId string - The ID of the subnet to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
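A host entry combining the fields above might look like the following sketch (the subnet reference and shard name are placeholders):

Hosts =
{
    new Yandex.Inputs.MdbClickhouseClusterHostArgs
    {
        Type = "CLICKHOUSE",        // or "ZOOKEEPER" for ZooKeeper hosts
        Zone = "ru-central1-a",
        SubnetId = fooVpcSubnet.Id, // a subnet in the cluster's network
        AssignPublicIp = false,
        ShardName = "shard1",       // assumed field: shard the host belongs to
    },
},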
MdbClickhouseClusterMaintenanceWindow, MdbClickhouseClusterMaintenanceWindowArgs
- Type string - Type of the maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window need to be specified with the weekly window.
- Day string - Day of the week for the maintenance window if the window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
- Hour int - Hour of the day in the UTC time zone (1-24) for the maintenance window if the window type is weekly.
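For instance, a weekly window on Saturday at 12:00 UTC could be sketched as (values are illustrative):

MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
{
    Type = "WEEKLY", // day and hour must be set for weekly windows
    Day = "SAT",
    Hour = 12,
},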
MdbClickhouseClusterMlModel, MdbClickhouseClusterMlModelArgs
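The fields of this block were elided here. Assuming the provider's ml_model block, which names a model, declares its type and points at the model file, a sketch:

new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
{
    Name = "test_model",                                      // assumed field: model name
    Type = "ML_MODEL_TYPE_CATBOOST",                          // assumed field: model type
    Uri = "https://storage.yandexcloud.net/bucket/model.bin", // assumed field: model file location
},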
MdbClickhouseClusterShardGroup, MdbClickhouseClusterShardGroupArgs
- Name string - The name of the shard group.
- ShardNames List<string> - List of shard names that belong to the shard group.
- Description string - Description of the shard group.
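A shard group gathering two shards might be sketched as (all names are placeholders):

ShardGroups =
{
    new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
    {
        Name = "shard_group1",
        Description = "Shard group that holds shard1 and shard2",
        ShardNames =
        {
            "shard1",
            "shard2",
        },
    },
},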
MdbClickhouseClusterUser, MdbClickhouseClusterUserArgs
- Name string - The name of the user.
- Password string - The password of the user.
- Permissions List<MdbClickhouseClusterUserPermission> - Set of permissions granted to the user. The structure is documented below.
- Quotas List<MdbClickhouseClusterUserQuota> - Set of user quotas. The structure is documented below.
- Settings MdbClickhouseClusterUserSettings - Custom settings for the user. The structure is documented below.
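Putting the pieces together, a user with one database permission and an hourly quota could be sketched as (values are illustrative):

Users =
{
    new Yandex.Inputs.MdbClickhouseClusterUserArgs
    {
        Name = "user1",
        Password = "your_password",
        Permissions =
        {
            new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
            {
                DatabaseName = "db1", // grants the user access to this database
            },
        },
        Quotas =
        {
            new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
            {
                IntervalDuration = 3600000, // one hour, in milliseconds
                Queries = 10000,            // cap on total queries per interval
                Errors = 1000,              // cap on queries that threw an exception
            },
        },
    },
},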
MdbClickhouseClusterUserPermission, MdbClickhouseClusterUserPermissionArgs
- DatabaseName string - The name of the database that the permission grants access to.
- DatabaseName string - The name of the database that the permission grants access to.
- databaseName String - The name of the database that the permission grants access to.
- databaseName string - The name of the database that the permission grants access to.
- database_name str - The name of the database that the permission grants access to.
- databaseName String - The name of the database that the permission grants access to.
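Since a permission entry carries only a database name, granting one user access to several databases means listing one entry per database, as in this sketch with hypothetical names:

Permissions =
{
    new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs { DatabaseName = "sales" },
    new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs { DatabaseName = "marketing" },
},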
MdbClickhouseClusterUserQuota, MdbClickhouseClusterUserQuotaArgs
- IntervalDuration int - Duration of interval for quota in milliseconds.
- Errors int - The number of queries that threw an exception.
- ExecutionTime int - The total query execution time, in milliseconds (wall time).
- Queries int - The total number of queries.
- ReadRows int - The total number of source rows read from tables for running the query, on all remote servers.
- ResultRows int - The total number of rows given as the result.
- IntervalDuration int - Duration of interval for quota in milliseconds.
- Errors int - The number of queries that threw an exception.
- ExecutionTime int - The total query execution time, in milliseconds (wall time).
- Queries int - The total number of queries.
- ReadRows int - The total number of source rows read from tables for running the query, on all remote servers.
- ResultRows int - The total number of rows given as the result.
- intervalDuration Integer - Duration of interval for quota in milliseconds.
- errors Integer - The number of queries that threw an exception.
- executionTime Integer - The total query execution time, in milliseconds (wall time).
- queries Integer - The total number of queries.
- readRows Integer - The total number of source rows read from tables for running the query, on all remote servers.
- resultRows Integer - The total number of rows given as the result.
- intervalDuration number - Duration of interval for quota in milliseconds.
- errors number - The number of queries that threw an exception.
- executionTime number - The total query execution time, in milliseconds (wall time).
- queries number - The total number of queries.
- readRows number - The total number of source rows read from tables for running the query, on all remote servers.
- resultRows number - The total number of rows given as the result.
- interval_duration int - Duration of interval for quota in milliseconds.
- errors int - The number of queries that threw an exception.
- execution_time int - The total query execution time, in milliseconds (wall time).
- queries int - The total number of queries.
- read_rows int - The total number of source rows read from tables for running the query, on all remote servers.
- result_rows int - The total number of rows given as the result.
- intervalDuration Number - Duration of interval for quota in milliseconds.
- errors Number - The number of queries that threw an exception.
- executionTime Number - The total query execution time, in milliseconds (wall time).
- queries Number - The total number of queries.
- readRows Number - The total number of source rows read from tables for running the query, on all remote servers.
- resultRows Number - The total number of rows given as the result.
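Several quota blocks, each with its own IntervalDuration, can be attached to one user to enforce both short- and long-window limits; the values in this sketch are hypothetical:

Quotas =
{
    new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
    {
        IntervalDuration = 3600000,   // 1-hour window
        Queries = 1000,               // at most 1000 queries per window
        Errors = 100,                 // and at most 100 queries that threw an exception
    },
    new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
    {
        IntervalDuration = 86400000,  // 24-hour window
        ExecutionTime = 7200000,      // at most 2 hours of total wall time per window
    },
},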
MdbClickhouseClusterUserSettings, MdbClickhouseClusterUserSettingsArgs
- AddHttpCorsHeader bool - Include CORS headers in HTTP responses.
- AllowDdl bool - Allows or denies DDL queries.
- Compile bool - Enable compilation of queries.
- CompileExpressions bool - Turn on expression compilation.
- ConnectTimeout int - Connect timeout in milliseconds on the socket used for communicating with the client.
- CountDistinctImplementation string - Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- DistinctOverflowMode string - Sets behaviour on overflow when using DISTINCT. Possible values:
- DistributedAggregationMemoryEfficient bool - Determines the behavior of distributed subqueries.
- DistributedDdlTaskTimeout int - Timeout for DDL queries, in milliseconds.
- DistributedProductMode string - Changes the behaviour of distributed subqueries.
- EmptyResultForAggregationByEmptySet bool - Allows returning an empty result.
- EnableHttpCompression bool - Enables or disables data compression in the response to an HTTP request.
- FallbackToStaleReplicasForDistributedQueries bool - Forces a query to an out-of-date replica if updated data is not available.
- ForceIndexByDate bool - Disables query execution if the index can't be used by date.
- ForcePrimaryKey bool - Disables query execution if indexing by the primary key is not possible.
- GroupByOverflowMode string - Sets behaviour on overflow during GROUP BY operations. Possible values:
- GroupByTwoLevelThreshold int - Sets the threshold on the number of keys after which two-level aggregation should be used.
- GroupByTwoLevelThresholdBytes int - Sets the threshold on the number of bytes after which two-level aggregation should be used.
- HttpConnectionTimeout int - Timeout for HTTP connection in milliseconds.
- HttpHeadersProgressInterval int - Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- HttpReceiveTimeout int - Timeout for HTTP connection in milliseconds.
- HttpSendTimeout int - Timeout for HTTP connection in milliseconds.
- InputFormatDefaultsForOmittedFields bool - When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- InputFormatValuesInterpretExpressions bool - Enables or disables the full SQL parser if the fast stream parser can't parse the data.
- InsertQuorum int - Enables quorum writes.
- InsertQuorumTimeout int - Write-to-quorum timeout in milliseconds.
- JoinOverflowMode string - Sets behaviour on overflow in JOIN. Possible values:
- JoinUseNulls bool - Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- JoinedSubqueryRequiresAlias bool - Requires aliases for subselects and table functions in FROM when more than one table is present.
- LowCardinalityAllowInNativeFormat bool - Allows or restricts using the LowCardinality data type with the Native format.
- MaxAstDepth int - Maximum abstract syntax tree depth.
- MaxAstElements int - Maximum abstract syntax tree elements.
- MaxBlockSize int - A recommendation for what size of the block (in a count of rows) to load from tables.
- MaxBytesBeforeExternalGroupBy int - Limit in bytes for using memory for GROUP BY before using swap on disk.
- MaxBytesBeforeExternalSort int - This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
- MaxBytesInDistinct int - Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- MaxBytesInJoin int - Limit on maximum size of the hash table for JOIN, in bytes.
- MaxBytesInSet int - Limit on the number of bytes in the set resulting from the execution of the IN section.
- MaxBytesToRead int - Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- MaxBytesToSort int - Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- MaxBytesToTransfer int - Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxColumnsToRead int - Limits the maximum number of columns that can be read from a table in a single query.
- MaxExecutionTime int - Limits the maximum query execution time in milliseconds.
- MaxExpandedAstElements int - Maximum abstract syntax tree elements after expansion of aliases.
- MaxInsertBlockSize int - The size of blocks (in a count of rows) to form for insertion into a table.
- MaxMemoryUsage int - Limits the maximum memory usage (in bytes) for processing queries on a single server.
- MaxMemoryUsageForUser int - Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- MaxNetworkBandwidth int - Limits the speed of the data exchange over the network in bytes per second.
- MaxNetworkBandwidthForUser int - Limits the speed of the data exchange over the network in bytes per second.
- MaxQuerySize int - The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- MaxReplicaDelayForDistributedQueries int - Disables lagging replicas for distributed queries.
- MaxResultBytes int - Limits the number of bytes in the result.
- MaxResultRows int - Limits the number of rows in the result.
- MaxRowsInDistinct int - Limits the maximum number of different rows when using DISTINCT.
- MaxRowsInJoin int - Limit on maximum size of the hash table for JOIN, in rows.
- MaxRowsInSet int - Limit on the number of rows in the set resulting from the execution of the IN section.
- MaxRowsToGroupBy int - Limits the maximum number of unique keys received from aggregation.
- MaxRowsToRead int - Limits the maximum number of rows that can be read from a table when running a query.
- MaxRowsToSort int - Limits the maximum number of rows that can be read from a table for sorting.
- MaxRowsToTransfer int - Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxTemporaryColumns int - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- MaxTemporaryNonConstColumns int - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- MaxThreads int - The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- MergeTreeMaxBytesToUseCache int - If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn't use the cache of uncompressed blocks.
- MergeTreeMaxRowsToUseCache int - If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn't use the cache of uncompressed blocks.
- MergeTreeMinBytesForConcurrentRead int - If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- MergeTreeMinRowsForConcurrentRead int - If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent read from this file on several threads.
- MinBytesToUseDirectIo int - The minimum data volume required for using direct I/O access to the storage disk.
- MinCountToCompile int - How many times to potentially use a compiled chunk of code before running compilation.
- MinCountToCompileExpression int - A query waits for the expression compilation process to complete prior to continuing execution.
- MinExecutionSpeed int - Minimal execution speed in rows per second.
- MinExecutionSpeedBytes int - Minimal execution speed in bytes per second.
- MinInsertBlockSizeBytes int - Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- MinInsertBlockSizeRows int - Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- OutputFormatJsonQuote64bitIntegers bool - If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- OutputFormatJsonQuoteDenormals bool - Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- Priority int - Query priority.
- QuotaMode string - Quota accounting mode.
- ReadOverflowMode string - Sets behaviour on overflow while reading. Possible values:
- Readonly int - Restricts permissions for read data, write data, and change settings queries.
- ReceiveTimeout int - Receive timeout in milliseconds on the socket used for communicating with the client.
- ReplicationAlterPartitionsSync int - For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- ResultOverflowMode string - Sets behaviour on overflow in the result. Possible values:
- SelectSequentialConsistency bool - Enables or disables sequential consistency for SELECT queries.
- SendProgressInHttpHeaders bool - Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- SendTimeout int - Send timeout in milliseconds on the socket used for communicating with the client.
- SetOverflowMode string - Sets behaviour on overflow in the resulting set. Possible values:
- SkipUnavailableShards bool - Enables or disables silently skipping of unavailable shards.
- SortOverflowMode string - Sets behaviour on overflow while sorting. Possible values:
- TimeoutOverflowMode string - Sets behaviour on overflow. Possible values:
- TransferOverflowMode string - Sets behaviour on overflow. Possible values:
- TransformNullIn bool - Enables equality of NULL values for the IN operator.
- UseUncompressedCache bool - Whether to use a cache of uncompressed blocks.
- AddHttpCorsHeader bool - Include CORS headers in HTTP responses.
- AllowDdl bool - Allows or denies DDL queries.
- Compile bool - Enable compilation of queries.
- CompileExpressions bool - Turn on expression compilation.
- ConnectTimeout int - Connect timeout in milliseconds on the socket used for communicating with the client.
- CountDistinctImplementation string - Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- DistinctOverflowMode string - Sets behaviour on overflow when using DISTINCT. Possible values:
- DistributedAggregationMemoryEfficient bool - Determines the behavior of distributed subqueries.
- DistributedDdlTaskTimeout int - Timeout for DDL queries, in milliseconds.
- DistributedProductMode string - Changes the behaviour of distributed subqueries.
- EmptyResultForAggregationByEmptySet bool - Allows returning an empty result.
- EnableHttpCompression bool - Enables or disables data compression in the response to an HTTP request.
- FallbackToStaleReplicasForDistributedQueries bool - Forces a query to an out-of-date replica if updated data is not available.
- ForceIndexByDate bool - Disables query execution if the index can't be used by date.
- ForcePrimaryKey bool - Disables query execution if indexing by the primary key is not possible.
- GroupByOverflowMode string - Sets behaviour on overflow during GROUP BY operations. Possible values:
- GroupByTwoLevelThreshold int - Sets the threshold on the number of keys after which two-level aggregation should be used.
- GroupByTwoLevelThresholdBytes int - Sets the threshold on the number of bytes after which two-level aggregation should be used.
- HttpConnectionTimeout int - Timeout for HTTP connection in milliseconds.
- HttpHeadersProgressInterval int - Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- HttpReceiveTimeout int - Timeout for HTTP connection in milliseconds.
- HttpSendTimeout int - Timeout for HTTP connection in milliseconds.
- InputFormatDefaultsForOmittedFields bool - When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- InputFormatValuesInterpretExpressions bool - Enables or disables the full SQL parser if the fast stream parser can't parse the data.
- InsertQuorum int - Enables quorum writes.
- InsertQuorumTimeout int - Write-to-quorum timeout in milliseconds.
- JoinOverflowMode string - Sets behaviour on overflow in JOIN. Possible values:
- JoinUseNulls bool - Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- JoinedSubqueryRequiresAlias bool - Requires aliases for subselects and table functions in FROM when more than one table is present.
- LowCardinalityAllowInNativeFormat bool - Allows or restricts using the LowCardinality data type with the Native format.
- MaxAstDepth int - Maximum abstract syntax tree depth.
- MaxAstElements int - Maximum abstract syntax tree elements.
- MaxBlockSize int - A recommendation for what size of the block (in a count of rows) to load from tables.
- MaxBytesBeforeExternalGroupBy int - Limit in bytes for using memory for GROUP BY before using swap on disk.
- MaxBytesBeforeExternalSort int - This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
- MaxBytesInDistinct int - Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- MaxBytesInJoin int - Limit on maximum size of the hash table for JOIN, in bytes.
- MaxBytesInSet int - Limit on the number of bytes in the set resulting from the execution of the IN section.
- MaxBytesToRead int - Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- MaxBytesToSort int - Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- MaxBytesToTransfer int - Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxColumnsToRead int - Limits the maximum number of columns that can be read from a table in a single query.
- MaxExecutionTime int - Limits the maximum query execution time in milliseconds.
- MaxExpandedAstElements int - Maximum abstract syntax tree elements after expansion of aliases.
- MaxInsertBlockSize int - The size of blocks (in a count of rows) to form for insertion into a table.
- MaxMemoryUsage int - Limits the maximum memory usage (in bytes) for processing queries on a single server.
- MaxMemoryUsageForUser int - Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- MaxNetworkBandwidth int - Limits the speed of the data exchange over the network in bytes per second.
- MaxNetworkBandwidthForUser int - Limits the speed of the data exchange over the network in bytes per second.
- MaxQuerySize int - The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- MaxReplicaDelayForDistributedQueries int - Disables lagging replicas for distributed queries.
- MaxResultBytes int - Limits the number of bytes in the result.
- MaxResultRows int - Limits the number of rows in the result.
- MaxRowsInDistinct int - Limits the maximum number of different rows when using DISTINCT.
- MaxRowsInJoin int - Limit on maximum size of the hash table for JOIN, in rows.
- MaxRowsInSet int - Limit on the number of rows in the set resulting from the execution of the IN section.
- MaxRowsToGroupBy int - Limits the maximum number of unique keys received from aggregation.
- MaxRowsToRead int - Limits the maximum number of rows that can be read from a table when running a query.
- MaxRowsToSort int - Limits the maximum number of rows that can be read from a table for sorting.
- MaxRowsToTransfer int - Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxTemporaryColumns int - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- MaxTemporaryNonConstColumns int - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- MaxThreads int - The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- MergeTreeMaxBytesToUseCache int - If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn't use the cache of uncompressed blocks.
- MergeTreeMaxRowsToUseCache int - If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn't use the cache of uncompressed blocks.
- MergeTreeMinBytesForConcurrentRead int - If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- MergeTreeMinRowsForConcurrentRead int - If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent read from this file on several threads.
- MinBytesToUseDirectIo int - The minimum data volume required for using direct I/O access to the storage disk.
- MinCountToCompile int - How many times to potentially use a compiled chunk of code before running compilation.
- MinCountToCompileExpression int - A query waits for the expression compilation process to complete prior to continuing execution.
- MinExecutionSpeed int - Minimal execution speed in rows per second.
- MinExecutionSpeedBytes int - Minimal execution speed in bytes per second.
- MinInsertBlockSizeBytes int - Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- MinInsertBlockSizeRows int - Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- OutputFormatJsonQuote64bitIntegers bool - If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- OutputFormatJsonQuoteDenormals bool - Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- Priority int - Query priority.
- QuotaMode string - Quota accounting mode.
- ReadOverflowMode string - Sets behaviour on overflow while reading. Possible values:
- Readonly int - Restricts permissions for read data, write data, and change settings queries.
- ReceiveTimeout int - Receive timeout in milliseconds on the socket used for communicating with the client.
- ReplicationAlterPartitionsSync int - For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- ResultOverflowMode string - Sets behaviour on overflow in the result. Possible values:
- SelectSequentialConsistency bool - Enables or disables sequential consistency for SELECT queries.
- SendProgressInHttpHeaders bool - Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- SendTimeout int - Send timeout in milliseconds on the socket used for communicating with the client.
- SetOverflowMode string - Sets behaviour on overflow in the resulting set. Possible values:
- SkipUnavailableShards bool - Enables or disables silently skipping of unavailable shards.
- SortOverflowMode string - Sets behaviour on overflow while sorting. Possible values:
- TimeoutOverflowMode string - Sets behaviour on overflow. Possible values:
- TransferOverflowMode string - Sets behaviour on overflow. Possible values:
- TransformNullIn bool - Enables equality of NULL values for the IN operator.
- UseUncompressedCache bool - Whether to use a cache of uncompressed blocks.
- addHttpCorsHeader Boolean - Include CORS headers in HTTP responses.
- allowDdl Boolean - Allows or denies DDL queries.
- compile Boolean - Enable compilation of queries.
- compileExpressions Boolean - Turn on expression compilation.
- connectTimeout Integer - Connect timeout in milliseconds on the socket used for communicating with the client.
- countDistinctImplementation String - Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinctOverflowMode String - Sets behaviour on overflow when using DISTINCT. Possible values:
- distributedAggregationMemoryEfficient Boolean - Determines the behavior of distributed subqueries.
- distributedDdlTaskTimeout Integer - Timeout for DDL queries, in milliseconds.
- distributedProductMode String - Changes the behaviour of distributed subqueries.
- emptyResultForAggregationByEmptySet Boolean - Allows returning an empty result.
- enableHttpCompression Boolean - Enables or disables data compression in the response to an HTTP request.
- fallbackToStaleReplicasForDistributedQueries Boolean - Forces a query to an out-of-date replica if updated data is not available.
- forceIndexByDate Boolean - Disables query execution if the index can't be used by date.
- forcePrimaryKey Boolean - Disables query execution if indexing by the primary key is not possible.
- groupByOverflowMode String - Sets behaviour on overflow during GROUP BY operations. Possible values:
- groupByTwoLevelThreshold Integer - Sets the threshold on the number of keys after which two-level aggregation should be used.
- groupByTwoLevelThresholdBytes Integer - Sets the threshold on the number of bytes after which two-level aggregation should be used.
- httpConnectionTimeout Integer - Timeout for HTTP connection in milliseconds.
- httpHeadersProgressInterval Integer - Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- httpReceiveTimeout Integer - Timeout for HTTP connection in milliseconds.
- httpSendTimeout Integer - Timeout for HTTP connection in milliseconds.
- inputFormatDefaultsForOmittedFields Boolean - When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- inputFormatValuesInterpretExpressions Boolean - Enables or disables the full SQL parser if the fast stream parser can't parse the data.
- insertQuorum Integer - Enables quorum writes.
- insertQuorumTimeout Integer - Write-to-quorum timeout in milliseconds.
- joinOverflowMode String - Sets behaviour on overflow in JOIN. Possible values:
- joinUseNulls Boolean - Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joinedSubqueryRequiresAlias Boolean - Requires aliases for subselects and table functions in FROM when more than one table is present.
- lowCardinalityAllowInNativeFormat Boolean - Allows or restricts using the LowCardinality data type with the Native format.
- maxAstDepth Integer - Maximum abstract syntax tree depth.
- maxAstElements Integer - Maximum abstract syntax tree elements.
- maxBlockSize Integer - A recommendation for what size of the block (in a count of rows) to load from tables.
- maxBytesBeforeExternalGroupBy Integer - Limit in bytes for using memory for GROUP BY before using swap on disk.
- maxBytesBeforeExternalSort Integer - This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
- maxBytesInDistinct Integer - Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- maxBytesInJoin Integer - Limit on maximum size of the hash table for JOIN, in bytes.
- maxBytesInSet Integer - Limit on the number of bytes in the set resulting from the execution of the IN section.
- maxBytesToRead Integer - Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- maxBytesToSort Integer - Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- maxBytesToTransfer Integer - Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxColumnsToRead Integer - Limits the maximum number of columns that can be read from a table in a single query.
- maxExecutionTime Integer - Limits the maximum query execution time in milliseconds.
- maxExpandedAstElements Integer - Maximum abstract syntax tree elements after expansion of aliases.
- maxInsertBlockSize Integer - The size of blocks (in a count of rows) to form for insertion into a table.
- maxMemoryUsage Integer - Limits the maximum memory usage (in bytes) for processing queries on a single server.
- maxMemoryUsageForUser Integer - Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- maxNetworkBandwidth Integer - Limits the speed of the data exchange over the network in bytes per second.
- maxNetworkBandwidthForUser Integer - Limits the speed of the data exchange over the network in bytes per second.
- maxQuerySize Integer - The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- maxReplicaDelayForDistributedQueries Integer - Disables lagging replicas for distributed queries.
- maxResultBytes Integer - Limits the number of bytes in the result.
- maxResultRows Integer - Limits the number of rows in the result.
- maxRowsInDistinct Integer - Limits the maximum number of different rows when using DISTINCT.
- maxRowsInJoin Integer - Limit on maximum size of the hash table for JOIN, in rows.
- maxRowsInSet Integer - Limit on the number of rows in the set resulting from the execution of the IN section.
- maxRowsToGroupBy Integer - Limits the maximum number of unique keys received from aggregation.
- maxRowsToRead Integer - Limits the maximum number of rows that can be read from a table when running a query.
- maxRowsToSort Integer - Limits the maximum number of rows that can be read from a table for sorting.
- maxRowsToTransfer Integer - Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxTemporaryColumns Integer - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- maxTemporaryNonConstColumns Integer - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- maxThreads Integer - The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- mergeTreeMaxBytesToUseCache Integer - If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn't use the cache of uncompressed blocks.
- mergeTreeMaxRowsToUseCache Integer - If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn't use the cache of uncompressed blocks.
- mergeTreeMinBytesForConcurrentRead Integer - If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- mergeTreeMinRowsForConcurrentRead Integer - If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent read from this file on several threads.
- minBytesToUseDirectIo Integer - The minimum data volume required for using direct I/O access to the storage disk.
- minCountToCompile Integer - How many times to potentially use a compiled chunk of code before running compilation.
- minCountToCompileExpression Integer - A query waits for the expression compilation process to complete prior to continuing execution.
- minExecutionSpeed Integer - Minimal execution speed in rows per second.
- minExecutionSpeedBytes Integer - Minimal execution speed in bytes per second.
- minInsertBlockSizeBytes Integer - Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- minInsertBlockSizeRows Integer - Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- outputFormatJsonQuote64bitIntegers Boolean - If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- outputFormatJsonQuoteDenormals Boolean - Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority Integer - Query priority.
- quotaMode String - Quota accounting mode.
- readOverflowMode String - Sets behaviour on overflow while reading. Possible values:
- readonly Integer - Restricts permissions for read data, write data, and change settings queries.
- receiveTimeout Integer - Receive timeout in milliseconds on the socket used for communicating with the client.
- replicationAlterPartitionsSync Integer - For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- resultOverflowMode String - Sets behaviour on overflow in the result. Possible values:
- selectSequentialConsistency Boolean - Enables or disables sequential consistency for SELECT queries.
- sendProgressInHttpHeaders Boolean - Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- sendTimeout Integer - Send timeout in milliseconds on the socket used for communicating with the client.
- setOverflowMode String - Sets behaviour on overflow in the resulting set. Possible values:
- skipUnavailableShards Boolean - Enables or disables silently skipping of unavailable shards.
- sortOverflowMode String - Sets behaviour on overflow while sorting. Possible values:
- timeoutOverflowMode String - Sets behaviour on overflow. Possible values:
- transferOverflowMode String - Sets behaviour on overflow. Possible values:
- transformNullIn Boolean - Enables equality of NULL values for the IN operator.
- useUncompressedCache Boolean - Whether to use a cache of uncompressed blocks.
- addHttpCorsHeader boolean - Include CORS headers in HTTP responses.
- allowDdl boolean - Allows or denies DDL queries.
- compile boolean - Enable compilation of queries.
- compileExpressions boolean - Turn on expression compilation.
- connectTimeout number - Connect timeout in milliseconds on the socket used for communicating with the client.
- countDistinctImplementation string - Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinctOverflowMode string - Sets behaviour on overflow when using DISTINCT. Possible values:
- distributedAggregationMemoryEfficient boolean - Determines the behavior of distributed subqueries.
- distributedDdlTaskTimeout number - Timeout for DDL queries, in milliseconds.
- distributedProductMode string - Changes the behaviour of distributed subqueries.
- emptyResultForAggregationByEmptySet boolean - Allows returning an empty result.
- enableHttpCompression boolean - Enables or disables data compression in the response to an HTTP request.
- fallbackToStaleReplicasForDistributedQueries boolean - Forces a query to an out-of-date replica if updated data is not available.
- forceIndexByDate boolean - Disables query execution if the index can't be used by date.
- forcePrimaryKey boolean - Disables query execution if indexing by the primary key is not possible.
- groupByOverflowMode string - Sets behaviour on overflow during GROUP BY operations. Possible values:
- groupByTwoLevelThreshold number - Sets the threshold on the number of keys after which two-level aggregation should be used.
- groupByTwoLevelThresholdBytes number - Sets the threshold on the number of bytes after which two-level aggregation should be used.
- httpConnectionTimeout number - Timeout for HTTP connection in milliseconds.
- httpHeadersProgressInterval number - Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- httpReceiveTimeout number - Timeout for HTTP connection in milliseconds.
- httpSendTimeout number - Timeout for HTTP connection in milliseconds.
- inputFormatDefaultsForOmittedFields boolean - When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- inputFormatValuesInterpretExpressions boolean - Enables or disables the full SQL parser if the fast stream parser can't parse the data.
- insertQuorum number - Enables quorum writes.
- insertQuorumTimeout number - Write-to-quorum timeout in milliseconds.
- joinOverflowMode string - Sets behaviour on overflow in JOIN. Possible values:
- joinUseNulls boolean - Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joinedSubqueryRequiresAlias boolean - Requires aliases for subselects and table functions in FROM when more than one table is present.
- lowCardinalityAllowInNativeFormat boolean - Allows or restricts using the LowCardinality data type with the Native format.
- maxAstDepth number - Maximum abstract syntax tree depth.
- maxAstElements number - Maximum abstract syntax tree elements.
- maxBlockSize number - A recommendation for what size of the block (in a count of rows) to load from tables.
- maxBytesBeforeExternalGroupBy number - Limit in bytes for using memory for GROUP BY before using swap on disk.
- maxBytesBeforeExternalSort number - This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
- maxBytesInDistinct number - Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- maxBytesInJoin number - Limit on maximum size of the hash table for JOIN, in bytes.
- maxBytesInSet number - Limit on the number of bytes in the set resulting from the execution of the IN section.
- maxBytesToRead number - Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- maxBytesToSort number - Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- maxBytesToTransfer number - Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxColumnsToRead number - Limits the maximum number of columns that can be read from a table in a single query.
- maxExecutionTime number - Limits the maximum query execution time in milliseconds.
- maxExpandedAstElements number - Maximum abstract syntax tree elements after expansion of aliases.
- maxInsertBlockSize number - The size of blocks (in a count of rows) to form for insertion into a table.
- maxMemoryUsage number - Limits the maximum memory usage (in bytes) for processing queries on a single server.
- maxMemoryUsageForUser number - Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- maxNetworkBandwidth number - Limits the speed of the data exchange over the network in bytes per second.
- maxNetworkBandwidthForUser number - Limits the speed of the data exchange over the network in bytes per second.
- maxQuerySize number - The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- maxReplicaDelayForDistributedQueries number - Disables lagging replicas for distributed queries.
- maxResultBytes number - Limits the number of bytes in the result.
- maxResultRows number - Limits the number of rows in the result.
- maxRowsInDistinct number - Limits the maximum number of different rows when using DISTINCT.
- maxRowsInJoin number - Limit on maximum size of the hash table for JOIN, in rows.
- maxRowsInSet number - Limit on the number of rows in the set resulting from the execution of the IN section.
- maxRowsToGroupBy number - Limits the maximum number of unique keys received from aggregation.
- maxRowsToRead number - Limits the maximum number of rows that can be read from a table when running a query.
- maxRowsToSort number - Limits the maximum number of rows that can be read from a table for sorting.
- maxRowsToTransfer number - Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxTemporaryColumns number - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- maxTemporaryNonConstColumns number - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- maxThreads number - The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- mergeTreeMaxBytesToUseCache number - If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn't use the cache of uncompressed blocks.
- mergeTreeMaxRowsToUseCache number - If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn't use the cache of uncompressed blocks.
- mergeTreeMinBytesForConcurrentRead number - If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- mergeTreeMinRowsForConcurrentRead number - If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent read from this file on several threads.
- minBytesToUseDirectIo number - The minimum data volume required for using direct I/O access to the storage disk.
- minCountToCompile number - How many times to potentially use a compiled chunk of code before running compilation.
- minCountToCompileExpression number - A query waits for the expression compilation process to complete prior to continuing execution.
- minExecutionSpeed number - Minimal execution speed in rows per second.
- minExecutionSpeedBytes number - Minimal execution speed in bytes per second.
- minInsertBlockSizeBytes number - Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- minInsertBlockSizeRows number - Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- outputFormatJsonQuote64bitIntegers boolean - If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- outputFormatJsonQuoteDenormals boolean - Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority number - Query priority.
- quotaMode string - Quota accounting mode.
- readOverflowMode string - Sets behaviour on overflow while reading. Possible values:
- readonly number - Restricts permissions for read data, write data, and change settings queries.
- receiveTimeout number - Receive timeout in milliseconds on the socket used for communicating with the client.
- replicationAlterPartitionsSync number - For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- resultOverflowMode string - Sets behaviour on overflow in the result. Possible values:
- selectSequentialConsistency boolean - Enables or disables sequential consistency for SELECT queries.
- sendProgressInHttpHeaders boolean - Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- sendTimeout number - Send timeout in milliseconds on the socket used for communicating with the client.
- setOverflowMode string - Sets behaviour on overflow in the resulting set. Possible values:
- skipUnavailableShards boolean - Enables or disables silently skipping of unavailable shards.
- sortOverflowMode string - Sets behaviour on overflow while sorting. Possible values:
- timeoutOverflowMode string - Sets behaviour on overflow. Possible values:
- transferOverflowMode string - Sets behaviour on overflow. Possible values:
- transformNullIn boolean - Enables equality of NULL values for the IN operator.
- useUncompressedCache boolean - Whether to use a cache of uncompressed blocks.
- add_http_cors_header bool - Include CORS headers in HTTP responses.
- allow_ddl bool - Allows or denies DDL queries.
- compile bool - Enable compilation of queries.
- compile_expressions bool - Turn on expression compilation.
- connect_timeout int - Connect timeout in milliseconds on the socket used for communicating with the client.
- count_distinct_implementation str - Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinct_overflow_mode str - Sets behaviour on overflow when using DISTINCT. Possible values:
- distributed_aggregation_memory_efficient bool - Determines the behavior of distributed subqueries.
- distributed_ddl_task_timeout int - Timeout for DDL queries, in milliseconds.
- distributed_product_mode str - Changes the behaviour of distributed subqueries.
- empty_result_for_aggregation_by_empty_set bool - Allows returning an empty result.
- enable_http_compression bool - Enables or disables data compression in the response to an HTTP request.
- fallback_to_stale_replicas_for_distributed_queries bool - Forces a query to an out-of-date replica if updated data is not available.
- force_index_by_date bool - Disables query execution if the index can't be used by date.
- force_primary_key bool - Disables query execution if indexing by the primary key is not possible.
- group_by_overflow_mode str - Sets behaviour on overflow during GROUP BY operations. Possible values:
- group_by_two_level_threshold int - Sets the threshold on the number of keys after which two-level aggregation should be used.
- group_by_two_level_threshold_bytes int - Sets the threshold on the number of bytes after which two-level aggregation should be used.
- http_connection_timeout int - Timeout for HTTP connection in milliseconds.
- http_headers_progress_interval int - Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- http_receive_timeout int - Timeout for HTTP connection in milliseconds.
- http_send_timeout int - Timeout for HTTP connection in milliseconds.
- input_format_defaults_for_omitted_fields bool - When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- input_format_values_interpret_expressions bool - Enables or disables the full SQL parser if the fast stream parser can't parse the data.
- insert_quorum int - Enables quorum writes.
- insert_quorum_timeout int - Write-to-quorum timeout in milliseconds.
- join_overflow_mode str - Sets behaviour on overflow in JOIN. Possible values:
- join_use_nulls bool - Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joined_subquery_requires_alias bool - Requires aliases for subselects and table functions in FROM when more than one table is present.
- low_cardinality_allow_in_native_format bool - Allows or restricts using the LowCardinality data type with the Native format.
- max_ast_depth int - Maximum abstract syntax tree depth.
- max_ast_elements int - Maximum abstract syntax tree elements.
- max_block_size int - A recommendation for what size of the block (in a count of rows) to load from tables.
- max_bytes_before_external_group_by int - Limit in bytes for using memory for GROUP BY before using swap on disk.
- max_bytes_before_external_sort int - This setting is the equivalent of the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
- max_bytes_in_distinct int - Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- max_bytes_in_join int - Limit on maximum size of the hash table for JOIN, in bytes.
- max_bytes_in_set int - Limit on the number of bytes in the set resulting from the execution of the IN section.
- max_bytes_to_read int - Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- max_bytes_to_sort int - Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- max_bytes_to_transfer int - Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- max_columns_to_read int - Limits the maximum number of columns that can be read from a table in a single query.
- max_execution_time int - Limits the maximum query execution time in milliseconds.
- max_expanded_ast_elements int - Maximum abstract syntax tree elements after expansion of aliases.
- max_insert_block_size int - The size of blocks (in a count of rows) to form for insertion into a table.
- max_memory_usage int - Limits the maximum memory usage (in bytes) for processing queries on a single server.
- max_memory_usage_for_user int - Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- max_network_bandwidth int - Limits the speed of the data exchange over the network in bytes per second.
- max_network_bandwidth_for_user int - Limits the speed of the data exchange over the network in bytes per second.
- max_query_size int - The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- max_replica_delay_for_distributed_queries int - Disables lagging replicas for distributed queries.
- max_result_bytes int - Limits the number of bytes in the result.
- max_result_rows int - Limits the number of rows in the result.
- max_rows_in_distinct int - Limits the maximum number of different rows when using DISTINCT.
- max_rows_in_join int - Limit on maximum size of the hash table for JOIN, in rows.
- max_rows_in_set int - Limit on the number of rows in the set resulting from the execution of the IN section.
- max_rows_to_group_by int - Limits the maximum number of unique keys received from aggregation.
- max_rows_to_read int - Limits the maximum number of rows that can be read from a table when running a query.
- max_rows_to_sort int - Limits the maximum number of rows that can be read from a table for sorting.
- max_rows_to_transfer int - Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- max_temporary_columns int - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- max_temporary_non_const_columns int - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- max_threads int - The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- merge_tree_max_bytes_to_use_cache int - If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn't use the cache of uncompressed blocks.
- merge_tree_max_rows_to_use_cache int - If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn't use the cache of uncompressed blocks.
- merge_tree_min_bytes_for_concurrent_read int - If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- merge_tree_min_rows_for_concurrent_read int - If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent read from this file on several threads.
- min_bytes_to_use_direct_io int - The minimum data volume required for using direct I/O access to the storage disk.
- min_count_to_compile int - How many times to potentially use a compiled chunk of code before running compilation.
- min_count_to_compile_expression int - A query waits for the expression compilation process to complete prior to continuing execution.
- min_execution_speed int - Minimal execution speed in rows per second.
- min_execution_speed_bytes int - Minimal execution speed in bytes per second.
- min_insert_block_size_bytes int - Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- min_insert_block_size_rows int - Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- output_format_json_quote64bit_integers bool - If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- output_format_json_quote_denormals bool - Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority int - Query priority.
- quota_mode str - Quota accounting mode.
- read_overflow_mode str - Sets behaviour on overflow while reading. Possible values:
- readonly int - Restricts permissions for read data, write data, and change settings queries.
- receive_timeout int - Receive timeout in milliseconds on the socket used for communicating with the client.
- replication_alter_partitions_sync int - For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- result_overflow_mode str - Sets behaviour on overflow in the result. Possible values:
- select_sequential_consistency bool - Enables or disables sequential consistency for SELECT queries.
- send_progress_in_http_headers bool - Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- send_timeout int - Send timeout in milliseconds on the socket used for communicating with the client.
- set_overflow_mode str - Sets behaviour on overflow in the resulting set. Possible values:
- skip_unavailable_shards bool - Enables or disables silently skipping of unavailable shards.
- sort_overflow_mode str - Sets behaviour on overflow while sorting. Possible values:
- timeout_overflow_mode str - Sets behaviour on overflow. Possible values:
- transfer_overflow_mode str - Sets behaviour on overflow. Possible values:
- transform_null_in bool - Enables equality of NULL values for the IN operator.
- use_uncompressed_cache bool - Whether to use a cache of uncompressed blocks.
- addHttpCorsHeader Boolean - Include CORS headers in HTTP responses.
- allowDdl Boolean - Allows or denies DDL queries.
- compile Boolean - Enable compilation of queries.
- compileExpressions Boolean - Turn on expression compilation.
- connectTimeout Number - Connect timeout in milliseconds on the socket used for communicating with the client.
- countDistinctImplementation String - Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinctOverflowMode String - Sets behaviour on overflow when using DISTINCT. Possible values:
- distributedAggregationMemoryEfficient Boolean - Determine the behavior of distributed subqueries.
- distributedDdlTaskTimeout Number - Timeout for DDL queries, in milliseconds.
- distributedProductMode String - Changes the behaviour of distributed subqueries.
- emptyResultForAggregationByEmptySet Boolean - Allows returning an empty result.
- enableHttpCompression Boolean - Enables or disables data compression in the response to an HTTP request.
- fallbackToStaleReplicasForDistributedQueries Boolean - Forces a query to an out-of-date replica if updated data is not available.
- forceIndexByDate Boolean - Disables query execution if the index can't be used by date.
- forcePrimaryKey Boolean - Disables query execution if indexing by the primary key is not possible.
- groupByOverflowMode String - Sets behaviour on overflow during the GROUP BY operation. Possible values:
- groupByTwoLevelThreshold Number - Sets the threshold of the number of keys, after which two-level aggregation should be used.
- groupByTwoLevelThresholdBytes Number - Sets the threshold of the number of bytes, after which two-level aggregation should be used.
- httpConnectionTimeout Number - Timeout for HTTP connection in milliseconds.
- httpHeadersProgressInterval Number - Sets the minimal interval between notifications about request progress in the HTTP header X-ClickHouse-Progress.
- httpReceiveTimeout Number - Timeout for HTTP connection in milliseconds.
- httpSendTimeout Number - Timeout for HTTP connection in milliseconds.
- inputFormatDefaultsForOmittedFields Boolean - When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- inputFormatValuesInterpretExpressions Boolean - Enables or disables the full SQL parser if the fast stream parser can't parse the data.
- insertQuorum Number - Enables the quorum writes.
- insertQuorumTimeout Number - Write to a quorum timeout in milliseconds.
- joinOverflowMode String - Sets behaviour on overflow in JOIN. Possible values:
- joinUseNulls Boolean - Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joinedSubqueryRequiresAlias Boolean - Require aliases for subselects and table functions in FROM when more than one table is present.
- lowCardinalityAllowInNativeFormat Boolean - Allows or restricts using the LowCardinality data type with the Native format.
- maxAstDepth Number - Maximum abstract syntax tree depth.
- maxAstElements Number - Maximum abstract syntax tree elements.
- maxBlockSize Number - A recommendation for what size of the block (in a count of rows) to load from tables.
- maxBytesBeforeExternalGroupBy Number - Limit in bytes for using memory for GROUP BY before using swap on disk.
- maxBytesBeforeExternalSort Number - This setting is the equivalent of the max_bytes_before_external_group_by setting, except it applies to the sort operation (ORDER BY), not aggregation.
- maxBytesInDistinct Number - Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- maxBytesInJoin Number - Limit on maximum size of the hash table for JOIN, in bytes.
- maxBytesInSet Number - Limit on the number of bytes in the set resulting from the execution of the IN section.
- maxBytesToRead Number - Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- maxBytesToSort Number - Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- maxBytesToTransfer Number - Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxColumnsToRead Number - Limits the maximum number of columns that can be read from a table in a single query.
- maxExecutionTime Number - Limits the maximum query execution time in milliseconds.
- maxExpandedAstElements Number - Maximum abstract syntax tree elements after expansion of aliases.
- maxInsertBlockSize Number - The size of blocks (in a count of rows) to form for insertion into a table.
- maxMemoryUsage Number - Limits the maximum memory usage (in bytes) for processing queries on a single server.
- maxMemoryUsageForUser Number - Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
- maxNetworkBandwidth Number - Limits the speed of the data exchange over the network in bytes per second.
- maxNetworkBandwidthForUser Number - Limits the speed of the data exchange over the network in bytes per second.
- maxQuerySize Number - The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- maxReplicaDelayForDistributedQueries Number - Disables lagging replicas for distributed queries.
- maxResultBytes Number - Limits the number of bytes in the result.
- maxResultRows Number - Limits the number of rows in the result.
- maxRowsInDistinct Number - Limits the maximum number of different rows when using DISTINCT.
- maxRowsInJoin Number - Limit on maximum size of the hash table for JOIN, in rows.
- maxRowsInSet Number - Limit on the number of rows in the set resulting from the execution of the IN section.
- maxRowsToGroupBy Number - Limits the maximum number of unique keys received from aggregation.
- maxRowsToRead Number - Limits the maximum number of rows that can be read from a table when running a query.
- maxRowsToSort Number - Limits the maximum number of rows that can be read from a table for sorting.
- maxRowsToTransfer Number - Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxTemporaryColumns Number - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- maxTemporaryNonConstColumns Number - Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- maxThreads Number - The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- mergeTreeMaxBytesToUseCache Number - If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn't use the cache of uncompressed blocks.
- mergeTreeMaxRowsToUseCache Number - If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn't use the cache of uncompressed blocks.
- mergeTreeMinBytesForConcurrentRead Number - If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- mergeTreeMinRowsForConcurrentRead Number - If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent reading from this file on several threads.
- minBytesToUseDirectIo Number - The minimum data volume required for using direct I/O access to the storage disk.
- minCountToCompile Number - How many times to potentially use a compiled chunk of code before running compilation.
- minCountToCompileExpression Number - A query waits for the expression compilation process to complete prior to continuing execution.
- minExecutionSpeed Number - Minimal execution speed in rows per second.
- minExecutionSpeedBytes Number - Minimal execution speed in bytes per second.
- minInsertBlockSizeBytes Number - Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- minInsertBlockSizeRows Number - Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- outputFormatJsonQuote64bitIntegers Boolean - If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- outputFormatJsonQuoteDenormals Boolean - Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority Number - Query priority.
- quotaMode String - Quota accounting mode.
- readOverflowMode String - Sets behaviour on overflow while reading. Possible values:
- readonly Number - Restricts permissions for reading data, writing data, and changing settings queries.
- receiveTimeout Number - Receive timeout in milliseconds on the socket used for communicating with the client.
- replicationAlterPartitionsSync Number - For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- resultOverflowMode String - Sets behaviour on overflow in result. Possible values:
- selectSequentialConsistency Boolean - Enables or disables sequential consistency for SELECT queries.
- sendProgressInHttpHeaders Boolean - Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- sendTimeout Number - Send timeout in milliseconds on the socket used for communicating with the client.
- setOverflowMode String - Sets behaviour on overflow in the resulting set. Possible values:
- skipUnavailableShards Boolean - Enables or disables silently skipping unavailable shards.
- sortOverflowMode String - Sets behaviour on overflow while sorting. Possible values:
- timeoutOverflowMode String - Sets behaviour on overflow. Possible values:
- transferOverflowMode String - Sets behaviour on overflow. Possible values:
- transformNullIn Boolean - Enables equality of NULL values for the IN operator.
- useUncompressedCache Boolean - Whether to use a cache of uncompressed blocks.
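In the provider these query-level settings are applied per database user. The following is a minimal C# sketch only, assuming (as in the provider's user documentation) that they belong to the Settings block of a user entry passed in the cluster's Users list; the user name, password, and all values are illustrative, and property names follow the C# variants listed above.
var appUser = new Yandex.Inputs.MdbClickhouseClusterUserArgs
{
    Name = "app_user",          // illustrative name
    Password = "your_password", // illustrative value
    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
    {
        Readonly = 0,                              // 0 = allow reads, writes, and settings changes
        MaxThreads = 8,                            // cap query processing threads
        MaxMemoryUsage = 10737418240,              // 10 GiB per query on a single server
        OutputFormatJsonQuote64bitIntegers = true, // quote Int64/UInt64 values in JSON output
        SkipUnavailableShards = true,              // silently skip unavailable shards
    },
};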
MdbClickhouseClusterZookeeper, MdbClickhouseClusterZookeeperArgs
- Resources MdbClickhouseClusterZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- Resources MdbClickhouseClusterZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbClickhouseClusterZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbClickhouseClusterZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbClickhouseClusterZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources Property Map - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
MdbClickhouseClusterZookeeperResources, MdbClickhouseClusterZookeeperResourcesArgs
- DiskSize int - Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- DiskSize int - Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- diskSize Integer - Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
- diskSize number - Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId string - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId string
- disk_size int - Volume of the storage available to a ZooKeeper host, in gigabytes.
- disk_type_id str - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resource_preset_id str
- diskSize Number - Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
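For reference, a minimal C# sketch of these fields in context; the other required cluster arguments are omitted, and the disk type and preset values are illustrative (they must be valid for your Yandex.Cloud folder).
var haCluster = new Yandex.MdbClickhouseCluster("haCluster", new Yandex.MdbClickhouseClusterArgs
{
    // ... Environment, NetworkId, Clickhouse, Hosts, etc. omitted for brevity
    Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
    {
        Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
        {
            DiskSize = 10,                 // gigabytes of storage per ZooKeeper host
            DiskTypeId = "network-ssd",    // storage type; see the official documentation
            ResourcePresetId = "s2.micro", // host class preset
        },
    },
});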
Import
A cluster can be imported using the id of the resource, e.g.
$ pulumi import yandex:index/mdbClickhouseCluster:MdbClickhouseCluster foo cluster_id
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Yandex pulumi/pulumi-yandex
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the yandex Terraform Provider.