Aiven v6.28.0 published on Friday, Nov 15, 2024 by Pulumi
aiven.getServiceIntegration
Explore with Pulumi AI
Gets information about an Aiven service integration.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
const exampleIntegration = aiven.getServiceIntegration({
project: exampleProject.project,
destinationServiceName: exampleM3db.serviceName,
integrationType: "metrics",
sourceServiceName: exampleKafka.serviceName,
});
import pulumi
import pulumi_aiven as aiven
example_integration = aiven.get_service_integration(project=example_project["project"],
destination_service_name=example_m3db["serviceName"],
integration_type="metrics",
source_service_name=example_kafka["serviceName"])
package main
import (
"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := aiven.LookupServiceIntegration(ctx, &aiven.LookupServiceIntegrationArgs{
Project: exampleProject.Project,
DestinationServiceName: exampleM3db.ServiceName,
IntegrationType: "metrics",
SourceServiceName: exampleKafka.ServiceName,
}, nil)
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;
return await Deployment.RunAsync(() =>
{
var exampleIntegration = Aiven.GetServiceIntegration.Invoke(new()
{
Project = exampleProject.Project,
DestinationServiceName = exampleM3db.ServiceName,
IntegrationType = "metrics",
SourceServiceName = exampleKafka.ServiceName,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aiven.AivenFunctions;
import com.pulumi.aiven.inputs.GetServiceIntegrationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var exampleIntegration = AivenFunctions.getServiceIntegration(GetServiceIntegrationArgs.builder()
.project(exampleProject.project())
.destinationServiceName(exampleM3db.serviceName())
.integrationType("metrics")
.sourceServiceName(exampleKafka.serviceName())
.build());
}
}
variables:
exampleIntegration:
fn::invoke:
Function: aiven:getServiceIntegration
Arguments:
project: ${exampleProject.project}
destinationServiceName: ${exampleM3db.serviceName}
integrationType: metrics
sourceServiceName: ${exampleKafka.serviceName}
Using getServiceIntegration
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getServiceIntegration(args: GetServiceIntegrationArgs, opts?: InvokeOptions): Promise<GetServiceIntegrationResult>
function getServiceIntegrationOutput(args: GetServiceIntegrationOutputArgs, opts?: InvokeOptions): Output<GetServiceIntegrationResult>
def get_service_integration(destination_service_name: Optional[str] = None,
integration_type: Optional[str] = None,
project: Optional[str] = None,
source_service_name: Optional[str] = None,
opts: Optional[InvokeOptions] = None) -> GetServiceIntegrationResult
def get_service_integration_output(destination_service_name: Optional[pulumi.Input[str]] = None,
integration_type: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
source_service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[InvokeOptions] = None) -> Output[GetServiceIntegrationResult]
func LookupServiceIntegration(ctx *Context, args *LookupServiceIntegrationArgs, opts ...InvokeOption) (*LookupServiceIntegrationResult, error)
func LookupServiceIntegrationOutput(ctx *Context, args *LookupServiceIntegrationOutputArgs, opts ...InvokeOption) LookupServiceIntegrationResultOutput
> Note: This function is named LookupServiceIntegration
in the Go SDK.
public static class GetServiceIntegration
{
public static Task<GetServiceIntegrationResult> InvokeAsync(GetServiceIntegrationArgs args, InvokeOptions? opts = null)
public static Output<GetServiceIntegrationResult> Invoke(GetServiceIntegrationInvokeArgs args, InvokeOptions? opts = null)
}
public static CompletableFuture<GetServiceIntegrationResult> getServiceIntegration(GetServiceIntegrationArgs args, InvokeOptions options)
// Output-based functions aren't available in Java yet
fn::invoke:
function: aiven:index/getServiceIntegration:getServiceIntegration
arguments:
# arguments dictionary
The following arguments are supported:
- DestinationServiceName string - Destination service for the integration.
- IntegrationType string - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- Project string - Project the integration belongs to.
- SourceServiceName string - Source service for the integration (if any)
- DestinationServiceName string - Destination service for the integration.
- IntegrationType string - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- Project string - Project the integration belongs to.
- SourceServiceName string - Source service for the integration (if any)
- destinationServiceName String - Destination service for the integration.
- integrationType String - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- project String - Project the integration belongs to.
- sourceServiceName String - Source service for the integration (if any)
- destinationServiceName string - Destination service for the integration.
- integrationType string - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- project string - Project the integration belongs to.
- sourceServiceName string - Source service for the integration (if any)
- destination_service_name str - Destination service for the integration.
- integration_type str - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- project str - Project the integration belongs to.
- source_service_name str - Source service for the integration (if any)
- destinationServiceName String - Destination service for the integration.
- integrationType String - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- project String - Project the integration belongs to.
- sourceServiceName String - Source service for the integration (if any)
getServiceIntegration Result
The following output properties are available:
- ClickhouseKafkaUserConfigs List&lt;GetServiceIntegrationClickhouseKafkaUserConfig&gt; - ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ClickhousePostgresqlUserConfigs List&lt;GetServiceIntegrationClickhousePostgresqlUserConfig&gt; - ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DatadogUserConfigs List&lt;GetServiceIntegrationDatadogUserConfig&gt; - Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DestinationEndpointId string - Destination endpoint for the integration.
- DestinationServiceName string - Destination service for the integration.
- ExternalAwsCloudwatchLogsUserConfigs List&lt;GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig&gt; - ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalAwsCloudwatchMetricsUserConfigs List&lt;GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig&gt; - ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalElasticsearchLogsUserConfigs List&lt;GetServiceIntegrationExternalElasticsearchLogsUserConfig&gt; - ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalOpensearchLogsUserConfigs List&lt;GetServiceIntegrationExternalOpensearchLogsUserConfig&gt; - ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- FlinkExternalPostgresqlUserConfigs List&lt;GetServiceIntegrationFlinkExternalPostgresqlUserConfig&gt; - FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Id string - The provider-assigned unique ID for this managed resource.
- IntegrationId string - The ID of the Aiven service integration.
- IntegrationType string - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- KafkaConnectUserConfigs List&lt;GetServiceIntegrationKafkaConnectUserConfig&gt; - KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaLogsUserConfigs List&lt;GetServiceIntegrationKafkaLogsUserConfig&gt; - KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaMirrormakerUserConfigs List&lt;GetServiceIntegrationKafkaMirrormakerUserConfig&gt; - KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- LogsUserConfigs List&lt;GetServiceIntegrationLogsUserConfig&gt; - Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- MetricsUserConfigs List&lt;GetServiceIntegrationMetricsUserConfig&gt; - Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Project string - Project the integration belongs to.
- PrometheusUserConfigs List&lt;GetServiceIntegrationPrometheusUserConfig&gt; - Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- SourceEndpointId string - Source endpoint for the integration.
- SourceServiceName string - Source service for the integration (if any)
- ClickhouseKafkaUserConfigs []GetServiceIntegrationClickhouseKafkaUserConfig - ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ClickhousePostgresqlUserConfigs []GetServiceIntegrationClickhousePostgresqlUserConfig - ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DatadogUserConfigs []GetServiceIntegrationDatadogUserConfig - Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- DestinationEndpointId string - Destination endpoint for the integration.
- DestinationServiceName string - Destination service for the integration.
- ExternalAwsCloudwatchLogsUserConfigs []GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig - ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalAwsCloudwatchMetricsUserConfigs []GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig - ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalElasticsearchLogsUserConfigs []GetServiceIntegrationExternalElasticsearchLogsUserConfig - ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- ExternalOpensearchLogsUserConfigs []GetServiceIntegrationExternalOpensearchLogsUserConfig - ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- FlinkExternalPostgresqlUserConfigs []GetServiceIntegrationFlinkExternalPostgresqlUserConfig - FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Id string - The provider-assigned unique ID for this managed resource.
- IntegrationId string - The ID of the Aiven service integration.
- IntegrationType string - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- KafkaConnectUserConfigs []GetServiceIntegrationKafkaConnectUserConfig - KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaLogsUserConfigs []GetServiceIntegrationKafkaLogsUserConfig - KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- KafkaMirrormakerUserConfigs []GetServiceIntegrationKafkaMirrormakerUserConfig - KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- LogsUserConfigs []GetServiceIntegrationLogsUserConfig - Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- MetricsUserConfigs []GetServiceIntegrationMetricsUserConfig - Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- Project string - Project the integration belongs to.
- PrometheusUserConfigs []GetServiceIntegrationPrometheusUserConfig - Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- SourceEndpointId string - Source endpoint for the integration.
- SourceServiceName string - Source service for the integration (if any)
- clickhouseKafkaUserConfigs List&lt;GetServiceIntegrationClickhouseKafkaUserConfig&gt; - ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhousePostgresqlUserConfigs List&lt;GetServiceIntegrationClickhousePostgresqlUserConfig&gt; - ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadogUserConfigs List&lt;GetServiceIntegrationDatadogUserConfig&gt; - Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destinationEndpointId String - Destination endpoint for the integration.
- destinationServiceName String - Destination service for the integration.
- externalAwsCloudwatchLogsUserConfigs List&lt;GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig&gt; - ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalAwsCloudwatchMetricsUserConfigs List&lt;GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig&gt; - ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalElasticsearchLogsUserConfigs List&lt;GetServiceIntegrationExternalElasticsearchLogsUserConfig&gt; - ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalOpensearchLogsUserConfigs List&lt;GetServiceIntegrationExternalOpensearchLogsUserConfig&gt; - ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flinkExternalPostgresqlUserConfigs List&lt;GetServiceIntegrationFlinkExternalPostgresqlUserConfig&gt; - FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id String - The provider-assigned unique ID for this managed resource.
- integrationId String - The ID of the Aiven service integration.
- integrationType String - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- kafkaConnectUserConfigs List&lt;GetServiceIntegrationKafkaConnectUserConfig&gt; - KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaLogsUserConfigs List&lt;GetServiceIntegrationKafkaLogsUserConfig&gt; - KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaMirrormakerUserConfigs List&lt;GetServiceIntegrationKafkaMirrormakerUserConfig&gt; - KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logsUserConfigs List&lt;GetServiceIntegrationLogsUserConfig&gt; - Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metricsUserConfigs List&lt;GetServiceIntegrationMetricsUserConfig&gt; - Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project String - Project the integration belongs to.
- prometheusUserConfigs List&lt;GetServiceIntegrationPrometheusUserConfig&gt; - Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- sourceEndpointId String - Source endpoint for the integration.
- sourceServiceName String - Source service for the integration (if any)
- clickhouseKafkaUserConfigs GetServiceIntegrationClickhouseKafkaUserConfig[] - ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhousePostgresqlUserConfigs GetServiceIntegrationClickhousePostgresqlUserConfig[] - ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadogUserConfigs GetServiceIntegrationDatadogUserConfig[] - Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destinationEndpointId string - Destination endpoint for the integration.
- destinationServiceName string - Destination service for the integration.
- externalAwsCloudwatchLogsUserConfigs GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig[] - ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalAwsCloudwatchMetricsUserConfigs GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig[] - ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalElasticsearchLogsUserConfigs GetServiceIntegrationExternalElasticsearchLogsUserConfig[] - ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalOpensearchLogsUserConfigs GetServiceIntegrationExternalOpensearchLogsUserConfig[] - ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flinkExternalPostgresqlUserConfigs GetServiceIntegrationFlinkExternalPostgresqlUserConfig[] - FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id string - The provider-assigned unique ID for this managed resource.
- integrationId string - The ID of the Aiven service integration.
- integrationType string - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert.
- kafkaConnectUserConfigs GetServiceIntegrationKafkaConnectUserConfig[] - KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaLogsUserConfigs GetServiceIntegrationKafkaLogsUserConfig[] - KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaMirrormakerUserConfigs GetServiceIntegrationKafkaMirrormakerUserConfig[] - KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logsUserConfigs GetServiceIntegrationLogsUserConfig[] - Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metricsUserConfigs GetServiceIntegrationMetricsUserConfig[] - Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project string - Project the integration belongs to.
- prometheusUserConfigs GetServiceIntegrationPrometheusUserConfig[] - Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- sourceEndpointId string - Source endpoint for the integration.
- sourceServiceName string - Source service for the integration (if any)
- clickhouse_kafka_user_configs Sequence[GetServiceIntegrationClickhouseKafkaUserConfig] - ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhouse_postgresql_user_configs Sequence[GetServiceIntegrationClickhousePostgresqlUserConfig] - ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadog_user_configs Sequence[GetServiceIntegrationDatadogUserConfig] - Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destination_endpoint_id str - Destination endpoint for the integration.
- destination_service_name str - Destination service for the integration.
- external_aws_cloudwatch_logs_user_configs Sequence[GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig] - ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- external_aws_cloudwatch_metrics_user_configs Sequence[GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig] - ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- external_elasticsearch_logs_user_configs Sequence[GetServiceIntegrationExternalElasticsearchLogsUserConfig] - ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- external_opensearch_logs_user_configs Sequence[GetServiceIntegrationExternalOpensearchLogsUserConfig] - ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flink_external_postgresql_user_configs Sequence[GetServiceIntegrationFlinkExternalPostgresqlUserConfig] - FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id str - The provider-assigned unique ID for this managed resource.
- integration_id str - The ID of the Aiven service integration.
- integration_type str - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert
. - kafka_connect_user_configs Sequence[GetServiceIntegrationKafkaConnectUserConfig] - KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafka_logs_user_configs Sequence[GetServiceIntegrationKafkaLogsUserConfig] - KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafka_mirrormaker_user_configs Sequence[GetServiceIntegrationKafkaMirrormakerUserConfig] - KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logs_user_configs Sequence[GetServiceIntegrationLogsUserConfig] - Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metrics_user_configs Sequence[GetServiceIntegrationMetricsUserConfig] - Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project str - Project the integration belongs to.
- prometheus_user_configs Sequence[GetServiceIntegrationPrometheusUserConfig] - Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- source_endpoint_id str - Source endpoint for the integration.
- source_service_name str - Source service for the integration (if any)
- clickhouseKafkaUserConfigs List<Property Map> - ClickhouseKafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- clickhousePostgresqlUserConfigs List<Property Map> - ClickhousePostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- datadogUserConfigs List<Property Map> - Datadog user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- destinationEndpointId String - Destination endpoint for the integration.
- destinationServiceName String - Destination service for the integration.
- externalAwsCloudwatchLogsUserConfigs List<Property Map> - ExternalAwsCloudwatchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalAwsCloudwatchMetricsUserConfigs List<Property Map> - ExternalAwsCloudwatchMetrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalElasticsearchLogsUserConfigs List<Property Map> - ExternalElasticsearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- externalOpensearchLogsUserConfigs List<Property Map> - ExternalOpensearchLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- flinkExternalPostgresqlUserConfigs List<Property Map> - FlinkExternalPostgresql user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- id String - The provider-assigned unique ID for this managed resource.
- integrationId String - The ID of the Aiven service integration.
- integrationType String - Type of the service integration. The possible values are
alertmanager
,autoscaler
,caching
,cassandra_cross_service_cluster
,clickhouse_credentials
,clickhouse_kafka
,clickhouse_postgresql
,dashboard
,datadog
,datasource
,disaster_recovery
,external_aws_cloudwatch_logs
,external_aws_cloudwatch_metrics
,external_elasticsearch_logs
,external_google_cloud_logging
,external_opensearch_logs
,flink
,flink_external_bigquery
,flink_external_kafka
,flink_external_postgresql
,internal_connectivity
,jolokia
,kafka_connect
,kafka_connect_postgresql
,kafka_logs
,kafka_mirrormaker
,logs
,m3aggregator
,m3coordinator
,metrics
,opensearch_cross_cluster_replication
,opensearch_cross_cluster_search
,prometheus
,read_replica
,rsyslog
,schema_registry_proxy
,stresstester
,thanos_distributed_query
,thanos_migrate
,thanoscompactor
,thanosquery
,thanosruler
,thanosstore
,vector
and vmalert
. - kafkaConnectUserConfigs List<Property Map> - KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaLogsUserConfigs List<Property Map> - KafkaLogs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- kafkaMirrormakerUserConfigs List<Property Map> - KafkaMirrormaker user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- logsUserConfigs List<Property Map> - Logs user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- metricsUserConfigs List<Property Map> - Metrics user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- project String - Project the integration belongs to.
- prometheusUserConfigs List<Property Map> - Prometheus user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- sourceEndpointId String - Source endpoint for the integration.
- sourceServiceName String - Source service for the integration (if any)
Supporting Types
GetServiceIntegrationClickhouseKafkaUserConfig
- Tables List<GetServiceIntegrationClickhouseKafkaUserConfigTable> - Tables to create
- Tables []GetServiceIntegrationClickhouseKafkaUserConfigTable - Tables to create
- tables List<GetServiceIntegrationClickhouseKafkaUserConfigTable> - Tables to create
- tables GetServiceIntegrationClickhouseKafkaUserConfigTable[] - Tables to create
- tables Sequence[GetServiceIntegrationClickhouseKafkaUserConfigTable] - Tables to create
- tables List<Property Map> - Tables to create
GetServiceIntegrationClickhouseKafkaUserConfigTable
- Columns List<GetServiceIntegrationClickhouseKafkaUserConfigTableColumn> - Table columns
- DataFormat string - Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. Message data format. Default: JSONEachRow.
- GroupName string - Kafka consumers group. Default: clickhouse.
- Name string - Name of the table. Example: events.
- Topics List<GetServiceIntegrationClickhouseKafkaUserConfigTableTopic> - Kafka topics
- AutoOffsetReset string - Enum: beginning, earliest, end, largest, latest, smallest. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: earliest.
- DateTimeInputFormat string - Enum: basic, best_effort, best_effort_us. Method to read DateTime from text input formats. Default: basic.
- HandleErrorMode string - Enum: default, stream. How to handle errors for Kafka engine. Default: default.
- MaxBlockSize int - Number of row collected by poll(s) for flushing data from Kafka. Default: 0.
- MaxRowsPerMessage int - The maximum number of rows produced in one kafka message for row-based formats. Default: 1.
- NumConsumers int - The number of consumers per table per replica. Default: 1.
- PollMaxBatchSize int - Maximum amount of messages to be polled in a single Kafka poll. Default: 0.
- PollMaxTimeoutMs int - Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- SkipBrokenMessages int - Skip at least this number of broken messages from Kafka topic per block. Default: 0.
- ThreadPerConsumer bool - Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: false.
- Columns
[]Get
Service Integration Clickhouse Kafka User Config Table Column - Table columns
- Data
Format string - Enum:
Avro
,AvroConfluent
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,Parquet
,RawBLOB
,TSKV
,TSV
,TabSeparated
. Message data format. Default:JSONEachRow
. - Group
Name string - Kafka consumers group. Default:
clickhouse
. - Name string
- Name of the table. Example:
events
. - Topics
[]Get
Service Integration Clickhouse Kafka User Config Table Topic - Kafka topics
- Auto
Offset stringReset - Enum:
beginning
,earliest
,end
,largest
,latest
,smallest
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - Date
Time stringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - Handle
Error stringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - Max
Block intSize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - Max
Rows intPer Message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - Num
Consumers int - The number of consumers per table per replica. Default:
1
. - Poll
Max intBatch Size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - Poll
Max intTimeout Ms - Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default:
0
. - Skip
Broken intMessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
. - Thread
Per boolConsumer - Provide an independent thread for each consumer. All consumers run in the same thread by default. Default:
false
.
- columns
List<Get
Service Integration Clickhouse Kafka User Config Table Column> - Table columns
- data
Format String - Enum:
Avro
,AvroConfluent
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,Parquet
,RawBLOB
,TSKV
,TSV
,TabSeparated
. Message data format. Default:JSONEachRow
. - group
Name String - Kafka consumers group. Default:
clickhouse
. - name String
- Name of the table. Example:
events
. - topics
List<Get
Service Integration Clickhouse Kafka User Config Table Topic> - Kafka topics
- auto
Offset StringReset - Enum:
beginning
,earliest
,end
,largest
,latest
,smallest
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - date
Time StringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - handle
Error StringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - max
Block IntegerSize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - max
Rows IntegerPer Message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - num
Consumers Integer - The number of consumers per table per replica. Default:
1
. - poll
Max IntegerBatch Size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - poll
Max IntegerTimeout Ms - Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default:
0
. - skip
Broken IntegerMessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
. - thread
Per BooleanConsumer - Provide an independent thread for each consumer. All consumers run in the same thread by default. Default:
false
.
- columns
Get
Service Integration Clickhouse Kafka User Config Table Column[] - Table columns
- data
Format string - Enum:
Avro
,AvroConfluent
,CSV
,JSONAsString
,JSONCompactEachRow
,JSONCompactStringsEachRow
,JSONEachRow
,JSONStringsEachRow
,MsgPack
,Parquet
,RawBLOB
,TSKV
,TSV
,TabSeparated
. Message data format. Default:JSONEachRow
. - group
Name string - Kafka consumers group. Default:
clickhouse
. - name string
- Name of the table. Example:
events
. - topics
Get
Service Integration Clickhouse Kafka User Config Table Topic[] - Kafka topics
- auto
Offset stringReset - Enum:
beginning
,earliest
,end
,largest
,latest
,smallest
. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default:earliest
. - date
Time stringInput Format - Enum:
basic
,best_effort
,best_effort_us
. Method to read DateTime from text input formats. Default:basic
. - handle
Error stringMode - Enum:
default
,stream
. How to handle errors for Kafka engine. Default:default
. - max
Block numberSize - Number of row collected by poll(s) for flushing data from Kafka. Default:
0
. - max
Rows numberPer Message - The maximum number of rows produced in one kafka message for row-based formats. Default:
1
. - num
Consumers number - The number of consumers per table per replica. Default:
1
. - poll
Max numberBatch Size - Maximum amount of messages to be polled in a single Kafka poll. Default:
0
. - poll
Max numberTimeout Ms - Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default:
0
. - skip
Broken numberMessages - Skip at least this number of broken messages from Kafka topic per block. Default:
0
. - thread
Per booleanConsumer - Provide an independent thread for each consumer. All consumers run in the same thread by default. Default:
false
.
- columns Sequence[GetServiceIntegrationClickhouseKafkaUserConfigTableColumn] - Table columns
- data_format str - Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. Message data format. Default: JSONEachRow.
- group_name str - Kafka consumers group. Default: clickhouse.
- name str - Name of the table. Example: events.
- topics Sequence[GetServiceIntegrationClickhouseKafkaUserConfigTableTopic] - Kafka topics
- auto_offset_reset str - Enum: beginning, earliest, end, largest, latest, smallest. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: earliest.
- date_time_input_format str - Enum: basic, best_effort, best_effort_us. Method to read DateTime from text input formats. Default: basic.
- handle_error_mode str - Enum: default, stream. How to handle errors for Kafka engine. Default: default.
- max_block_size int - Number of row collected by poll(s) for flushing data from Kafka. Default: 0.
- max_rows_per_message int - The maximum number of rows produced in one kafka message for row-based formats. Default: 1.
- num_consumers int - The number of consumers per table per replica. Default: 1.
- poll_max_batch_size int - Maximum amount of messages to be polled in a single Kafka poll. Default: 0.
- poll_max_timeout_ms int - Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- skip_broken_messages int - Skip at least this number of broken messages from Kafka topic per block. Default: 0.
- thread_per_consumer bool - Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: false.
- columns List<Property Map> - Table columns
- dataFormat String - Enum: Avro, AvroConfluent, CSV, JSONAsString, JSONCompactEachRow, JSONCompactStringsEachRow, JSONEachRow, JSONStringsEachRow, MsgPack, Parquet, RawBLOB, TSKV, TSV, TabSeparated. Message data format. Default: JSONEachRow.
- groupName String - Kafka consumers group. Default: clickhouse.
- name String - Name of the table. Example: events.
- topics List<Property Map> - Kafka topics
- autoOffsetReset String - Enum: beginning, earliest, end, largest, latest, smallest. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: earliest.
- dateTimeInputFormat String - Enum: basic, best_effort, best_effort_us. Method to read DateTime from text input formats. Default: basic.
- handleErrorMode String - Enum: default, stream. How to handle errors for Kafka engine. Default: default.
- maxBlockSize Number - Number of row collected by poll(s) for flushing data from Kafka. Default: 0.
- maxRowsPerMessage Number - The maximum number of rows produced in one kafka message for row-based formats. Default: 1.
- numConsumers Number - The number of consumers per table per replica. Default: 1.
- pollMaxBatchSize Number - Maximum amount of messages to be polled in a single Kafka poll. Default: 0.
- pollMaxTimeoutMs Number - Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: 0.
- skipBrokenMessages Number - Skip at least this number of broken messages from Kafka topic per block. Default: 0.
- threadPerConsumer Boolean - Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: false.
GetServiceIntegrationClickhouseKafkaUserConfigTableColumn
GetServiceIntegrationClickhouseKafkaUserConfigTableTopic
- Name string
- Name of the topic. Example:
topic_name
.
- Name string
- Name of the topic. Example:
topic_name
.
- name String
- Name of the topic. Example:
topic_name
.
- name string
- Name of the topic. Example:
topic_name
.
- name str
- Name of the topic. Example:
topic_name
.
- name String
- Name of the topic. Example:
topic_name
.
GetServiceIntegrationClickhousePostgresqlUserConfig
- Databases List<GetServiceIntegrationClickhousePostgresqlUserConfigDatabase> - Databases to expose
- Databases []GetServiceIntegrationClickhousePostgresqlUserConfigDatabase - Databases to expose
- databases List<GetServiceIntegrationClickhousePostgresqlUserConfigDatabase> - Databases to expose
- databases GetServiceIntegrationClickhousePostgresqlUserConfigDatabase[] - Databases to expose
- databases Sequence[GetServiceIntegrationClickhousePostgresqlUserConfigDatabase] - Databases to expose
- databases List<Property Map> - Databases to expose
GetServiceIntegrationClickhousePostgresqlUserConfigDatabase
GetServiceIntegrationDatadogUserConfig
- Datadog
Dbm boolEnabled - Enable Datadog Database Monitoring.
- Datadog
Pgbouncer boolEnabled - Enable Datadog PgBouncer Metric Tracking.
- List<Get
Service Integration Datadog User Config Datadog Tag> - Custom tags provided by user
- Exclude
Consumer List<string>Groups - List of custom metrics.
- Exclude
Topics List<string> - List of topics to exclude.
- Include
Consumer List<string>Groups - List of custom metrics.
- Include
Topics List<string> - List of topics to include.
- Kafka
Custom List<string>Metrics - List of custom metrics.
- Max
Jmx intMetrics - Maximum number of JMX metrics to send. Example:
2000
. - Mirrormaker
Custom List<string>Metrics - List of custom metrics.
- Opensearch
Get
Service Integration Datadog User Config Opensearch - Datadog Opensearch Options
- Redis
Get
Service Integration Datadog User Config Redis - Datadog Redis Options
- Datadog
Dbm boolEnabled - Enable Datadog Database Monitoring.
- Datadog
Pgbouncer boolEnabled - Enable Datadog PgBouncer Metric Tracking.
- []Get
Service Integration Datadog User Config Datadog Tag - Custom tags provided by user
- Exclude
Consumer []stringGroups - List of custom metrics.
- Exclude
Topics []string - List of topics to exclude.
- Include
Consumer []stringGroups - List of custom metrics.
- Include
Topics []string - List of topics to include.
- Kafka
Custom []stringMetrics - List of custom metrics.
- Max
Jmx intMetrics - Maximum number of JMX metrics to send. Example:
2000
. - Mirrormaker
Custom []stringMetrics - List of custom metrics.
- Opensearch
Get
Service Integration Datadog User Config Opensearch - Datadog Opensearch Options
- Redis
Get
Service Integration Datadog User Config Redis - Datadog Redis Options
- datadog
Dbm BooleanEnabled - Enable Datadog Database Monitoring.
- datadog
Pgbouncer BooleanEnabled - Enable Datadog PgBouncer Metric Tracking.
- List<Get
Service Integration Datadog User Config Datadog Tag> - Custom tags provided by user
- exclude
Consumer List<String>Groups - List of custom metrics.
- exclude
Topics List<String> - List of topics to exclude.
- include
Consumer List<String>Groups - List of custom metrics.
- include
Topics List<String> - List of topics to include.
- kafka
Custom List<String>Metrics - List of custom metrics.
- max
Jmx IntegerMetrics - Maximum number of JMX metrics to send. Example:
2000
. - mirrormaker
Custom List<String>Metrics - List of custom metrics.
- opensearch
Get
Service Integration Datadog User Config Opensearch - Datadog Opensearch Options
- redis
Get
Service Integration Datadog User Config Redis - Datadog Redis Options
- datadog
Dbm booleanEnabled - Enable Datadog Database Monitoring.
- datadog
Pgbouncer booleanEnabled - Enable Datadog PgBouncer Metric Tracking.
- Get
Service Integration Datadog User Config Datadog Tag[] - Custom tags provided by user
- exclude
Consumer string[]Groups - List of custom metrics.
- exclude
Topics string[] - List of topics to exclude.
- include
Consumer string[]Groups - List of custom metrics.
- include
Topics string[] - List of topics to include.
- kafka
Custom string[]Metrics - List of custom metrics.
- max
Jmx numberMetrics - Maximum number of JMX metrics to send. Example:
2000
. - mirrormaker
Custom string[]Metrics - List of custom metrics.
- opensearch
Get
Service Integration Datadog User Config Opensearch - Datadog Opensearch Options
- redis
Get
Service Integration Datadog User Config Redis - Datadog Redis Options
- datadog_
dbm_ boolenabled - Enable Datadog Database Monitoring.
- datadog_
pgbouncer_ boolenabled - Enable Datadog PgBouncer Metric Tracking.
- Sequence[Get
Service Integration Datadog User Config Datadog Tag] - Custom tags provided by user
- exclude_
consumer_ Sequence[str]groups - List of custom metrics.
- exclude_
topics Sequence[str] - List of topics to exclude.
- include_
consumer_ Sequence[str]groups - List of custom metrics.
- include_
topics Sequence[str] - List of topics to include.
- kafka_
custom_ Sequence[str]metrics - List of custom metrics.
- max_
jmx_ intmetrics - Maximum number of JMX metrics to send. Example:
2000
. - mirrormaker_
custom_ Sequence[str]metrics - List of custom metrics.
- opensearch
Get
Service Integration Datadog User Config Opensearch - Datadog Opensearch Options
- redis
Get
Service Integration Datadog User Config Redis - Datadog Redis Options
- datadog
Dbm BooleanEnabled - Enable Datadog Database Monitoring.
- datadog
Pgbouncer BooleanEnabled - Enable Datadog PgBouncer Metric Tracking.
- List<Property Map>
- Custom tags provided by user
- exclude
Consumer List<String>Groups - List of custom metrics.
- exclude
Topics List<String> - List of topics to exclude.
- include
Consumer List<String>Groups - List of custom metrics.
- include
Topics List<String> - List of topics to include.
- kafka
Custom List<String>Metrics - List of custom metrics.
- max
Jmx NumberMetrics - Maximum number of JMX metrics to send. Example:
2000
. - mirrormaker
Custom List<String>Metrics - List of custom metrics.
- opensearch Property Map
- Datadog Opensearch Options
- redis Property Map
- Datadog Redis Options
GetServiceIntegrationDatadogUserConfigDatadogTag
GetServiceIntegrationDatadogUserConfigOpensearch
- Cluster
Stats boolEnabled - Enable Datadog Opensearch Cluster Monitoring.
- Index
Stats boolEnabled - Enable Datadog Opensearch Index Monitoring.
- Pending
Task boolStats Enabled - Enable Datadog Opensearch Pending Task Monitoring.
- Pshard
Stats boolEnabled - Enable Datadog Opensearch Primary Shard Monitoring.
- Cluster
Stats boolEnabled - Enable Datadog Opensearch Cluster Monitoring.
- Index
Stats boolEnabled - Enable Datadog Opensearch Index Monitoring.
- Pending
Task boolStats Enabled - Enable Datadog Opensearch Pending Task Monitoring.
- Pshard
Stats boolEnabled - Enable Datadog Opensearch Primary Shard Monitoring.
- cluster
Stats BooleanEnabled - Enable Datadog Opensearch Cluster Monitoring.
- index
Stats BooleanEnabled - Enable Datadog Opensearch Index Monitoring.
- pending
Task BooleanStats Enabled - Enable Datadog Opensearch Pending Task Monitoring.
- pshard
Stats BooleanEnabled - Enable Datadog Opensearch Primary Shard Monitoring.
- cluster
Stats booleanEnabled - Enable Datadog Opensearch Cluster Monitoring.
- index
Stats booleanEnabled - Enable Datadog Opensearch Index Monitoring.
- pending
Task booleanStats Enabled - Enable Datadog Opensearch Pending Task Monitoring.
- pshard
Stats booleanEnabled - Enable Datadog Opensearch Primary Shard Monitoring.
- cluster_
stats_ boolenabled - Enable Datadog Opensearch Cluster Monitoring.
- index_
stats_ boolenabled - Enable Datadog Opensearch Index Monitoring.
- pending_
task_ boolstats_ enabled - Enable Datadog Opensearch Pending Task Monitoring.
- pshard_
stats_ boolenabled - Enable Datadog Opensearch Primary Shard Monitoring.
- cluster
Stats BooleanEnabled - Enable Datadog Opensearch Cluster Monitoring.
- index
Stats BooleanEnabled - Enable Datadog Opensearch Index Monitoring.
- pending
Task BooleanStats Enabled - Enable Datadog Opensearch Pending Task Monitoring.
- pshard
Stats BooleanEnabled - Enable Datadog Opensearch Primary Shard Monitoring.
GetServiceIntegrationDatadogUserConfigRedis
- CommandStatsEnabled bool - Enable command_stats option in the agent's configuration. Default: false.
- CommandStatsEnabled bool - Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled Boolean - Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled boolean - Enable command_stats option in the agent's configuration. Default: false.
- command_stats_enabled bool - Enable command_stats option in the agent's configuration. Default: false.
- commandStatsEnabled Boolean - Enable command_stats option in the agent's configuration. Default: false.
GetServiceIntegrationExternalAwsCloudwatchLogsUserConfig
- SelectedLogFields List<string> - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String> - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String> - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfig
- DroppedMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric> - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- ExtraMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric> - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- DroppedMetrics []GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- ExtraMetrics []GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric> - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics List<GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric> - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric[] - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric[] - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- dropped_metrics Sequence[GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric] - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extra_metrics Sequence[GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric] - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
- droppedMetrics List<Property Map> - Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)
- extraMetrics List<Property Map> - Metrics to allow through to AWS CloudWatch (in addition to default metrics)
GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigDroppedMetric
GetServiceIntegrationExternalAwsCloudwatchMetricsUserConfigExtraMetric
GetServiceIntegrationExternalElasticsearchLogsUserConfig
- SelectedLogFields List<string> - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String> - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List<String> - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
GetServiceIntegrationExternalOpensearchLogsUserConfig
- SelectedLogFields List&lt;string&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
GetServiceIntegrationFlinkExternalPostgresqlUserConfig
- Stringtype string
- Enum:
unspecified
. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- Stringtype string
- Enum:
unspecified
. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype String
- Enum:
unspecified
. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype string
- Enum:
unspecified
. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype str
- Enum:
unspecified
. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
- stringtype String
- Enum:
unspecified
. If stringtype is set to unspecified, parameters will be sent to the server as untyped values.
GetServiceIntegrationKafkaConnectUserConfig
- KafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- KafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafkaConnect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafka_connect GetServiceIntegrationKafkaConnectUserConfigKafkaConnect - Kafka Connect service configuration values
- kafkaConnect Property Map - Kafka Connect service configuration values
GetServiceIntegrationKafkaConnectUserConfigKafkaConnect
- ConfigStorageTopic string - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- GroupId string - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- OffsetStorageTopic string - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- StatusStorageTopic string - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- ConfigStorageTopic string - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- GroupId string - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- OffsetStorageTopic string - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- StatusStorageTopic string - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic String - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId String - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic String - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic String - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic string - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId string - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic string - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic string - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- config_storage_topic str - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- group_id str - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offset_storage_topic str - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- status_storage_topic str - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
- configStorageTopic String - The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. Example: __connect_configs.
- groupId String - A unique string that identifies the Connect cluster group this worker belongs to. Example: connect.
- offsetStorageTopic String - The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. Example: __connect_offsets.
- statusStorageTopic String - The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. Example: __connect_status.
GetServiceIntegrationKafkaLogsUserConfig
- KafkaTopic string - Topic name. Example: mytopic.
- SelectedLogFields List&lt;string&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- KafkaTopic string - Topic name. Example: mytopic.
- SelectedLogFields []string - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic String - Topic name. Example: mytopic.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic string - Topic name. Example: mytopic.
- selectedLogFields string[] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafka_topic str - Topic name. Example: mytopic.
- selected_log_fields Sequence[str] - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- kafkaTopic String - Topic name. Example: mytopic.
- selectedLogFields List&lt;String&gt; - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
GetServiceIntegrationKafkaMirrormakerUserConfig
- Cluster
Alias string - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics,
.
,_
, and-
. Example:kafka-abc
. - Kafka
Mirrormaker GetService Integration Kafka Mirrormaker User Config Kafka Mirrormaker - Kafka MirrorMaker configuration values
- Cluster
Alias string - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics,
.
,_
, and-
. Example:kafka-abc
. - Kafka
Mirrormaker GetService Integration Kafka Mirrormaker User Config Kafka Mirrormaker - Kafka MirrorMaker configuration values
- cluster
Alias String - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics,
.
,_
, and-
. Example:kafka-abc
. - kafka
Mirrormaker GetService Integration Kafka Mirrormaker User Config Kafka Mirrormaker - Kafka MirrorMaker configuration values
- cluster
Alias string - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics,
.
,_
, and-
. Example:kafka-abc
. - kafka
Mirrormaker GetService Integration Kafka Mirrormaker User Config Kafka Mirrormaker - Kafka MirrorMaker configuration values
- cluster_
alias str - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics,
.
,_
, and-
. Example:kafka-abc
. - kafka_
mirrormaker GetService Integration Kafka Mirrormaker User Config Kafka Mirrormaker - Kafka MirrorMaker configuration values
- cluster
Alias String - The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics,
.
,_
, and-
. Example:kafka-abc
. - kafka
Mirrormaker Property Map - Kafka MirrorMaker configuration values
GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker
- Consumer
Auto stringOffset Reset - Enum:
earliest
,latest
. Set where consumer starts to consume data. Valueearliest
: Start replication from the earliest offset. Valuelatest
: Start replication from the latest offset. Default isearliest
. - Consumer
Fetch intMin Bytes - The minimum amount of data the server should return for a fetch request. Example:
1024
. - Consumer
Max intPoll Records - Set consumer max.poll.records. The default is 500. Example:
500
. - Producer
Batch intSize - The batch size in bytes producer will attempt to collect before publishing to broker. Example:
1024
. - Producer
Buffer intMemory - The amount of bytes producer can use for buffering data before publishing to broker. Example:
8388608
. - Producer
Compression stringType - Enum:
gzip
,lz4
,none
,snappy
,zstd
. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip
,snappy
,lz4
,zstd
). It additionally acceptsnone
which is the default and equivalent to no compression. - Producer
Linger intMs - The linger time (ms) for waiting new data to arrive for publishing. Example:
100
. - Producer
Max intRequest Size - The maximum request size in bytes. Example:
1048576
.
- Consumer
Auto stringOffset Reset - Enum:
earliest
,latest
. Set where consumer starts to consume data. Valueearliest
: Start replication from the earliest offset. Valuelatest
: Start replication from the latest offset. Default isearliest
. - Consumer
Fetch intMin Bytes - The minimum amount of data the server should return for a fetch request. Example:
1024
. - Consumer
Max intPoll Records - Set consumer max.poll.records. The default is 500. Example:
500
. - Producer
Batch intSize - The batch size in bytes producer will attempt to collect before publishing to broker. Example:
1024
. - Producer
Buffer intMemory - The amount of bytes producer can use for buffering data before publishing to broker. Example:
8388608
. - Producer
Compression stringType - Enum:
gzip
,lz4
,none
,snappy
,zstd
. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip
,snappy
,lz4
,zstd
). It additionally acceptsnone
which is the default and equivalent to no compression. - Producer
Linger intMs - The linger time (ms) for waiting new data to arrive for publishing. Example:
100
. - Producer
Max intRequest Size - The maximum request size in bytes. Example:
1048576
.
- consumer
Auto StringOffset Reset - Enum:
earliest
,latest
. Set where consumer starts to consume data. Valueearliest
: Start replication from the earliest offset. Valuelatest
: Start replication from the latest offset. Default isearliest
. - consumer
Fetch IntegerMin Bytes - The minimum amount of data the server should return for a fetch request. Example:
1024
. - consumer
Max IntegerPoll Records - Set consumer max.poll.records. The default is 500. Example:
500
. - producer
Batch IntegerSize - The batch size in bytes producer will attempt to collect before publishing to broker. Example:
1024
. - producer
Buffer IntegerMemory - The amount of bytes producer can use for buffering data before publishing to broker. Example:
8388608
. - producer
Compression StringType - Enum:
gzip
,lz4
,none
,snappy
,zstd
. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip
,snappy
,lz4
,zstd
). It additionally acceptsnone
which is the default and equivalent to no compression. - producer
Linger IntegerMs - The linger time (ms) for waiting new data to arrive for publishing. Example:
100
. - producer
Max IntegerRequest Size - The maximum request size in bytes. Example:
1048576
.
- consumer
Auto stringOffset Reset - Enum:
earliest
,latest
. Set where consumer starts to consume data. Valueearliest
: Start replication from the earliest offset. Valuelatest
: Start replication from the latest offset. Default isearliest
. - consumer
Fetch numberMin Bytes - The minimum amount of data the server should return for a fetch request. Example:
1024
. - consumer
Max numberPoll Records - Set consumer max.poll.records. The default is 500. Example:
500
. - producer
Batch numberSize - The batch size in bytes producer will attempt to collect before publishing to broker. Example:
1024
. - producer
Buffer numberMemory - The amount of bytes producer can use for buffering data before publishing to broker. Example:
8388608
. - producer
Compression stringType - Enum:
gzip
,lz4
,none
,snappy
,zstd
. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip
,snappy
,lz4
,zstd
). It additionally acceptsnone
which is the default and equivalent to no compression. - producer
Linger numberMs - The linger time (ms) for waiting new data to arrive for publishing. Example:
100
. - producer
Max numberRequest Size - The maximum request size in bytes. Example:
1048576
.
- consumer_
auto_ stroffset_ reset - Enum:
earliest
,latest
. Set where consumer starts to consume data. Valueearliest
: Start replication from the earliest offset. Valuelatest
: Start replication from the latest offset. Default isearliest
. - consumer_
fetch_ intmin_ bytes - The minimum amount of data the server should return for a fetch request. Example:
1024
. - consumer_
max_ intpoll_ records - Set consumer max.poll.records. The default is 500. Example:
500
. - producer_
batch_ intsize - The batch size in bytes producer will attempt to collect before publishing to broker. Example:
1024
. - producer_
buffer_ intmemory - The amount of bytes producer can use for buffering data before publishing to broker. Example:
8388608
. - producer_
compression_ strtype - Enum:
gzip
,lz4
,none
,snappy
,zstd
. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip
,snappy
,lz4
,zstd
). It additionally acceptsnone
which is the default and equivalent to no compression. - producer_
linger_ intms - The linger time (ms) for waiting new data to arrive for publishing. Example:
100
. - producer_
max_ intrequest_ size - The maximum request size in bytes. Example:
1048576
.
- consumer
Auto StringOffset Reset - Enum:
earliest
,latest
. Set where consumer starts to consume data. Valueearliest
: Start replication from the earliest offset. Valuelatest
: Start replication from the latest offset. Default isearliest
. - consumer
Fetch NumberMin Bytes - The minimum amount of data the server should return for a fetch request. Example:
1024
. - consumer
Max NumberPoll Records - Set consumer max.poll.records. The default is 500. Example:
500
. - producer
Batch NumberSize - The batch size in bytes producer will attempt to collect before publishing to broker. Example:
1024
. - producer
Buffer NumberMemory - The amount of bytes producer can use for buffering data before publishing to broker. Example:
8388608
. - producer
Compression StringType - Enum:
gzip
,lz4
,none
,snappy
,zstd
. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip
,snappy
,lz4
,zstd
). It additionally acceptsnone
which is the default and equivalent to no compression. - producer
Linger NumberMs - The linger time (ms) for waiting new data to arrive for publishing. Example:
100
. - producer
Max NumberRequest Size - The maximum request size in bytes. Example:
1048576
.
GetServiceIntegrationLogsUserConfig
- Elasticsearch
Index intDays Max - Elasticsearch index retention limit. Default:
3
. - Elasticsearch
Index stringPrefix - Elasticsearch index prefix. Default:
logs
. - Selected
Log List<string>Fields - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- Elasticsearch
Index intDays Max - Elasticsearch index retention limit. Default:
3
. - Elasticsearch
Index stringPrefix - Elasticsearch index prefix. Default:
logs
. - Selected
Log []stringFields - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearch
Index IntegerDays Max - Elasticsearch index retention limit. Default:
3
. - elasticsearch
Index StringPrefix - Elasticsearch index prefix. Default:
logs
. - selected
Log List<String>Fields - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearch
Index numberDays Max - Elasticsearch index retention limit. Default:
3
. - elasticsearch
Index stringPrefix - Elasticsearch index prefix. Default:
logs
. - selected
Log string[]Fields - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearch_
index_ intdays_ max - Elasticsearch index retention limit. Default:
3
. - elasticsearch_
index_ strprefix - Elasticsearch index prefix. Default:
logs
. - selected_
log_ Sequence[str]fields - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
- elasticsearch
Index NumberDays Max - Elasticsearch index retention limit. Default:
3
. - elasticsearch
Index StringPrefix - Elasticsearch index prefix. Default:
logs
. - selected
Log List<String>Fields - The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
GetServiceIntegrationMetricsUserConfig
- Database string
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Retention
Days int - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- Ro
Username string - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Source
Mysql GetService Integration Metrics User Config Source Mysql - Configuration options for metrics where source service is MySQL
- Username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- Database string
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Retention
Days int - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- Ro
Username string - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - Source
Mysql GetService Integration Metrics User Config Source Mysql - Configuration options for metrics where source service is MySQL
- Username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database String
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - retention
Days Integer - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- ro
Username String - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - source
Mysql GetService Integration Metrics User Config Source Mysql - Configuration options for metrics where source service is MySQL
- username String
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database string
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - retention
Days number - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- ro
Username string - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - source
Mysql GetService Integration Metrics User Config Source Mysql - Configuration options for metrics where source service is MySQL
- username string
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database str
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - retention_
days int - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- ro_
username str - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - source_
mysql GetService Integration Metrics User Config Source Mysql - Configuration options for metrics where source service is MySQL
- username str
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- database String
- Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to
metrics
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - retention
Days Number - Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- ro
Username String - Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to
metrics_reader
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - source
Mysql Property Map - Configuration options for metrics where source service is MySQL
- username String
- Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to
metrics_writer
. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
GetServiceIntegrationMetricsUserConfigSourceMysql
- Telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- Telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf Property Map - Configuration options for Telegraf MySQL input plugin
GetServiceIntegrationMetricsUserConfigSourceMysqlTelegraf
- Gather
Event boolWaits - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- Gather
File boolEvents Stats - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- Gather
Index boolIo Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- Gather
Info boolSchema Auto Inc - Gather auto_increment columns and max values from information schema.
- Gather
Innodb boolMetrics - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- Gather
Perf boolEvents Statements - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- Gather
Process boolList - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- Gather
Slave boolStatus - Gather metrics from SHOW SLAVE STATUS command output.
- Gather
Table boolIo Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- Gather
Table boolLock Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- Gather
Table boolSchema - Gather metrics from INFORMATION_SCHEMA.TABLES.
- Perf
Events intStatements Digest Text Limit - Truncates digest text from perf_events_statements into this many characters. Example:
120
. - Perf
Events intStatements Limit - Limits metrics from perf_events_statements. Example:
250
. - Perf
Events intStatements Time Limit - Only include perf_events_statements whose last seen is less than this many seconds. Example:
86400
.
- Gather
Event boolWaits - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- Gather
File boolEvents Stats - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- Gather
Index boolIo Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- Gather
Info boolSchema Auto Inc - Gather auto_increment columns and max values from information schema.
- Gather
Innodb boolMetrics - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- Gather
Perf boolEvents Statements - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- Gather
Process boolList - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- Gather
Slave boolStatus - Gather metrics from SHOW SLAVE STATUS command output.
- Gather
Table boolIo Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- Gather
Table boolLock Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- Gather
Table boolSchema - Gather metrics from INFORMATION_SCHEMA.TABLES.
- Perf
Events intStatements Digest Text Limit - Truncates digest text from perf_events_statements into this many characters. Example:
120
. - Perf
Events intStatements Limit - Limits metrics from perf_events_statements. Example:
250
. - Perf
Events intStatements Time Limit - Only include perf_events_statements whose last seen is less than this many seconds. Example:
86400
.
- gather
Event BooleanWaits - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gather
File BooleanEvents Stats - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gather
Index BooleanIo Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gather
Info BooleanSchema Auto Inc - Gather auto_increment columns and max values from information schema.
- gather
Innodb BooleanMetrics - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gather
Perf BooleanEvents Statements - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gather
Process BooleanList - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gather
Slave BooleanStatus - Gather metrics from SHOW SLAVE STATUS command output.
- gather
Table BooleanIo Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gather
Table BooleanLock Waits - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gather
Table BooleanSchema - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perf
Events IntegerStatements Digest Text Limit - Truncates digest text from perf_events_statements into this many characters. Example:
120
. - perf
Events IntegerStatements Limit - Limits metrics from perf_events_statements. Example:
250
. - perf
Events IntegerStatements Time Limit - Only include perf_events_statements whose last seen is less than this many seconds. Example:
86400
.
- gatherEventWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gather_event_waits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gather_file_events_stats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gather_index_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gather_info_schema_auto_inc bool - Gather auto_increment columns and max values from information schema.
- gather_innodb_metrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gather_perf_events_statements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gather_process_list bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gather_slave_status bool - Gather metrics from SHOW SLAVE STATUS command output.
- gather_table_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gather_table_lock_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gather_table_schema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perf_events_statements_digest_text_limit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perf_events_statements_limit int - Limits metrics from perf_events_statements. Example: 250.
- perf_events_statements_time_limit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics Boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit Number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
GetServiceIntegrationPrometheusUserConfig
- SourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- SourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- sourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- sourceMysql GetServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- source_mysql GetServiceIntegrationPrometheusUserConfigSourceMysql - Configuration options for metrics where source service is MySQL
- sourceMysql Property Map - Configuration options for metrics where source service is MySQL
GetServiceIntegrationPrometheusUserConfigSourceMysql
- Telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- Telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf - Configuration options for Telegraf MySQL input plugin
- telegraf Property Map - Configuration options for Telegraf MySQL input plugin
GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf
- GatherEventWaits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool - Gather auto_increment columns and max values from information schema.
- GatherInnodbMetrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool - Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- PerfEventsStatementsLimit int - Limits metrics from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- GatherEventWaits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- GatherFileEventsStats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- GatherIndexIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- GatherInfoSchemaAutoInc bool - Gather auto_increment columns and max values from information schema.
- GatherInnodbMetrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- GatherPerfEventsStatements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- GatherProcessList bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- GatherSlaveStatus bool - Gather metrics from SHOW SLAVE STATUS command output.
- GatherTableIoWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- GatherTableLockWaits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- GatherTableSchema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- PerfEventsStatementsDigestTextLimit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- PerfEventsStatementsLimit int - Limits metrics from perf_events_statements. Example: 250.
- PerfEventsStatementsTimeLimit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics Boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Integer - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit Integer - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Integer - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gather_event_waits bool - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gather_file_events_stats bool - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gather_index_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gather_info_schema_auto_inc bool - Gather auto_increment columns and max values from information schema.
- gather_innodb_metrics bool - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gather_perf_events_statements bool - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gather_process_list bool - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gather_slave_status bool - Gather metrics from SHOW SLAVE STATUS command output.
- gather_table_io_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gather_table_lock_waits bool - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gather_table_schema bool - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perf_events_statements_digest_text_limit int - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perf_events_statements_limit int - Limits metrics from perf_events_statements. Example: 250.
- perf_events_statements_time_limit int - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
- gatherEventWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
- gatherFileEventsStats Boolean - Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
- gatherIndexIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
- gatherInfoSchemaAutoInc Boolean - Gather auto_increment columns and max values from information schema.
- gatherInnodbMetrics Boolean - Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
- gatherPerfEventsStatements Boolean - Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
- gatherProcessList Boolean - Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
- gatherSlaveStatus Boolean - Gather metrics from SHOW SLAVE STATUS command output.
- gatherTableIoWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
- gatherTableLockWaits Boolean - Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
- gatherTableSchema Boolean - Gather metrics from INFORMATION_SCHEMA.TABLES.
- perfEventsStatementsDigestTextLimit Number - Truncates digest text from perf_events_statements into this many characters. Example: 120.
- perfEventsStatementsLimit Number - Limits metrics from perf_events_statements. Example: 250.
- perfEventsStatementsTimeLimit Number - Only include perf_events_statements whose last seen is less than this many seconds. Example: 86400.
Package Details
- Repository
- Aiven pulumi/pulumi-aiven
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
aiven
Terraform Provider.