gcp.dataproc.MetastoreService
A managed metastore service that serves metadata queries.
To get more information about the MetastoreService resource, see:
- API documentation
- How-to Guides
Example Usage
Dataproc Metastore Service Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.dataproc.MetastoreService("default", {
serviceId: "metastore-srv",
location: "us-central1",
port: 9080,
tier: "DEVELOPER",
maintenanceWindow: {
hourOfDay: 2,
dayOfWeek: "SUNDAY",
},
hiveMetastoreConfig: {
version: "2.3.6",
},
labels: {
env: "test",
},
});
import pulumi
import pulumi_gcp as gcp
default = gcp.dataproc.MetastoreService("default",
service_id="metastore-srv",
location="us-central1",
port=9080,
tier="DEVELOPER",
maintenance_window={
"hour_of_day": 2,
"day_of_week": "SUNDAY",
},
hive_metastore_config={
"version": "2.3.6",
},
labels={
"env": "test",
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("metastore-srv"),
Location: pulumi.String("us-central1"),
Port: pulumi.Int(9080),
Tier: pulumi.String("DEVELOPER"),
MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
HourOfDay: pulumi.Int(2),
DayOfWeek: pulumi.String("SUNDAY"),
},
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("2.3.6"),
},
Labels: pulumi.StringMap{
"env": pulumi.String("test"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.Dataproc.MetastoreService("default", new()
{
ServiceId = "metastore-srv",
Location = "us-central1",
Port = 9080,
Tier = "DEVELOPER",
MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
{
HourOfDay = 2,
DayOfWeek = "SUNDAY",
},
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "2.3.6",
},
Labels =
{
{ "env", "test" },
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
.serviceId("metastore-srv")
.location("us-central1")
.port(9080)
.tier("DEVELOPER")
.maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
.hourOfDay(2)
.dayOfWeek("SUNDAY")
.build())
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("2.3.6")
.build())
.labels(Map.of("env", "test"))
.build());
}
}
resources:
default:
type: gcp:dataproc:MetastoreService
properties:
serviceId: metastore-srv
location: us-central1
port: 9080
tier: DEVELOPER
maintenanceWindow:
hourOfDay: 2
dayOfWeek: SUNDAY
hiveMetastoreConfig:
version: 2.3.6
labels:
env: test
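Downstream Dataproc clusters consume the metastore over its Thrift endpoint. The short sketch below follows on from the TypeScript variant of the Basic example above; it is not part of the generated example and assumes the gcp.dataproc.Cluster resource's clusterConfig.metastoreConfig block plus the service's name and endpointUri output attributes.
// Hypothetical follow-on: attach the new metastore to a Dataproc cluster and
// export the Thrift endpoint for other clients.
const cluster = new gcp.dataproc.Cluster("cluster", {
    name: "my-cluster",
    region: "us-central1",
    clusterConfig: {
        metastoreConfig: {
            // _default.name is the relative resource name, e.g.
            // projects/<project>/locations/us-central1/services/metastore-srv
            dataprocMetastoreService: _default.name,
        },
    },
});
export const metastoreEndpoint = _default.endpointUri;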
Dataproc Metastore Service Deletion Protection
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.dataproc.MetastoreService("default", {
serviceId: "metastore-srv",
location: "us-central1",
port: 9080,
tier: "DEVELOPER",
deletionProtection: true,
maintenanceWindow: {
hourOfDay: 2,
dayOfWeek: "SUNDAY",
},
hiveMetastoreConfig: {
version: "2.3.6",
},
labels: {
env: "test",
},
});
import pulumi
import pulumi_gcp as gcp
default = gcp.dataproc.MetastoreService("default",
service_id="metastore-srv",
location="us-central1",
port=9080,
tier="DEVELOPER",
deletion_protection=True,
maintenance_window={
"hour_of_day": 2,
"day_of_week": "SUNDAY",
},
hive_metastore_config={
"version": "2.3.6",
},
labels={
"env": "test",
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("metastore-srv"),
Location: pulumi.String("us-central1"),
Port: pulumi.Int(9080),
Tier: pulumi.String("DEVELOPER"),
DeletionProtection: pulumi.Bool(true),
MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
HourOfDay: pulumi.Int(2),
DayOfWeek: pulumi.String("SUNDAY"),
},
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("2.3.6"),
},
Labels: pulumi.StringMap{
"env": pulumi.String("test"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.Dataproc.MetastoreService("default", new()
{
ServiceId = "metastore-srv",
Location = "us-central1",
Port = 9080,
Tier = "DEVELOPER",
DeletionProtection = true,
MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
{
HourOfDay = 2,
DayOfWeek = "SUNDAY",
},
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "2.3.6",
},
Labels =
{
{ "env", "test" },
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
.serviceId("metastore-srv")
.location("us-central1")
.port(9080)
.tier("DEVELOPER")
.deletionProtection(true)
.maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
.hourOfDay(2)
.dayOfWeek("SUNDAY")
.build())
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("2.3.6")
.build())
.labels(Map.of("env", "test"))
.build());
}
}
resources:
default:
type: gcp:dataproc:MetastoreService
properties:
serviceId: metastore-srv
location: us-central1
port: 9080
tier: DEVELOPER
deletionProtection: true
maintenanceWindow:
hourOfDay: 2
dayOfWeek: SUNDAY
hiveMetastoreConfig:
version: 2.3.6
labels:
env: test
Dataproc Metastore Service CMEK Example
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const keyRing = new gcp.kms.KeyRing("key_ring", {
name: "example-keyring",
location: "us-central1",
});
const cryptoKey = new gcp.kms.CryptoKey("crypto_key", {
name: "example-key",
keyRing: keyRing.id,
purpose: "ENCRYPT_DECRYPT",
});
const _default = new gcp.dataproc.MetastoreService("default", {
serviceId: "example-service",
location: "us-central1",
encryptionConfig: {
kmsKey: cryptoKey.id,
},
hiveMetastoreConfig: {
version: "3.1.2",
},
});
import pulumi
import pulumi_gcp as gcp
key_ring = gcp.kms.KeyRing("key_ring",
name="example-keyring",
location="us-central1")
crypto_key = gcp.kms.CryptoKey("crypto_key",
name="example-key",
key_ring=key_ring.id,
purpose="ENCRYPT_DECRYPT")
default = gcp.dataproc.MetastoreService("default",
service_id="example-service",
location="us-central1",
encryption_config={
"kms_key": crypto_key.id,
},
hive_metastore_config={
"version": "3.1.2",
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{
Name: pulumi.String("example-keyring"),
Location: pulumi.String("us-central1"),
})
if err != nil {
return err
}
cryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{
Name: pulumi.String("example-key"),
KeyRing: keyRing.ID(),
Purpose: pulumi.String("ENCRYPT_DECRYPT"),
})
if err != nil {
return err
}
_, err = dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("example-service"),
Location: pulumi.String("us-central1"),
EncryptionConfig: &dataproc.MetastoreServiceEncryptionConfigArgs{
KmsKey: cryptoKey.ID(),
},
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
{
Name = "example-keyring",
Location = "us-central1",
});
var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
{
Name = "example-key",
KeyRing = keyRing.Id,
Purpose = "ENCRYPT_DECRYPT",
});
var @default = new Gcp.Dataproc.MetastoreService("default", new()
{
ServiceId = "example-service",
Location = "us-central1",
EncryptionConfig = new Gcp.Dataproc.Inputs.MetastoreServiceEncryptionConfigArgs
{
KmsKey = cryptoKey.Id,
},
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.kms.KeyRing;
import com.pulumi.gcp.kms.KeyRingArgs;
import com.pulumi.gcp.kms.CryptoKey;
import com.pulumi.gcp.kms.CryptoKeyArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceEncryptionConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()
.name("example-keyring")
.location("us-central1")
.build());
var cryptoKey = new CryptoKey("cryptoKey", CryptoKeyArgs.builder()
.name("example-key")
.keyRing(keyRing.id())
.purpose("ENCRYPT_DECRYPT")
.build());
var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
.serviceId("example-service")
.location("us-central1")
.encryptionConfig(MetastoreServiceEncryptionConfigArgs.builder()
.kmsKey(cryptoKey.id())
.build())
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.build());
}
}
resources:
default:
type: gcp:dataproc:MetastoreService
properties:
serviceId: example-service
location: us-central1
encryptionConfig:
kmsKey: ${cryptoKey.id}
hiveMetastoreConfig:
version: 3.1.2
cryptoKey:
type: gcp:kms:CryptoKey
name: crypto_key
properties:
name: example-key
keyRing: ${keyRing.id}
purpose: ENCRYPT_DECRYPT
keyRing:
type: gcp:kms:KeyRing
name: key_ring
properties:
name: example-keyring
location: us-central1
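Before the service can use the key, the Dataproc Metastore service agent must be allowed to encrypt and decrypt with it. A minimal TypeScript sketch that builds on the example above; the service-agent email format (service-&lt;project number&gt;@gcp-sa-metastore.iam.gserviceaccount.com) and the chosen IAM role are assumptions to verify for your project.
// Hypothetical IAM grant so the Metastore service agent can use the CMEK key.
const project = gcp.organizations.getProject({});
const kmsAccess = new gcp.kms.CryptoKeyIAMMember("kms_access", {
    cryptoKeyId: cryptoKey.id,
    role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member: project.then(p => `serviceAccount:service-${p.number}@gcp-sa-metastore.iam.gserviceaccount.com`),
});
In practice you would also pass this grant to the MetastoreService via dependsOn so the binding exists before the service is created.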
Dataproc Metastore Service Private Service Connect
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const net = new gcp.compute.Network("net", {
name: "my-network",
autoCreateSubnetworks: false,
});
const subnet = new gcp.compute.Subnetwork("subnet", {
name: "my-subnetwork",
region: "us-central1",
network: net.id,
ipCidrRange: "10.0.0.0/22",
privateIpGoogleAccess: true,
});
const _default = new gcp.dataproc.MetastoreService("default", {
serviceId: "metastore-srv",
location: "us-central1",
tier: "DEVELOPER",
hiveMetastoreConfig: {
version: "3.1.2",
},
networkConfig: {
consumers: [{
subnetwork: subnet.id,
}],
},
});
import pulumi
import pulumi_gcp as gcp
net = gcp.compute.Network("net",
name="my-network",
auto_create_subnetworks=False)
subnet = gcp.compute.Subnetwork("subnet",
name="my-subnetwork",
region="us-central1",
network=net.id,
ip_cidr_range="10.0.0.0/22",
private_ip_google_access=True)
default = gcp.dataproc.MetastoreService("default",
service_id="metastore-srv",
location="us-central1",
tier="DEVELOPER",
hive_metastore_config={
"version": "3.1.2",
},
network_config={
"consumers": [{
"subnetwork": subnet.id,
}],
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
net, err := compute.NewNetwork(ctx, "net", &compute.NetworkArgs{
Name: pulumi.String("my-network"),
AutoCreateSubnetworks: pulumi.Bool(false),
})
if err != nil {
return err
}
subnet, err := compute.NewSubnetwork(ctx, "subnet", &compute.SubnetworkArgs{
Name: pulumi.String("my-subnetwork"),
Region: pulumi.String("us-central1"),
Network: net.ID(),
IpCidrRange: pulumi.String("10.0.0.0/22"),
PrivateIpGoogleAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
_, err = dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("metastore-srv"),
Location: pulumi.String("us-central1"),
Tier: pulumi.String("DEVELOPER"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
NetworkConfig: &dataproc.MetastoreServiceNetworkConfigArgs{
Consumers: dataproc.MetastoreServiceNetworkConfigConsumerArray{
&dataproc.MetastoreServiceNetworkConfigConsumerArgs{
Subnetwork: subnet.ID(),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var net = new Gcp.Compute.Network("net", new()
{
Name = "my-network",
AutoCreateSubnetworks = false,
});
var subnet = new Gcp.Compute.Subnetwork("subnet", new()
{
Name = "my-subnetwork",
Region = "us-central1",
Network = net.Id,
IpCidrRange = "10.0.0.0/22",
PrivateIpGoogleAccess = true,
});
var @default = new Gcp.Dataproc.MetastoreService("default", new()
{
ServiceId = "metastore-srv",
Location = "us-central1",
Tier = "DEVELOPER",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
NetworkConfig = new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigArgs
{
Consumers = new[]
{
new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigConsumerArgs
{
Subnetwork = subnet.Id,
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Network;
import com.pulumi.gcp.compute.NetworkArgs;
import com.pulumi.gcp.compute.Subnetwork;
import com.pulumi.gcp.compute.SubnetworkArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigConsumerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var net = new Network("net", NetworkArgs.builder()
.name("my-network")
.autoCreateSubnetworks(false)
.build());
var subnet = new Subnetwork("subnet", SubnetworkArgs.builder()
.name("my-subnetwork")
.region("us-central1")
.network(net.id())
.ipCidrRange("10.0.0.0/22")
.privateIpGoogleAccess(true)
.build());
var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
.serviceId("metastore-srv")
.location("us-central1")
.tier("DEVELOPER")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.networkConfig(MetastoreServiceNetworkConfigArgs.builder()
.consumers(MetastoreServiceNetworkConfigConsumerArgs.builder()
.subnetwork(subnet.id())
.build())
.build())
.build());
}
}
resources:
net:
type: gcp:compute:Network
properties:
name: my-network
autoCreateSubnetworks: false
subnet:
type: gcp:compute:Subnetwork
properties:
name: my-subnetwork
region: us-central1
network: ${net.id}
ipCidrRange: 10.0.0.0/22
privateIpGoogleAccess: true
default:
type: gcp:dataproc:MetastoreService
properties:
serviceId: metastore-srv
location: us-central1
tier: DEVELOPER
hiveMetastoreConfig:
version: 3.1.2
networkConfig:
consumers:
- subnetwork: ${subnet.id}
Dataproc Metastore Service Private Service Connect Custom Routes
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const net = new gcp.compute.Network("net", {
name: "my-network",
autoCreateSubnetworks: false,
});
const subnet = new gcp.compute.Subnetwork("subnet", {
name: "my-subnetwork",
region: "us-central1",
network: net.id,
ipCidrRange: "10.0.0.0/22",
privateIpGoogleAccess: true,
});
const _default = new gcp.dataproc.MetastoreService("default", {
serviceId: "metastore-srv",
location: "us-central1",
hiveMetastoreConfig: {
version: "3.1.2",
},
networkConfig: {
consumers: [{
subnetwork: subnet.id,
}],
customRoutesEnabled: true,
},
});
import pulumi
import pulumi_gcp as gcp
net = gcp.compute.Network("net",
name="my-network",
auto_create_subnetworks=False)
subnet = gcp.compute.Subnetwork("subnet",
name="my-subnetwork",
region="us-central1",
network=net.id,
ip_cidr_range="10.0.0.0/22",
private_ip_google_access=True)
default = gcp.dataproc.MetastoreService("default",
service_id="metastore-srv",
location="us-central1",
hive_metastore_config={
"version": "3.1.2",
},
network_config={
"consumers": [{
"subnetwork": subnet.id,
}],
"custom_routes_enabled": True,
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
net, err := compute.NewNetwork(ctx, "net", &compute.NetworkArgs{
Name: pulumi.String("my-network"),
AutoCreateSubnetworks: pulumi.Bool(false),
})
if err != nil {
return err
}
subnet, err := compute.NewSubnetwork(ctx, "subnet", &compute.SubnetworkArgs{
Name: pulumi.String("my-subnetwork"),
Region: pulumi.String("us-central1"),
Network: net.ID(),
IpCidrRange: pulumi.String("10.0.0.0/22"),
PrivateIpGoogleAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
_, err = dataproc.NewMetastoreService(ctx, "default", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("metastore-srv"),
Location: pulumi.String("us-central1"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
NetworkConfig: &dataproc.MetastoreServiceNetworkConfigArgs{
Consumers: dataproc.MetastoreServiceNetworkConfigConsumerArray{
&dataproc.MetastoreServiceNetworkConfigConsumerArgs{
Subnetwork: subnet.ID(),
},
},
CustomRoutesEnabled: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var net = new Gcp.Compute.Network("net", new()
{
Name = "my-network",
AutoCreateSubnetworks = false,
});
var subnet = new Gcp.Compute.Subnetwork("subnet", new()
{
Name = "my-subnetwork",
Region = "us-central1",
Network = net.Id,
IpCidrRange = "10.0.0.0/22",
PrivateIpGoogleAccess = true,
});
var @default = new Gcp.Dataproc.MetastoreService("default", new()
{
ServiceId = "metastore-srv",
Location = "us-central1",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
NetworkConfig = new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigArgs
{
Consumers = new[]
{
new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigConsumerArgs
{
Subnetwork = subnet.Id,
},
},
CustomRoutesEnabled = true,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.Network;
import com.pulumi.gcp.compute.NetworkArgs;
import com.pulumi.gcp.compute.Subnetwork;
import com.pulumi.gcp.compute.SubnetworkArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceNetworkConfigConsumerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var net = new Network("net", NetworkArgs.builder()
.name("my-network")
.autoCreateSubnetworks(false)
.build());
var subnet = new Subnetwork("subnet", SubnetworkArgs.builder()
.name("my-subnetwork")
.region("us-central1")
.network(net.id())
.ipCidrRange("10.0.0.0/22")
.privateIpGoogleAccess(true)
.build());
var default_ = new MetastoreService("default", MetastoreServiceArgs.builder()
.serviceId("metastore-srv")
.location("us-central1")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.networkConfig(MetastoreServiceNetworkConfigArgs.builder()
.consumers(MetastoreServiceNetworkConfigConsumerArgs.builder()
.subnetwork(subnet.id())
.build())
.customRoutesEnabled(true)
.build())
.build());
}
}
resources:
net:
type: gcp:compute:Network
properties:
name: my-network
autoCreateSubnetworks: false
subnet:
type: gcp:compute:Subnetwork
properties:
name: my-subnetwork
region: us-central1
network: ${net.id}
ipCidrRange: 10.0.0.0/22
privateIpGoogleAccess: true
default:
type: gcp:dataproc:MetastoreService
properties:
serviceId: metastore-srv
location: us-central1
hiveMetastoreConfig:
version: 3.1.2
networkConfig:
consumers:
- subnetwork: ${subnet.id}
customRoutesEnabled: true
Dataproc Metastore Service Dpms2
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const dpms2 = new gcp.dataproc.MetastoreService("dpms2", {
serviceId: "ms-dpms2",
location: "us-central1",
databaseType: "SPANNER",
hiveMetastoreConfig: {
version: "3.1.2",
},
scalingConfig: {
instanceSize: "EXTRA_SMALL",
},
});
import pulumi
import pulumi_gcp as gcp
dpms2 = gcp.dataproc.MetastoreService("dpms2",
service_id="ms-dpms2",
location="us-central1",
database_type="SPANNER",
hive_metastore_config={
"version": "3.1.2",
},
scaling_config={
"instance_size": "EXTRA_SMALL",
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "dpms2", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("ms-dpms2"),
Location: pulumi.String("us-central1"),
DatabaseType: pulumi.String("SPANNER"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
InstanceSize: pulumi.String("EXTRA_SMALL"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var dpms2 = new Gcp.Dataproc.MetastoreService("dpms2", new()
{
ServiceId = "ms-dpms2",
Location = "us-central1",
DatabaseType = "SPANNER",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
{
InstanceSize = "EXTRA_SMALL",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var dpms2 = new MetastoreService("dpms2", MetastoreServiceArgs.builder()
.serviceId("ms-dpms2")
.location("us-central1")
.databaseType("SPANNER")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.scalingConfig(MetastoreServiceScalingConfigArgs.builder()
.instanceSize("EXTRA_SMALL")
.build())
.build());
}
}
resources:
dpms2:
type: gcp:dataproc:MetastoreService
properties:
serviceId: ms-dpms2
location: us-central1
databaseType: SPANNER
hiveMetastoreConfig:
version: 3.1.2
scalingConfig:
instanceSize: EXTRA_SMALL
Dataproc Metastore Service Dpms2 Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const dpms2ScalingFactor = new gcp.dataproc.MetastoreService("dpms2_scaling_factor", {
serviceId: "ms-dpms2sf",
location: "us-central1",
databaseType: "SPANNER",
hiveMetastoreConfig: {
version: "3.1.2",
},
scalingConfig: {
scalingFactor: 2,
},
});
import pulumi
import pulumi_gcp as gcp
dpms2_scaling_factor = gcp.dataproc.MetastoreService("dpms2_scaling_factor",
service_id="ms-dpms2sf",
location="us-central1",
database_type="SPANNER",
hive_metastore_config={
"version": "3.1.2",
},
scaling_config={
"scaling_factor": 2,
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "dpms2_scaling_factor", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("ms-dpms2sf"),
Location: pulumi.String("us-central1"),
DatabaseType: pulumi.String("SPANNER"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
ScalingFactor: pulumi.Float64(2),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var dpms2ScalingFactor = new Gcp.Dataproc.MetastoreService("dpms2_scaling_factor", new()
{
ServiceId = "ms-dpms2sf",
Location = "us-central1",
DatabaseType = "SPANNER",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
{
ScalingFactor = 2,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var dpms2ScalingFactor = new MetastoreService("dpms2ScalingFactor", MetastoreServiceArgs.builder()
.serviceId("ms-dpms2sf")
.location("us-central1")
.databaseType("SPANNER")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.scalingConfig(MetastoreServiceScalingConfigArgs.builder()
.scalingFactor("2")
.build())
.build());
}
}
resources:
dpms2ScalingFactor:
type: gcp:dataproc:MetastoreService
name: dpms2_scaling_factor
properties:
serviceId: ms-dpms2sf
location: us-central1
databaseType: SPANNER
hiveMetastoreConfig:
version: 3.1.2
scalingConfig:
scalingFactor: '2'
Dataproc Metastore Service Scheduled Backup
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bucket = new gcp.storage.Bucket("bucket", {
name: "backup",
location: "us-central1",
});
const backup = new gcp.dataproc.MetastoreService("backup", {
serviceId: "backup",
location: "us-central1",
port: 9080,
tier: "DEVELOPER",
maintenanceWindow: {
hourOfDay: 2,
dayOfWeek: "SUNDAY",
},
hiveMetastoreConfig: {
version: "2.3.6",
},
scheduledBackup: {
enabled: true,
cronSchedule: "0 0 * * *",
timeZone: "UTC",
backupLocation: pulumi.interpolate`gs://${bucket.name}`,
},
labels: {
env: "test",
},
});
import pulumi
import pulumi_gcp as gcp
bucket = gcp.storage.Bucket("bucket",
name="backup",
location="us-central1")
backup = gcp.dataproc.MetastoreService("backup",
service_id="backup",
location="us-central1",
port=9080,
tier="DEVELOPER",
maintenance_window={
"hour_of_day": 2,
"day_of_week": "SUNDAY",
},
hive_metastore_config={
"version": "2.3.6",
},
scheduled_backup={
"enabled": True,
"cron_schedule": "0 0 * * *",
"time_zone": "UTC",
"backup_location": bucket.name.apply(lambda name: f"gs://{name}"),
},
labels={
"env": "test",
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
Name: pulumi.String("backup"),
Location: pulumi.String("us-central1"),
})
if err != nil {
return err
}
_, err = dataproc.NewMetastoreService(ctx, "backup", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("backup"),
Location: pulumi.String("us-central1"),
Port: pulumi.Int(9080),
Tier: pulumi.String("DEVELOPER"),
MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
HourOfDay: pulumi.Int(2),
DayOfWeek: pulumi.String("SUNDAY"),
},
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("2.3.6"),
},
ScheduledBackup: &dataproc.MetastoreServiceScheduledBackupArgs{
Enabled: pulumi.Bool(true),
CronSchedule: pulumi.String("0 0 * * *"),
TimeZone: pulumi.String("UTC"),
BackupLocation: bucket.Name.ApplyT(func(name string) (string, error) {
return fmt.Sprintf("gs://%v", name), nil
}).(pulumi.StringOutput),
},
Labels: pulumi.StringMap{
"env": pulumi.String("test"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var bucket = new Gcp.Storage.Bucket("bucket", new()
{
Name = "backup",
Location = "us-central1",
});
var backup = new Gcp.Dataproc.MetastoreService("backup", new()
{
ServiceId = "backup",
Location = "us-central1",
Port = 9080,
Tier = "DEVELOPER",
MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
{
HourOfDay = 2,
DayOfWeek = "SUNDAY",
},
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "2.3.6",
},
ScheduledBackup = new Gcp.Dataproc.Inputs.MetastoreServiceScheduledBackupArgs
{
Enabled = true,
CronSchedule = "0 0 * * *",
TimeZone = "UTC",
BackupLocation = bucket.Name.Apply(name => $"gs://{name}"),
},
Labels =
{
{ "env", "test" },
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScheduledBackupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var bucket = new Bucket("bucket", BucketArgs.builder()
.name("backup")
.location("us-central1")
.build());
var backup = new MetastoreService("backup", MetastoreServiceArgs.builder()
.serviceId("backup")
.location("us-central1")
.port(9080)
.tier("DEVELOPER")
.maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
.hourOfDay(2)
.dayOfWeek("SUNDAY")
.build())
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("2.3.6")
.build())
.scheduledBackup(MetastoreServiceScheduledBackupArgs.builder()
.enabled(true)
.cronSchedule("0 0 * * *")
.timeZone("UTC")
.backupLocation(bucket.name().applyValue(name -> String.format("gs://%s", name)))
.build())
.labels(Map.of("env", "test"))
.build());
}
}
resources:
backup:
type: gcp:dataproc:MetastoreService
properties:
serviceId: backup
location: us-central1
port: 9080
tier: DEVELOPER
maintenanceWindow:
hourOfDay: 2
dayOfWeek: SUNDAY
hiveMetastoreConfig:
version: 2.3.6
scheduledBackup:
enabled: true
cronSchedule: 0 0 * * *
timeZone: UTC
backupLocation: gs://${bucket.name}
labels:
env: test
bucket:
type: gcp:storage:Bucket
properties:
name: backup
location: us-central1
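Scheduled backups are written to the Cloud Storage location given in backupLocation, so the Dataproc Metastore service agent also needs write access to that bucket. A minimal sketch against the TypeScript variant above; the service-agent email and the roles/storage.objectAdmin role are assumptions to check for your environment.
// Hypothetical IAM grant letting the Metastore service agent write backups to the bucket.
const project = gcp.organizations.getProject({});
const backupWriter = new gcp.storage.BucketIAMMember("backup_writer", {
    bucket: bucket.name,
    role: "roles/storage.objectAdmin",
    member: project.then(p => `serviceAccount:service-${p.number}@gcp-sa-metastore.iam.gserviceaccount.com`),
});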
Dataproc Metastore Service Autoscaling Max Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
serviceId: "test-service",
location: "us-central1",
databaseType: "SPANNER",
hiveMetastoreConfig: {
version: "3.1.2",
},
scalingConfig: {
autoscalingConfig: {
autoscalingEnabled: true,
limitConfig: {
maxScalingFactor: 1,
},
},
},
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
service_id="test-service",
location="us-central1",
database_type="SPANNER",
hive_metastore_config={
"version": "3.1.2",
},
scaling_config={
"autoscaling_config": {
"autoscaling_enabled": True,
"limit_config": {
"max_scaling_factor": 1,
},
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("test-service"),
Location: pulumi.String("us-central1"),
DatabaseType: pulumi.String("SPANNER"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
AutoscalingEnabled: pulumi.Bool(true),
LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
MaxScalingFactor: pulumi.Float64(1),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
{
ServiceId = "test-service",
Location = "us-central1",
DatabaseType = "SPANNER",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
{
AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
{
AutoscalingEnabled = true,
LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
{
MaxScalingFactor = 1,
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
.serviceId("test-service")
.location("us-central1")
.databaseType("SPANNER")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.scalingConfig(MetastoreServiceScalingConfigArgs.builder()
.autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
.autoscalingEnabled(true)
.limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
.maxScalingFactor(1)
.build())
.build())
.build())
.build());
}
}
resources:
testResource:
type: gcp:dataproc:MetastoreService
name: test_resource
properties:
serviceId: test-service
location: us-central1
databaseType: SPANNER
hiveMetastoreConfig:
version: 3.1.2
scalingConfig:
autoscalingConfig:
autoscalingEnabled: true
limitConfig:
maxScalingFactor: 1
Dataproc Metastore Service Autoscaling Min And Max Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
serviceId: "test-service",
location: "us-central1",
databaseType: "SPANNER",
hiveMetastoreConfig: {
version: "3.1.2",
},
scalingConfig: {
autoscalingConfig: {
autoscalingEnabled: true,
limitConfig: {
minScalingFactor: 0.1,
maxScalingFactor: 1,
},
},
},
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
service_id="test-service",
location="us-central1",
database_type="SPANNER",
hive_metastore_config={
"version": "3.1.2",
},
scaling_config={
"autoscaling_config": {
"autoscaling_enabled": True,
"limit_config": {
"min_scaling_factor": 0.1,
"max_scaling_factor": 1,
},
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("test-service"),
Location: pulumi.String("us-central1"),
DatabaseType: pulumi.String("SPANNER"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
AutoscalingEnabled: pulumi.Bool(true),
LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
MinScalingFactor: pulumi.Float64(0.1),
MaxScalingFactor: pulumi.Float64(1),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
{
ServiceId = "test-service",
Location = "us-central1",
DatabaseType = "SPANNER",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
{
AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
{
AutoscalingEnabled = true,
LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
{
MinScalingFactor = 0.1,
MaxScalingFactor = 1,
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
.serviceId("test-service")
.location("us-central1")
.databaseType("SPANNER")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.scalingConfig(MetastoreServiceScalingConfigArgs.builder()
.autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
.autoscalingEnabled(true)
.limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
.minScalingFactor(0.1)
.maxScalingFactor(1)
.build())
.build())
.build())
.build());
}
}
resources:
testResource:
type: gcp:dataproc:MetastoreService
name: test_resource
properties:
serviceId: test-service
location: us-central1
databaseType: SPANNER
hiveMetastoreConfig:
version: 3.1.2
scalingConfig:
autoscalingConfig:
autoscalingEnabled: true
limitConfig:
minScalingFactor: 0.1
maxScalingFactor: 1
Dataproc Metastore Service Autoscaling Min Scaling Factor
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
serviceId: "test-service",
location: "us-central1",
databaseType: "SPANNER",
hiveMetastoreConfig: {
version: "3.1.2",
},
scalingConfig: {
autoscalingConfig: {
autoscalingEnabled: true,
limitConfig: {
minScalingFactor: 0.1,
},
},
},
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
service_id="test-service",
location="us-central1",
database_type="SPANNER",
hive_metastore_config={
"version": "3.1.2",
},
scaling_config={
"autoscaling_config": {
"autoscaling_enabled": True,
"limit_config": {
"min_scaling_factor": 0.1,
},
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("test-service"),
Location: pulumi.String("us-central1"),
DatabaseType: pulumi.String("SPANNER"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
AutoscalingEnabled: pulumi.Bool(true),
LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
MinScalingFactor: pulumi.Float64(0.1),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
{
ServiceId = "test-service",
Location = "us-central1",
DatabaseType = "SPANNER",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
{
AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
{
AutoscalingEnabled = true,
LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
{
MinScalingFactor = 0.1,
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
.serviceId("test-service")
.location("us-central1")
.databaseType("SPANNER")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.scalingConfig(MetastoreServiceScalingConfigArgs.builder()
.autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
.autoscalingEnabled(true)
.limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
.minScalingFactor(0.1)
.build())
.build())
.build())
.build());
}
}
resources:
testResource:
type: gcp:dataproc:MetastoreService
name: test_resource
properties:
serviceId: test-service
location: us-central1
databaseType: SPANNER
hiveMetastoreConfig:
version: 3.1.2
scalingConfig:
autoscalingConfig:
autoscalingEnabled: true
limitConfig:
minScalingFactor: 0.1
Dataproc Metastore Service Autoscaling No Limit Config
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const testResource = new gcp.dataproc.MetastoreService("test_resource", {
serviceId: "test-service",
location: "us-central1",
databaseType: "SPANNER",
hiveMetastoreConfig: {
version: "3.1.2",
},
scalingConfig: {
autoscalingConfig: {
autoscalingEnabled: true,
},
},
});
import pulumi
import pulumi_gcp as gcp
test_resource = gcp.dataproc.MetastoreService("test_resource",
service_id="test-service",
location="us-central1",
database_type="SPANNER",
hive_metastore_config={
"version": "3.1.2",
},
scaling_config={
"autoscaling_config": {
"autoscaling_enabled": True,
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataproc.NewMetastoreService(ctx, "test_resource", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("test-service"),
Location: pulumi.String("us-central1"),
DatabaseType: pulumi.String("SPANNER"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("3.1.2"),
},
ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
AutoscalingEnabled: pulumi.Bool(true),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var testResource = new Gcp.Dataproc.MetastoreService("test_resource", new()
{
ServiceId = "test-service",
Location = "us-central1",
DatabaseType = "SPANNER",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "3.1.2",
},
ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
{
AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
{
AutoscalingEnabled = true,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.MetastoreService;
import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigArgs;
import com.pulumi.gcp.dataproc.inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var testResource = new MetastoreService("testResource", MetastoreServiceArgs.builder()
.serviceId("test-service")
.location("us-central1")
.databaseType("SPANNER")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("3.1.2")
.build())
.scalingConfig(MetastoreServiceScalingConfigArgs.builder()
.autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
.autoscalingEnabled(true)
.build())
.build())
.build());
}
}
resources:
testResource:
type: gcp:dataproc:MetastoreService
name: test_resource
properties:
serviceId: test-service
location: us-central1
databaseType: SPANNER
hiveMetastoreConfig:
version: 3.1.2
scalingConfig:
autoscalingConfig:
autoscalingEnabled: true
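To reference a service that already exists instead of creating one, the provider also exposes a lookup function. A minimal TypeScript sketch, assuming the gcp.dataproc.getMetastoreService data source and its serviceId/location arguments.
// Hypothetical lookup of an existing service; exports its Thrift endpoint.
const existing = gcp.dataproc.getMetastoreService({
    serviceId: "metastore-srv",
    location: "us-central1",
});
export const existingEndpoint = existing.then(s => s.endpointUri);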
Create MetastoreService Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MetastoreService(name: string, args: MetastoreServiceArgs, opts?: CustomResourceOptions);
@overload
def MetastoreService(resource_name: str,
args: MetastoreServiceArgs,
opts: Optional[ResourceOptions] = None)
@overload
def MetastoreService(resource_name: str,
opts: Optional[ResourceOptions] = None,
service_id: Optional[str] = None,
network: Optional[str] = None,
location: Optional[str] = None,
network_config: Optional[MetastoreServiceNetworkConfigArgs] = None,
labels: Optional[Mapping[str, str]] = None,
port: Optional[int] = None,
maintenance_window: Optional[MetastoreServiceMaintenanceWindowArgs] = None,
metadata_integration: Optional[MetastoreServiceMetadataIntegrationArgs] = None,
project: Optional[str] = None,
hive_metastore_config: Optional[MetastoreServiceHiveMetastoreConfigArgs] = None,
encryption_config: Optional[MetastoreServiceEncryptionConfigArgs] = None,
database_type: Optional[str] = None,
release_channel: Optional[str] = None,
scaling_config: Optional[MetastoreServiceScalingConfigArgs] = None,
scheduled_backup: Optional[MetastoreServiceScheduledBackupArgs] = None,
deletion_protection: Optional[bool] = None,
telemetry_config: Optional[MetastoreServiceTelemetryConfigArgs] = None,
tier: Optional[str] = None)
func NewMetastoreService(ctx *Context, name string, args MetastoreServiceArgs, opts ...ResourceOption) (*MetastoreService, error)
public MetastoreService(string name, MetastoreServiceArgs args, CustomResourceOptions? opts = null)
public MetastoreService(String name, MetastoreServiceArgs args)
public MetastoreService(String name, MetastoreServiceArgs args, CustomResourceOptions options)
type: gcp:dataproc:MetastoreService
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MetastoreServiceArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var metastoreServiceResource = new Gcp.Dataproc.MetastoreService("metastoreServiceResource", new()
{
ServiceId = "string",
Network = "string",
Location = "string",
NetworkConfig = new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigArgs
{
Consumers = new[]
{
new Gcp.Dataproc.Inputs.MetastoreServiceNetworkConfigConsumerArgs
{
Subnetwork = "string",
EndpointUri = "string",
},
},
CustomRoutesEnabled = false,
},
Labels =
{
{ "string", "string" },
},
Port = 0,
MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
{
DayOfWeek = "string",
HourOfDay = 0,
},
MetadataIntegration = new Gcp.Dataproc.Inputs.MetastoreServiceMetadataIntegrationArgs
{
DataCatalogConfig = new Gcp.Dataproc.Inputs.MetastoreServiceMetadataIntegrationDataCatalogConfigArgs
{
Enabled = false,
},
},
Project = "string",
HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
{
Version = "string",
AuxiliaryVersions = new[]
{
new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs
{
Key = "string",
Version = "string",
ConfigOverrides =
{
{ "string", "string" },
},
},
},
ConfigOverrides =
{
{ "string", "string" },
},
EndpointProtocol = "string",
KerberosConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigKerberosConfigArgs
{
Keytab = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs
{
CloudSecret = "string",
},
Krb5ConfigGcsUri = "string",
Principal = "string",
},
},
EncryptionConfig = new Gcp.Dataproc.Inputs.MetastoreServiceEncryptionConfigArgs
{
KmsKey = "string",
},
DatabaseType = "string",
ReleaseChannel = "string",
ScalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigArgs
{
AutoscalingConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigArgs
{
AutoscalingEnabled = false,
LimitConfig = new Gcp.Dataproc.Inputs.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
{
MaxScalingFactor = 0,
MinScalingFactor = 0,
},
},
InstanceSize = "string",
ScalingFactor = 0,
},
ScheduledBackup = new Gcp.Dataproc.Inputs.MetastoreServiceScheduledBackupArgs
{
BackupLocation = "string",
CronSchedule = "string",
Enabled = false,
TimeZone = "string",
},
DeletionProtection = false,
TelemetryConfig = new Gcp.Dataproc.Inputs.MetastoreServiceTelemetryConfigArgs
{
LogFormat = "string",
},
Tier = "string",
});
example, err := dataproc.NewMetastoreService(ctx, "metastoreServiceResource", &dataproc.MetastoreServiceArgs{
ServiceId: pulumi.String("string"),
Network: pulumi.String("string"),
Location: pulumi.String("string"),
NetworkConfig: &dataproc.MetastoreServiceNetworkConfigArgs{
Consumers: dataproc.MetastoreServiceNetworkConfigConsumerArray{
&dataproc.MetastoreServiceNetworkConfigConsumerArgs{
Subnetwork: pulumi.String("string"),
EndpointUri: pulumi.String("string"),
},
},
CustomRoutesEnabled: pulumi.Bool(false),
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
Port: pulumi.Int(0),
MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
DayOfWeek: pulumi.String("string"),
HourOfDay: pulumi.Int(0),
},
MetadataIntegration: &dataproc.MetastoreServiceMetadataIntegrationArgs{
DataCatalogConfig: &dataproc.MetastoreServiceMetadataIntegrationDataCatalogConfigArgs{
Enabled: pulumi.Bool(false),
},
},
Project: pulumi.String("string"),
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
Version: pulumi.String("string"),
AuxiliaryVersions: dataproc.MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArray{
&dataproc.MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs{
Key: pulumi.String("string"),
Version: pulumi.String("string"),
ConfigOverrides: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
},
ConfigOverrides: pulumi.StringMap{
"string": pulumi.String("string"),
},
EndpointProtocol: pulumi.String("string"),
KerberosConfig: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{
Keytab: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{
CloudSecret: pulumi.String("string"),
},
Krb5ConfigGcsUri: pulumi.String("string"),
Principal: pulumi.String("string"),
},
},
EncryptionConfig: &dataproc.MetastoreServiceEncryptionConfigArgs{
KmsKey: pulumi.String("string"),
},
DatabaseType: pulumi.String("string"),
ReleaseChannel: pulumi.String("string"),
ScalingConfig: &dataproc.MetastoreServiceScalingConfigArgs{
AutoscalingConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigArgs{
AutoscalingEnabled: pulumi.Bool(false),
LimitConfig: &dataproc.MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs{
MaxScalingFactor: pulumi.Float64(0),
MinScalingFactor: pulumi.Float64(0),
},
},
InstanceSize: pulumi.String("string"),
ScalingFactor: pulumi.Float64(0),
},
ScheduledBackup: &dataproc.MetastoreServiceScheduledBackupArgs{
BackupLocation: pulumi.String("string"),
CronSchedule: pulumi.String("string"),
Enabled: pulumi.Bool(false),
TimeZone: pulumi.String("string"),
},
DeletionProtection: pulumi.Bool(false),
TelemetryConfig: &dataproc.MetastoreServiceTelemetryConfigArgs{
LogFormat: pulumi.String("string"),
},
Tier: pulumi.String("string"),
})
var metastoreServiceResource = new MetastoreService("metastoreServiceResource", MetastoreServiceArgs.builder()
.serviceId("string")
.network("string")
.location("string")
.networkConfig(MetastoreServiceNetworkConfigArgs.builder()
.consumers(MetastoreServiceNetworkConfigConsumerArgs.builder()
.subnetwork("string")
.endpointUri("string")
.build())
.customRoutesEnabled(false)
.build())
.labels(Map.of("string", "string"))
.port(0)
.maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
.dayOfWeek("string")
.hourOfDay(0)
.build())
.metadataIntegration(MetastoreServiceMetadataIntegrationArgs.builder()
.dataCatalogConfig(MetastoreServiceMetadataIntegrationDataCatalogConfigArgs.builder()
.enabled(false)
.build())
.build())
.project("string")
.hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
.version("string")
.auxiliaryVersions(MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs.builder()
.key("string")
.version("string")
.configOverrides(Map.of("string", "string"))
.build())
.configOverrides(Map.of("string", "string"))
.endpointProtocol("string")
.kerberosConfig(MetastoreServiceHiveMetastoreConfigKerberosConfigArgs.builder()
.keytab(MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs.builder()
.cloudSecret("string")
.build())
.krb5ConfigGcsUri("string")
.principal("string")
.build())
.build())
.encryptionConfig(MetastoreServiceEncryptionConfigArgs.builder()
.kmsKey("string")
.build())
.databaseType("string")
.releaseChannel("string")
.scalingConfig(MetastoreServiceScalingConfigArgs.builder()
.autoscalingConfig(MetastoreServiceScalingConfigAutoscalingConfigArgs.builder()
.autoscalingEnabled(false)
.limitConfig(MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs.builder()
.maxScalingFactor(0)
.minScalingFactor(0)
.build())
.build())
.instanceSize("string")
.scalingFactor(0)
.build())
.scheduledBackup(MetastoreServiceScheduledBackupArgs.builder()
.backupLocation("string")
.cronSchedule("string")
.enabled(false)
.timeZone("string")
.build())
.deletionProtection(false)
.telemetryConfig(MetastoreServiceTelemetryConfigArgs.builder()
.logFormat("string")
.build())
.tier("string")
.build());
metastore_service_resource = gcp.dataproc.MetastoreService("metastoreServiceResource",
service_id="string",
network="string",
location="string",
network_config={
"consumers": [{
"subnetwork": "string",
"endpoint_uri": "string",
}],
"custom_routes_enabled": False,
},
labels={
"string": "string",
},
port=0,
maintenance_window={
"day_of_week": "string",
"hour_of_day": 0,
},
metadata_integration={
"data_catalog_config": {
"enabled": False,
},
},
project="string",
hive_metastore_config={
"version": "string",
"auxiliary_versions": [{
"key": "string",
"version": "string",
"config_overrides": {
"string": "string",
},
}],
"config_overrides": {
"string": "string",
},
"endpoint_protocol": "string",
"kerberos_config": {
"keytab": {
"cloud_secret": "string",
},
"krb5_config_gcs_uri": "string",
"principal": "string",
},
},
encryption_config={
"kms_key": "string",
},
database_type="string",
release_channel="string",
scaling_config={
"autoscaling_config": {
"autoscaling_enabled": False,
"limit_config": {
"max_scaling_factor": 0,
"min_scaling_factor": 0,
},
},
"instance_size": "string",
"scaling_factor": 0,
},
scheduled_backup={
"backup_location": "string",
"cron_schedule": "string",
"enabled": False,
"time_zone": "string",
},
deletion_protection=False,
telemetry_config={
"log_format": "string",
},
tier="string")
const metastoreServiceResource = new gcp.dataproc.MetastoreService("metastoreServiceResource", {
serviceId: "string",
network: "string",
location: "string",
networkConfig: {
consumers: [{
subnetwork: "string",
endpointUri: "string",
}],
customRoutesEnabled: false,
},
labels: {
string: "string",
},
port: 0,
maintenanceWindow: {
dayOfWeek: "string",
hourOfDay: 0,
},
metadataIntegration: {
dataCatalogConfig: {
enabled: false,
},
},
project: "string",
hiveMetastoreConfig: {
version: "string",
auxiliaryVersions: [{
key: "string",
version: "string",
configOverrides: {
string: "string",
},
}],
configOverrides: {
string: "string",
},
endpointProtocol: "string",
kerberosConfig: {
keytab: {
cloudSecret: "string",
},
krb5ConfigGcsUri: "string",
principal: "string",
},
},
encryptionConfig: {
kmsKey: "string",
},
databaseType: "string",
releaseChannel: "string",
scalingConfig: {
autoscalingConfig: {
autoscalingEnabled: false,
limitConfig: {
maxScalingFactor: 0,
minScalingFactor: 0,
},
},
instanceSize: "string",
scalingFactor: 0,
},
scheduledBackup: {
backupLocation: "string",
cronSchedule: "string",
enabled: false,
timeZone: "string",
},
deletionProtection: false,
telemetryConfig: {
logFormat: "string",
},
tier: "string",
});
type: gcp:dataproc:MetastoreService
properties:
databaseType: string
deletionProtection: false
encryptionConfig:
kmsKey: string
hiveMetastoreConfig:
auxiliaryVersions:
- configOverrides:
string: string
key: string
version: string
configOverrides:
string: string
endpointProtocol: string
kerberosConfig:
keytab:
cloudSecret: string
krb5ConfigGcsUri: string
principal: string
version: string
labels:
string: string
location: string
maintenanceWindow:
dayOfWeek: string
hourOfDay: 0
metadataIntegration:
dataCatalogConfig:
enabled: false
network: string
networkConfig:
consumers:
- endpointUri: string
subnetwork: string
customRoutesEnabled: false
port: 0
project: string
releaseChannel: string
scalingConfig:
autoscalingConfig:
autoscalingEnabled: false
limitConfig:
maxScalingFactor: 0
minScalingFactor: 0
instanceSize: string
scalingFactor: 0
scheduledBackup:
backupLocation: string
cronSchedule: string
enabled: false
timeZone: string
serviceId: string
telemetryConfig:
logFormat: string
tier: string
MetastoreService Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
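For example, the following minimal Python sketch shows the two equivalent forms for the scheduledBackup object input documented below; the service ID, backup bucket, and Hive version are placeholder values, not part of the generated reference.
import pulumi_gcp as gcp

# Dictionary-literal form of the scheduled_backup input.
svc_from_dict = gcp.dataproc.MetastoreService("svc-from-dict",
    service_id="metastore-dict",        # placeholder
    location="us-central1",
    hive_metastore_config={"version": "3.1.2"},
    scheduled_backup={
        "enabled": True,
        "cron_schedule": "0 3 * * *",
        "backup_location": "gs://my-backup-bucket",  # placeholder bucket
    })

# Equivalent argument-class form of the same input.
svc_from_args = gcp.dataproc.MetastoreService("svc-from-args",
    service_id="metastore-args",        # placeholder
    location="us-central1",
    hive_metastore_config=gcp.dataproc.MetastoreServiceHiveMetastoreConfigArgs(version="3.1.2"),
    scheduled_backup=gcp.dataproc.MetastoreServiceScheduledBackupArgs(
        enabled=True,
        cron_schedule="0 3 * * *",
        backup_location="gs://my-backup-bucket",  # placeholder bucket
    ))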
The MetastoreService resource accepts the following input properties:
- ServiceId string - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.
- DatabaseType string - The database type that the Metastore service stores its data. Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- DeletionProtection bool - Indicates if the dataproc metastore should be protected against accidental deletions.
- EncryptionConfig MetastoreServiceEncryptionConfig - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfig - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels Dictionary<string, string> - User-defined labels for the metastore service. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- Location string - The location where the metastore service should reside. The default value is global.
- MaintenanceWindow MetastoreServiceMaintenanceWindow - The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- MetadataIntegration MetastoreServiceMetadataIntegration - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Network string - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- NetworkConfig MetastoreServiceNetworkConfig - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int - The TCP port at which the metastore service is reached. Default: 9083.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- ReleaseChannel string - The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- ScalingConfig MetastoreServiceScalingConfig - Represents the scaling configuration of a metastore service. Structure is documented below.
- ScheduledBackup MetastoreServiceScheduledBackup - The configuration of scheduled backup for the metastore service. Structure is documented below.
- TelemetryConfig MetastoreServiceTelemetryConfig - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string - The tier of the service. Possible values are: DEVELOPER, ENTERPRISE.
- ServiceId string - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.
- DatabaseType string - The database type that the Metastore service stores its data. Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- DeletionProtection bool - Indicates if the dataproc metastore should be protected against accidental deletions.
- EncryptionConfig MetastoreServiceEncryptionConfigArgs - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigArgs - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels map[string]string - User-defined labels for the metastore service. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- Location string - The location where the metastore service should reside. The default value is global.
- MaintenanceWindow MetastoreServiceMaintenanceWindowArgs - The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- MetadataIntegration MetastoreServiceMetadataIntegrationArgs - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Network string - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- NetworkConfig MetastoreServiceNetworkConfigArgs - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int - The TCP port at which the metastore service is reached. Default: 9083.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- ReleaseChannel string - The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- ScalingConfig MetastoreServiceScalingConfigArgs - Represents the scaling configuration of a metastore service. Structure is documented below.
- ScheduledBackup MetastoreServiceScheduledBackupArgs - The configuration of scheduled backup for the metastore service. Structure is documented below.
- TelemetryConfig MetastoreServiceTelemetryConfigArgs - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string - The tier of the service. Possible values are: DEVELOPER, ENTERPRISE.
- serviceId String - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.
- databaseType String - The database type that the Metastore service stores its data. Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection Boolean - Indicates if the dataproc metastore should be protected against accidental deletions.
- encryptionConfig MetastoreServiceEncryptionConfig - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hiveMetastoreConfig MetastoreServiceHiveMetastoreConfig - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String,String> - User-defined labels for the metastore service. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location String - The location where the metastore service should reside. The default value is global.
- maintenanceWindow MetastoreServiceMaintenanceWindow - The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration MetastoreServiceMetadataIntegration - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network String - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig MetastoreServiceNetworkConfig - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Integer - The TCP port at which the metastore service is reached. Default: 9083.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- releaseChannel String - The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig MetastoreServiceScalingConfig - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup MetastoreServiceScheduledBackup - The configuration of scheduled backup for the metastore service. Structure is documented below.
- telemetryConfig MetastoreServiceTelemetryConfig - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String - The tier of the service. Possible values are: DEVELOPER, ENTERPRISE.
- serviceId string - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.
- databaseType string - The database type that the Metastore service stores its data. Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection boolean - Indicates if the dataproc metastore should be protected against accidental deletions.
- encryptionConfig MetastoreServiceEncryptionConfig - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hiveMetastoreConfig MetastoreServiceHiveMetastoreConfig - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels {[key: string]: string} - User-defined labels for the metastore service. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location string - The location where the metastore service should reside. The default value is global.
- maintenanceWindow MetastoreServiceMaintenanceWindow - The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration MetastoreServiceMetadataIntegration - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network string - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig MetastoreServiceNetworkConfig - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port number - The TCP port at which the metastore service is reached. Default: 9083.
- project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- releaseChannel string - The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig MetastoreServiceScalingConfig - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup MetastoreServiceScheduledBackup - The configuration of scheduled backup for the metastore service. Structure is documented below.
- telemetryConfig MetastoreServiceTelemetryConfig - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier string - The tier of the service. Possible values are: DEVELOPER, ENTERPRISE.
- service_id str - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.
- database_type str - The database type that the Metastore service stores its data. Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletion_protection bool - Indicates if the dataproc metastore should be protected against accidental deletions.
- encryption_config MetastoreServiceEncryptionConfigArgs - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hive_metastore_config MetastoreServiceHiveMetastoreConfigArgs - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Mapping[str, str] - User-defined labels for the metastore service. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location str - The location where the metastore service should reside. The default value is global.
- maintenance_window MetastoreServiceMaintenanceWindowArgs - The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadata_integration MetastoreServiceMetadataIntegrationArgs - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network str - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- network_config MetastoreServiceNetworkConfigArgs - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port int - The TCP port at which the metastore service is reached. Default: 9083.
- project str - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- release_channel str - The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scaling_config MetastoreServiceScalingConfigArgs - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduled_backup MetastoreServiceScheduledBackupArgs - The configuration of scheduled backup for the metastore service. Structure is documented below.
- telemetry_config MetastoreServiceTelemetryConfigArgs - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier str - The tier of the service. Possible values are: DEVELOPER, ENTERPRISE.
- serviceId String - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 63 characters.
- databaseType String - The database type that the Metastore service stores its data. Default value is MYSQL. Possible values are: MYSQL, SPANNER.
- deletionProtection Boolean - Indicates if the dataproc metastore should be protected against accidental deletions.
- encryptionConfig Property Map - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- hiveMetastoreConfig Property Map - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String> - User-defined labels for the metastore service. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
- location String - The location where the metastore service should reside. The default value is global.
- maintenanceWindow Property Map - The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. Maintenance window is not needed for services with the SPANNER database type. Structure is documented below.
- metadataIntegration Property Map - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- network String - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- networkConfig Property Map - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Number - The TCP port at which the metastore service is reached. Default: 9083.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- releaseChannel String - The release channel of the service. If unspecified, defaults to STABLE. Default value is STABLE. Possible values are: CANARY, STABLE.
- scalingConfig Property Map - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduledBackup Property Map - The configuration of scheduled backup for the metastore service. Structure is documented below.
- telemetryConfig Property Map - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String - The tier of the service. Possible values are: DEVELOPER, ENTERPRISE.
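As a hedged illustration of the networkConfig input described above, the following Python sketch exposes the service on a consumer subnetwork rather than a whole VPC network; the project, region, subnetwork, and Hive version values are placeholders only.
import pulumi_gcp as gcp

# Sketch only: attach the metastore service to a consumer subnetwork.
# "my-project" and "metastore-subnet" are placeholder names.
private_svc = gcp.dataproc.MetastoreService("private",
    service_id="metastore-private",
    location="us-central1",
    hive_metastore_config={"version": "3.1.2"},
    network_config={
        "consumers": [{
            "subnetwork": "projects/my-project/regions/us-central1/subnetworks/metastore-subnet",
        }],
    })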
Outputs
All input properties are implicitly available as output properties. Additionally, the MetastoreService resource produces the following output properties:
- ArtifactGcsUri string - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- EffectiveLabels Dictionary<string, string> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EndpointUri string - The URI of the endpoint used to access the metastore service.
- Id string - The provider-assigned unique ID for this managed resource.
- Name string - The relative resource name of the metastore service.
- PulumiLabels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- State string - The current state of the metastore service.
- StateMessage string - Additional information about the current state of the metastore service, if available.
- Uid string - The globally unique resource identifier of the metastore service.
- ArtifactGcsUri string - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- EffectiveLabels map[string]string - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- EndpointUri string - The URI of the endpoint used to access the metastore service.
- Id string - The provider-assigned unique ID for this managed resource.
- Name string - The relative resource name of the metastore service.
- PulumiLabels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- State string - The current state of the metastore service.
- StateMessage string - Additional information about the current state of the metastore service, if available.
- Uid string - The globally unique resource identifier of the metastore service.
- artifactGcsUri String - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effectiveLabels Map<String,String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpointUri String - The URI of the endpoint used to access the metastore service.
- id String - The provider-assigned unique ID for this managed resource.
- name String - The relative resource name of the metastore service.
- pulumiLabels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- state String - The current state of the metastore service.
- stateMessage String - Additional information about the current state of the metastore service, if available.
- uid String - The globally unique resource identifier of the metastore service.
- artifactGcsUri string - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effectiveLabels {[key: string]: string} - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpointUri string - The URI of the endpoint used to access the metastore service.
- id string - The provider-assigned unique ID for this managed resource.
- name string - The relative resource name of the metastore service.
- pulumiLabels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- state string - The current state of the metastore service.
- stateMessage string - Additional information about the current state of the metastore service, if available.
- uid string - The globally unique resource identifier of the metastore service.
- artifact_gcs_uri str - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effective_labels Mapping[str, str] - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpoint_uri str - The URI of the endpoint used to access the metastore service.
- id str - The provider-assigned unique ID for this managed resource.
- name str - The relative resource name of the metastore service.
- pulumi_labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- state str - The current state of the metastore service.
- state_message str - Additional information about the current state of the metastore service, if available.
- uid str - The globally unique resource identifier of the metastore service.
- artifactGcsUri String - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- effectiveLabels Map<String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- endpointUri String - The URI of the endpoint used to access the metastore service.
- id String - The provider-assigned unique ID for this managed resource.
- name String - The relative resource name of the metastore service.
- pulumiLabels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- state String - The current state of the metastore service.
- stateMessage String - Additional information about the current state of the metastore service, if available.
- uid String - The globally unique resource identifier of the metastore service.
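As a minimal Python sketch of consuming these output properties, a program can export the endpoint URI, artifact location, and state of a service once it is created; the service ID and Hive version below are placeholders.
import pulumi
import pulumi_gcp as gcp

svc = gcp.dataproc.MetastoreService("svc",
    service_id="metastore-outputs",      # placeholder
    location="us-central1",
    hive_metastore_config={"version": "3.1.2"})

# Output properties resolve once the service has been provisioned.
pulumi.export("endpoint_uri", svc.endpoint_uri)
pulumi.export("artifact_gcs_uri", svc.artifact_gcs_uri)
pulumi.export("metastore_state", svc.state)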
Look up Existing MetastoreService Resource
Get an existing MetastoreService resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MetastoreServiceState, opts?: CustomResourceOptions): MetastoreService
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
artifact_gcs_uri: Optional[str] = None,
database_type: Optional[str] = None,
deletion_protection: Optional[bool] = None,
effective_labels: Optional[Mapping[str, str]] = None,
encryption_config: Optional[MetastoreServiceEncryptionConfigArgs] = None,
endpoint_uri: Optional[str] = None,
hive_metastore_config: Optional[MetastoreServiceHiveMetastoreConfigArgs] = None,
labels: Optional[Mapping[str, str]] = None,
location: Optional[str] = None,
maintenance_window: Optional[MetastoreServiceMaintenanceWindowArgs] = None,
metadata_integration: Optional[MetastoreServiceMetadataIntegrationArgs] = None,
name: Optional[str] = None,
network: Optional[str] = None,
network_config: Optional[MetastoreServiceNetworkConfigArgs] = None,
port: Optional[int] = None,
project: Optional[str] = None,
pulumi_labels: Optional[Mapping[str, str]] = None,
release_channel: Optional[str] = None,
scaling_config: Optional[MetastoreServiceScalingConfigArgs] = None,
scheduled_backup: Optional[MetastoreServiceScheduledBackupArgs] = None,
service_id: Optional[str] = None,
state: Optional[str] = None,
state_message: Optional[str] = None,
telemetry_config: Optional[MetastoreServiceTelemetryConfigArgs] = None,
tier: Optional[str] = None,
uid: Optional[str] = None) -> MetastoreService
func GetMetastoreService(ctx *Context, name string, id IDInput, state *MetastoreServiceState, opts ...ResourceOption) (*MetastoreService, error)
public static MetastoreService Get(string name, Input<string> id, MetastoreServiceState? state, CustomResourceOptions? opts = null)
public static MetastoreService get(String name, Output<String> id, MetastoreServiceState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
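As a usage sketch (not part of the generated signatures above), an existing service can be looked up in Python with the static get method; the logical name and the resource ID format "projects/{project}/locations/{location}/services/{service_id}" shown here are illustrative assumptions.
import pulumi_gcp as gcp

# Adopt an existing Dataproc Metastore service into the program by ID.
existing = gcp.dataproc.MetastoreService.get("existing-metastore",
    "projects/my-project/locations/us-central1/services/metastore-srv")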
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- ArtifactGcsUri string - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- Database
Type string - The database type that the Metastore service stores its data.
Default value is
MYSQL
. Possible values are:MYSQL
,SPANNER
. - Deletion
Protection bool - Indicates if the dataproc metastore should be protected against accidental deletions.
- Effective
Labels Dictionary<string, string> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Encryption
Config MetastoreService Encryption Config - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- Endpoint
Uri string - The URI of the endpoint used to access the metastore service.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfig - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels Dictionary<string, string>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels
for all of the labels present on the resource. - Location string
- The location where the metastore service should reside.
The default value is
global
. - Maintenance
Window MetastoreService Maintenance Window - The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the
SPANNER
database type. Structure is documented below. - Metadata
Integration MetastoreService Metadata Integration - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Name string
- The relative resource name of the metastore service.
- Network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- Network
Config MetastoreService Network Config - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int
- The TCP port at which the metastore service is reached. Default: 9083.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Pulumi
Labels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- Release
Channel string - The release channel of the service. If unspecified, defaults to
STABLE
. Default value isSTABLE
. Possible values are:CANARY
,STABLE
. - Scaling
Config MetastoreService Scaling Config - Represents the scaling configuration of a metastore service. Structure is documented below.
- Scheduled
Backup MetastoreService Scheduled Backup - The configuration of scheduled backup for the metastore service. Structure is documented below.
- Service
Id string - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- State string
- The current state of the metastore service.
- State
Message string - Additional information about the current state of the metastore service, if available.
- Telemetry
Config MetastoreService Telemetry Config - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string
- The tier of the service.
Possible values are:
DEVELOPER
,ENTERPRISE
. - Uid string
- The globally unique resource identifier of the metastore service.
- ArtifactGcsUri string - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- Database
Type string - The database type that the Metastore service stores its data.
Default value is
MYSQL
. Possible values are:MYSQL
,SPANNER
. - Deletion
Protection bool - Indicates if the dataproc metastore should be protected against accidental deletions.
- Effective
Labels map[string]string - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Encryption
Config MetastoreService Encryption Config Args - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- Endpoint
Uri string - The URI of the endpoint used to access the metastore service.
- HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigArgs - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- Labels map[string]string
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels
for all of the labels present on the resource. - Location string
- The location where the metastore service should reside.
The default value is
global
. - Maintenance
Window MetastoreService Maintenance Window Args - The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the
SPANNER
database type. Structure is documented below. - Metadata
Integration MetastoreService Metadata Integration Args - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- Name string
- The relative resource name of the metastore service.
- Network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- Network
Config MetastoreService Network Config Args - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- Port int
- The TCP port at which the metastore service is reached. Default: 9083.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Pulumi
Labels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- Release
Channel string - The release channel of the service. If unspecified, defaults to
STABLE
. Default value isSTABLE
. Possible values are:CANARY
,STABLE
. - Scaling
Config MetastoreService Scaling Config Args - Represents the scaling configuration of a metastore service. Structure is documented below.
- Scheduled
Backup MetastoreService Scheduled Backup Args - The configuration of scheduled backup for the metastore service. Structure is documented below.
- Service
Id string - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- State string
- The current state of the metastore service.
- State
Message string - Additional information about the current state of the metastore service, if available.
- Telemetry
Config MetastoreService Telemetry Config Args - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- Tier string
- The tier of the service.
Possible values are:
DEVELOPER
,ENTERPRISE
. - Uid string
- The globally unique resource identifier of the metastore service.
- artifactGcsUri String - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- database
Type String - The database type that the Metastore service stores its data.
Default value is
MYSQL
. Possible values are:MYSQL
,SPANNER
. - deletion
Protection Boolean - Indicates if the dataproc metastore should be protected against accidental deletions.
- effective
Labels Map<String,String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryption
Config MetastoreService Encryption Config - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpoint
Uri String - The URI of the endpoint used to access the metastore service.
- hiveMetastoreConfig MetastoreServiceHiveMetastoreConfig - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String,String>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels
for all of the labels present on the resource. - location String
- The location where the metastore service should reside.
The default value is
global
. - maintenance
Window MetastoreService Maintenance Window - The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the
SPANNER
database type. Structure is documented below. - metadata
Integration MetastoreService Metadata Integration - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name String
- The relative resource name of the metastore service.
- network String
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- network
Config MetastoreService Network Config - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Integer
- The TCP port at which the metastore service is reached. Default: 9083.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi
Labels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- release
Channel String - The release channel of the service. If unspecified, defaults to
STABLE
. Default value isSTABLE
. Possible values are:CANARY
,STABLE
. - scaling
Config MetastoreService Scaling Config - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduled
Backup MetastoreService Scheduled Backup - The configuration of scheduled backup for the metastore service. Structure is documented below.
- service
Id String - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state String
- The current state of the metastore service.
- state
Message String - Additional information about the current state of the metastore service, if available.
- telemetry
Config MetastoreService Telemetry Config - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String
- The tier of the service.
Possible values are:
DEVELOPER
,ENTERPRISE
. - uid String
- The globally unique resource identifier of the metastore service.
- artifactGcsUri string - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- database
Type string - The database type that the Metastore service stores its data.
Default value is
MYSQL
. Possible values are:MYSQL
,SPANNER
. - deletion
Protection boolean - Indicates if the dataproc metastore should be protected against accidental deletions.
- effective
Labels {[key: string]: string} - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryption
Config MetastoreService Encryption Config - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpoint
Uri string - The URI of the endpoint used to access the metastore service.
- hiveMetastoreConfig MetastoreServiceHiveMetastoreConfig - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels {[key: string]: string}
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels
for all of the labels present on the resource. - location string
- The location where the metastore service should reside.
The default value is
global
. - maintenance
Window MetastoreService Maintenance Window - The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the
SPANNER
database type. Structure is documented below. - metadata
Integration MetastoreService Metadata Integration - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name string
- The relative resource name of the metastore service.
- network string
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- network
Config MetastoreService Network Config - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port number
- The TCP port at which the metastore service is reached. Default: 9083.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi
Labels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- release
Channel string - The release channel of the service. If unspecified, defaults to
STABLE
. Default value isSTABLE
. Possible values are:CANARY
,STABLE
. - scaling
Config MetastoreService Scaling Config - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduled
Backup MetastoreService Scheduled Backup - The configuration of scheduled backup for the metastore service. Structure is documented below.
- service
Id string - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state string
- The current state of the metastore service.
- state
Message string - Additional information about the current state of the metastore service, if available.
- telemetry
Config MetastoreService Telemetry Config - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier string
- The tier of the service.
Possible values are:
DEVELOPER
,ENTERPRISE
. - uid string
- The globally unique resource identifier of the metastore service.
- artifact_gcs_uri str - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- database_
type str - The database type that the Metastore service stores its data.
Default value is
MYSQL
. Possible values are:MYSQL
,SPANNER
. - deletion_
protection bool - Indicates if the dataproc metastore should be protected against accidental deletions.
- effective_
labels Mapping[str, str] - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryption_
config MetastoreService Encryption Config Args - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpoint_
uri str - The URI of the endpoint used to access the metastore service.
- hive_metastore_config MetastoreServiceHiveMetastoreConfigArgs - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Mapping[str, str]
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels
for all of the labels present on the resource. - location str
- The location where the metastore service should reside.
The default value is
global
. - maintenance_
window MetastoreService Maintenance Window Args - The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the
SPANNER
database type. Structure is documented below. - metadata_
integration MetastoreService Metadata Integration Args - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name str
- The relative resource name of the metastore service.
- network str
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- network_
config MetastoreService Network Config Args - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port int
- The TCP port at which the metastore service is reached. Default: 9083.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi_
labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- release_
channel str - The release channel of the service. If unspecified, defaults to
STABLE
. Default value isSTABLE
. Possible values are:CANARY
,STABLE
. - scaling_
config MetastoreService Scaling Config Args - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduled_
backup MetastoreService Scheduled Backup Args - The configuration of scheduled backup for the metastore service. Structure is documented below.
- service_
id str - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state str
- The current state of the metastore service.
- state_
message str - Additional information about the current state of the metastore service, if available.
- telemetry_
config MetastoreService Telemetry Config Args - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier str
- The tier of the service.
Possible values are:
DEVELOPER
,ENTERPRISE
. - uid str
- The globally unique resource identifier of the metastore service.
- artifactGcsUri String - A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
- database
Type String - The database type that the Metastore service stores its data.
Default value is
MYSQL
. Possible values are:MYSQL
,SPANNER
. - deletion
Protection Boolean - Indicates if the dataproc metastore should be protected against accidental deletions.
- effective
Labels Map<String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- encryption
Config Property Map - Information used to configure the Dataproc Metastore service to encrypt customer data at rest. Structure is documented below.
- endpoint
Uri String - The URI of the endpoint used to access the metastore service.
- hiveMetastoreConfig Property Map - Configuration information specific to running Hive metastore software as the metastore service. Structure is documented below.
- labels Map<String>
- User-defined labels for the metastore service.
Note: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field
effective_labels
for all of the labels present on the resource. - location String
- The location where the metastore service should reside.
The default value is
global
. - maintenance
Window Property Map - The one hour maintenance window of the metastore service.
This specifies when the service can be restarted for maintenance purposes in UTC time.
Maintenance window is not needed for services with the
SPANNER
database type. Structure is documented below. - metadata
Integration Property Map - The setting that defines how metastore metadata should be integrated with external services and systems. Structure is documented below.
- name String
- The relative resource name of the metastore service.
- network String
- The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}".
- network
Config Property Map - The configuration specifying the network settings for the Dataproc Metastore service. Structure is documented below.
- port Number
- The TCP port at which the metastore service is reached. Default: 9083.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi
Labels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- release
Channel String - The release channel of the service. If unspecified, defaults to
STABLE
. Default value isSTABLE
. Possible values are:CANARY
,STABLE
. - scaling
Config Property Map - Represents the scaling configuration of a metastore service. Structure is documented below.
- scheduled
Backup Property Map - The configuration of scheduled backup for the metastore service. Structure is documented below.
- service
Id String - The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
3 and 63 characters.
- state String
- The current state of the metastore service.
- state
Message String - Additional information about the current state of the metastore service, if available.
- telemetry
Config Property Map - The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON. Structure is documented below.
- tier String
- The tier of the service.
Possible values are:
DEVELOPER
,ENTERPRISE
. - uid String
- The globally unique resource identifier of the metastore service.
Supporting Types
MetastoreServiceEncryptionConfig, MetastoreServiceEncryptionConfigArgs
- kmsKey string - The fully qualified customer provided Cloud KMS key name to use for customer data encryption. Use the following format: projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)
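For illustration, a minimal TypeScript sketch of a service that encrypts customer data with a customer-managed Cloud KMS key; the project, key ring, and key names below are placeholders, not values from this page.

import * as gcp from "@pulumi/gcp";

// Hypothetical CMEK-backed service; replace the KMS key path with your own key.
const encrypted = new gcp.dataproc.MetastoreService("encrypted", {
    serviceId: "metastore-cmek",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    encryptionConfig: {
        // Must follow projects/.../locations/.../keyRings/.../cryptoKeys/...
        kmsKey: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
    },
});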
MetastoreServiceHiveMetastoreConfig, MetastoreServiceHiveMetastoreConfigArgs
- version string - The Hive metastore schema version.
- auxiliaryVersions List<MetastoreServiceHiveMetastoreConfigAuxiliaryVersion> - A mapping of Hive metastore version to the auxiliary version configuration. When specified, a secondary Hive metastore service is created along with the primary service. All auxiliary versions must be less than the service's primary version. The key is the auxiliary service name and it must match the regular expression a-z?. This means that the first character must be a lowercase letter, and all the following characters must be hyphens, lowercase letters, or digits, except the last character, which cannot be a hyphen. Structure is documented below.
- configOverrides Map<string,string> - A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden).
- endpointProtocol string - The protocol to use for the metastore service endpoint. If unspecified, defaults to THRIFT. Default value is THRIFT. Possible values are: THRIFT, GRPC.
- kerberosConfig MetastoreServiceHiveMetastoreConfigKerberosConfig - Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.
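As a sketch of the block above (names and values are illustrative), the following TypeScript overrides a hive-site.xml property and serves the endpoint over gRPC; the warehouse bucket is a placeholder.

import * as gcp from "@pulumi/gcp";

// Hypothetical Hive metastore configuration with an example override.
const hive = new gcp.dataproc.MetastoreService("hive", {
    serviceId: "metastore-hive",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
        endpointProtocol: "GRPC",
        configOverrides: {
            // Some keys cannot be overridden; this one is only an example.
            "hive.metastore.warehouse.dir": "gs://my-bucket/hive-warehouse",
        },
    },
});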
MetastoreServiceHiveMetastoreConfigAuxiliaryVersion, MetastoreServiceHiveMetastoreConfigAuxiliaryVersionArgs
- key string - The identifier for this object. Format specified above.
- version string - The Hive metastore version of the auxiliary service. It must be less than the primary Hive metastore service's version.
- configOverrides Map<string,string> - A mapping of Hive metastore configuration key-value pairs to apply to the auxiliary Hive metastore (configured in hive-site.xml) in addition to the primary version's overrides. If keys are present in both the auxiliary version's overrides and the primary version's overrides, the value from the auxiliary version's overrides takes precedence.
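A sketch of an auxiliary version alongside the primary service; the key, versions, and override are illustrative and must satisfy the constraints listed above.

import * as gcp from "@pulumi/gcp";

// Hypothetical primary 3.1.2 metastore with a secondary 2.3.6 auxiliary version.
const withAux = new gcp.dataproc.MetastoreService("with-aux", {
    serviceId: "metastore-aux",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
        auxiliaryVersions: [{
            key: "aux-hms",       // auxiliary service name (lowercase letters, digits, hyphens)
            version: "2.3.6",     // must be lower than the primary version
            configOverrides: {
                "hive.metastore.schema.verification": "false",
            },
        }],
    },
});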
MetastoreServiceHiveMetastoreConfigKerberosConfig, MetastoreServiceHiveMetastoreConfigKerberosConfigArgs
- keytab MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab - A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.
- krb5ConfigGcsUri string - A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
- principal string - A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab, MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs
- cloudSecret string - The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
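Putting the two Kerberos blocks together, a hedged TypeScript sketch; the principal, krb5.conf URI, and Secret Manager secret version are placeholders for resources you manage separately.

import * as gcp from "@pulumi/gcp";

// Hypothetical Kerberos-enabled metastore; all identifiers below are placeholders.
const kerberized = new gcp.dataproc.MetastoreService("kerberized", {
    serviceId: "metastore-krb",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
        kerberosConfig: {
            principal: "hive/primary@EXAMPLE.REALM",
            krb5ConfigGcsUri: "gs://my-bucket/path/to/krb5.conf",
            keytab: {
                // Secret Manager secret version that stores the keytab file.
                cloudSecret: "projects/1234567890/secrets/hive-keytab/versions/1",
            },
        },
    },
});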
MetastoreServiceMaintenanceWindow, MetastoreServiceMaintenanceWindowArgs
- day_of_week str - The day of week, when the window starts. Possible values are: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY.
- hour_of_day int - The hour of day (0-23) when the window starts.
MetastoreServiceMetadataIntegration, MetastoreServiceMetadataIntegrationArgs
- dataCatalogConfig MetastoreServiceMetadataIntegrationDataCatalogConfig - The integration config for the Data Catalog service. Structure is documented below.
MetastoreServiceMetadataIntegrationDataCatalogConfig, MetastoreServiceMetadataIntegrationDataCatalogConfigArgs
- enabled bool - Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.
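A short TypeScript sketch of enabling the Data Catalog sync described above (it is off by default).

import * as gcp from "@pulumi/gcp";

// Sync metastore metadata to Data Catalog.
const catalogSynced = new gcp.dataproc.MetastoreService("catalog-synced", {
    serviceId: "metastore-dc",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    metadataIntegration: {
        dataCatalogConfig: {
            enabled: true,
        },
    },
});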
MetastoreServiceNetworkConfig, MetastoreServiceNetworkConfigArgs
- consumers List<MetastoreServiceNetworkConfigConsumer> - The consumer-side network configuration for the Dataproc Metastore instance. Structure is documented below.
- customRoutesEnabled bool - Enables custom routes to be imported and exported for the Dataproc Metastore service's peered VPC network.
MetastoreServiceNetworkConfigConsumer, MetastoreServiceNetworkConfigConsumerArgs
- subnetwork string - The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}
- endpointUri string - (Output) The URI of the endpoint used to access the metastore service.
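A sketch of the consumer-side network configuration; the project number and subnetwork name are placeholders, and the subnetwork must have a free IP address in its primary range as noted above.

import * as gcp from "@pulumi/gcp";

// Hypothetical service reachable from a consumer subnetwork.
const privateService = new gcp.dataproc.MetastoreService("private", {
    serviceId: "metastore-private",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    networkConfig: {
        consumers: [{
            subnetwork: "projects/1234567890/regions/us-central1/subnetworks/my-subnet",
        }],
    },
});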
MetastoreServiceScalingConfig, MetastoreServiceScalingConfigArgs
- autoscalingConfig MetastoreServiceScalingConfigAutoscalingConfig - Represents the autoscaling configuration of a metastore service. Structure is documented below.
- instanceSize string - Metastore instance sizes. Possible values are: EXTRA_SMALL, SMALL, MEDIUM, LARGE, EXTRA_LARGE.
- scalingFactor double - Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0.
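A sketch of a fixed-size scaling configuration; only instanceSize is set here, with scalingFactor left to the alternative described above. The ENTERPRISE tier and MEDIUM size are illustrative assumptions.

import * as gcp from "@pulumi/gcp";

// Hypothetical medium-sized service using instanceSize rather than scalingFactor.
const sized = new gcp.dataproc.MetastoreService("sized", {
    serviceId: "metastore-sized",
    location: "us-central1",
    tier: "ENTERPRISE",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        instanceSize: "MEDIUM",
    },
});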
MetastoreServiceScalingConfigAutoscalingConfig, MetastoreServiceScalingConfigAutoscalingConfigArgs
- autoscalingEnabled bool - Defines whether autoscaling is enabled. The default value is false.
- limitConfig MetastoreServiceScalingConfigAutoscalingConfigLimitConfig - Represents the limit configuration of a metastore service. Structure is documented below.
MetastoreServiceScalingConfigAutoscalingConfigLimitConfig, MetastoreServiceScalingConfigAutoscalingConfigLimitConfigArgs
- maxScalingFactor double - The maximum scaling factor that the service will autoscale to. The default value is 6.0.
- minScalingFactor double - The minimum scaling factor that the service will autoscale to. The default value is 0.1.
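Combining the two autoscaling blocks above, a sketch that lets the service autoscale between factors 0.5 and 2.0; the limit values and ENTERPRISE tier are illustrative assumptions.

import * as gcp from "@pulumi/gcp";

// Hypothetical autoscaled service with explicit min/max scaling factors.
const autoscaled = new gcp.dataproc.MetastoreService("autoscaled", {
    serviceId: "metastore-autoscaled",
    location: "us-central1",
    tier: "ENTERPRISE",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scalingConfig: {
        autoscalingConfig: {
            autoscalingEnabled: true,
            limitConfig: {
                minScalingFactor: 0.5,
                maxScalingFactor: 2,
            },
        },
    },
});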
MetastoreServiceScheduledBackup, MetastoreServiceScheduledBackupArgs
- backupLocation string - A Cloud Storage URI of a folder, in the format gs://<bucket_name>/<path_inside_bucket>. A sub-folder <backup_folder> containing backup files will be stored below it.
- cronSchedule string - The scheduled interval in Cron format, see https://en.wikipedia.org/wiki/Cron. The default is empty: scheduled backup is not enabled. Must be specified to enable scheduled backups.
- enabled bool - Defines whether the scheduled backup is enabled. The default value is false.
- timeZone string - Specifies the time zone to be used when interpreting cronSchedule. Must be a time zone name from the time zone database (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g. America/Los_Angeles or Africa/Abidjan. If left unspecified, the default is UTC.
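A sketch of a nightly backup using the fields above; the bucket path is a placeholder.

import * as gcp from "@pulumi/gcp";

// Hypothetical daily backup at 02:00 UTC to a placeholder bucket.
const backedUp = new gcp.dataproc.MetastoreService("backed-up", {
    serviceId: "metastore-backup",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    scheduledBackup: {
        enabled: true,
        cronSchedule: "0 2 * * *",
        timeZone: "UTC",
        backupLocation: "gs://my-bucket/metastore-backups",
    },
});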
MetastoreServiceTelemetryConfig, MetastoreServiceTelemetryConfigArgs
- logFormat string - The output format of the Dataproc Metastore service's logs. Default value is JSON. Possible values are: LEGACY, JSON.
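A sketch of switching the service logs from the JSON default to the LEGACY format.

import * as gcp from "@pulumi/gcp";

// Emit service logs in the LEGACY text format instead of JSON.
const legacyLogs = new gcp.dataproc.MetastoreService("legacy-logs", {
    serviceId: "metastore-logs",
    location: "us-central1",
    hiveMetastoreConfig: {
        version: "3.1.2",
    },
    telemetryConfig: {
        logFormat: "LEGACY",
    },
});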
Import
Service can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/services/{{service_id}}
{{project}}/{{location}}/{{service_id}}
{{location}}/{{service_id}}
When using the pulumi import command, Service can be imported using one of the formats above. For example:
$ pulumi import gcp:dataproc/metastoreService:MetastoreService default projects/{{project}}/locations/{{location}}/services/{{service_id}}
$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{project}}/{{location}}/{{service_id}}
$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{location}}/{{service_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.