gcp.gkeonprem.VMwareNodePool
Explore with Pulumi AI
A Google VMware node pool.
Example Usage
Gkeonprem Vmware Node Pool Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Admin-registered VMware user cluster that the node pool below attaches to.
const default_basic = new gcp.gkeonprem.VMwareCluster("default-basic", {
name: "my-cluster",
location: "us-west1",
adminClusterMembership: "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
description: "test cluster",
onPremVersion: "1.13.1-gke.35",
// Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
networkConfig: {
serviceAddressCidrBlocks: ["10.96.0.0/12"],
podAddressCidrBlocks: ["192.168.0.0/16"],
dhcpIpConfig: {
enabled: true,
},
},
controlPlaneNode: {
cpus: 4,
memory: 8192,
replicas: 1,
},
// Bundled MetalLB load balancer with control-plane and ingress VIPs.
loadBalancer: {
vipConfig: {
controlPlaneVip: "10.251.133.5",
ingressVip: "10.251.135.19",
},
metalLbConfig: {
addressPools: [
{
pool: "ingress-ip",
manualAssign: true,
addresses: ["10.251.135.19"],
},
{
pool: "lb-test-ip",
manualAssign: true,
addresses: ["10.251.135.19"],
},
],
},
},
});
// Node pool of 3 ubuntu_containerd workers attached to the cluster above.
const nodepool_basic = new gcp.gkeonprem.VMwareNodePool("nodepool-basic", {
name: "my-nodepool",
location: "us-west1",
vmwareCluster: default_basic.name,
config: {
replicas: 3,
imageType: "ubuntu_containerd",
enableLoadBalancer: true,
},
});
import pulumi
import pulumi_gcp as gcp

# Admin-registered VMware user cluster that the node pool below attaches to.
default_basic = gcp.gkeonprem.VMwareCluster("default-basic",
    name="my-cluster",
    location="us-west1",
    admin_cluster_membership="projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
    description="test cluster",
    on_prem_version="1.13.1-gke.35",
    # Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
    network_config={
        "service_address_cidr_blocks": ["10.96.0.0/12"],
        "pod_address_cidr_blocks": ["192.168.0.0/16"],
        "dhcp_ip_config": {
            "enabled": True,
        },
    },
    control_plane_node={
        "cpus": 4,
        "memory": 8192,
        "replicas": 1,
    },
    # Bundled MetalLB load balancer with control-plane and ingress VIPs.
    load_balancer={
        "vip_config": {
            "control_plane_vip": "10.251.133.5",
            "ingress_vip": "10.251.135.19",
        },
        "metal_lb_config": {
            "address_pools": [
                {
                    "pool": "ingress-ip",
                    "manual_assign": True,
                    "addresses": ["10.251.135.19"],
                },
                {
                    "pool": "lb-test-ip",
                    "manual_assign": True,
                    "addresses": ["10.251.135.19"],
                },
            ],
        },
    })
# Node pool of 3 ubuntu_containerd workers attached to the cluster above.
nodepool_basic = gcp.gkeonprem.VMwareNodePool("nodepool-basic",
    name="my-nodepool",
    location="us-west1",
    vmware_cluster=default_basic.name,
    config={
        "replicas": 3,
        "image_type": "ubuntu_containerd",
        "enable_load_balancer": True,
    })
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkeonprem"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := gkeonprem.NewVMwareCluster(ctx, "default-basic", &gkeonprem.VMwareClusterArgs{
Name: pulumi.String("my-cluster"),
Location: pulumi.String("us-west1"),
AdminClusterMembership: pulumi.String("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test"),
Description: pulumi.String("test cluster"),
OnPremVersion: pulumi.String("1.13.1-gke.35"),
NetworkConfig: &gkeonprem.VMwareClusterNetworkConfigArgs{
ServiceAddressCidrBlocks: pulumi.StringArray{
pulumi.String("10.96.0.0/12"),
},
PodAddressCidrBlocks: pulumi.StringArray{
pulumi.String("192.168.0.0/16"),
},
DhcpIpConfig: &gkeonprem.VMwareClusterNetworkConfigDhcpIpConfigArgs{
Enabled: pulumi.Bool(true),
},
},
ControlPlaneNode: &gkeonprem.VMwareClusterControlPlaneNodeArgs{
Cpus: pulumi.Int(4),
Memory: pulumi.Int(8192),
Replicas: pulumi.Int(1),
},
LoadBalancer: &gkeonprem.VMwareClusterLoadBalancerArgs{
VipConfig: &gkeonprem.VMwareClusterLoadBalancerVipConfigArgs{
ControlPlaneVip: pulumi.String("10.251.133.5"),
IngressVip: pulumi.String("10.251.135.19"),
},
MetalLbConfig: &gkeonprem.VMwareClusterLoadBalancerMetalLbConfigArgs{
AddressPools: gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArray{
&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
Pool: pulumi.String("ingress-ip"),
ManualAssign: pulumi.Bool(true),
Addresses: pulumi.StringArray{
pulumi.String("10.251.135.19"),
},
},
&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
Pool: pulumi.String("lb-test-ip"),
ManualAssign: pulumi.Bool(true),
Addresses: pulumi.StringArray{
pulumi.String("10.251.135.19"),
},
},
},
},
},
})
if err != nil {
return err
}
_, err = gkeonprem.NewVMwareNodePool(ctx, "nodepool-basic", &gkeonprem.VMwareNodePoolArgs{
Name: pulumi.String("my-nodepool"),
Location: pulumi.String("us-west1"),
VmwareCluster: default_basic.Name,
Config: &gkeonprem.VMwareNodePoolConfigArgs{
Replicas: pulumi.Int(3),
ImageType: pulumi.String("ubuntu_containerd"),
EnableLoadBalancer: pulumi.Bool(true),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Admin-registered VMware user cluster that the node pool below attaches to.
var default_basic = new Gcp.GkeOnPrem.VMwareCluster("default-basic", new()
{
Name = "my-cluster",
Location = "us-west1",
AdminClusterMembership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
Description = "test cluster",
OnPremVersion = "1.13.1-gke.35",
// Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
NetworkConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigArgs
{
ServiceAddressCidrBlocks = new[]
{
"10.96.0.0/12",
},
PodAddressCidrBlocks = new[]
{
"192.168.0.0/16",
},
DhcpIpConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs
{
Enabled = true,
},
},
ControlPlaneNode = new Gcp.GkeOnPrem.Inputs.VMwareClusterControlPlaneNodeArgs
{
Cpus = 4,
Memory = 8192,
Replicas = 1,
},
// Bundled MetalLB load balancer with control-plane and ingress VIPs.
LoadBalancer = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerArgs
{
VipConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerVipConfigArgs
{
ControlPlaneVip = "10.251.133.5",
IngressVip = "10.251.135.19",
},
MetalLbConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigArgs
{
AddressPools = new[]
{
new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
{
Pool = "ingress-ip",
ManualAssign = true,
Addresses = new[]
{
"10.251.135.19",
},
},
new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
{
Pool = "lb-test-ip",
ManualAssign = true,
Addresses = new[]
{
"10.251.135.19",
},
},
},
},
},
});
// Node pool of 3 ubuntu_containerd workers attached to the cluster above.
var nodepool_basic = new Gcp.GkeOnPrem.VMwareNodePool("nodepool-basic", new()
{
Name = "my-nodepool",
Location = "us-west1",
VmwareCluster = default_basic.Name,
Config = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigArgs
{
Replicas = 3,
ImageType = "ubuntu_containerd",
EnableLoadBalancer = true,
},
});
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkeonprem.VMwareCluster;
import com.pulumi.gcp.gkeonprem.VMwareClusterArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterControlPlaneNodeArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerVipConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigArgs;
// Fix: this input type is used below but was missing from the imports.
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs;
import com.pulumi.gcp.gkeonprem.VMwareNodePool;
import com.pulumi.gcp.gkeonprem.VMwareNodePoolArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Admin-registered VMware user cluster that the node pool below attaches to.
        var default_basic = new VMwareCluster("default-basic", VMwareClusterArgs.builder()
            .name("my-cluster")
            .location("us-west1")
            .adminClusterMembership("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test")
            .description("test cluster")
            .onPremVersion("1.13.1-gke.35")
            // Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
            .networkConfig(VMwareClusterNetworkConfigArgs.builder()
                .serviceAddressCidrBlocks("10.96.0.0/12")
                .podAddressCidrBlocks("192.168.0.0/16")
                .dhcpIpConfig(VMwareClusterNetworkConfigDhcpIpConfigArgs.builder()
                    .enabled(true)
                    .build())
                .build())
            .controlPlaneNode(VMwareClusterControlPlaneNodeArgs.builder()
                .cpus(4)
                .memory(8192)
                .replicas(1)
                .build())
            // Bundled MetalLB load balancer with control-plane and ingress VIPs.
            .loadBalancer(VMwareClusterLoadBalancerArgs.builder()
                .vipConfig(VMwareClusterLoadBalancerVipConfigArgs.builder()
                    .controlPlaneVip("10.251.133.5")
                    .ingressVip("10.251.135.19")
                    .build())
                .metalLbConfig(VMwareClusterLoadBalancerMetalLbConfigArgs.builder()
                    .addressPools(
                        VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                            .pool("ingress-ip")
                            // Fix: manualAssign takes a boolean, not the string "true".
                            .manualAssign(true)
                            .addresses("10.251.135.19")
                            .build(),
                        VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                            .pool("lb-test-ip")
                            .manualAssign(true)
                            .addresses("10.251.135.19")
                            .build())
                    .build())
                .build())
            .build());

        // Node pool of 3 ubuntu_containerd workers attached to the cluster above.
        var nodepool_basic = new VMwareNodePool("nodepool-basic", VMwareNodePoolArgs.builder()
            .name("my-nodepool")
            .location("us-west1")
            .vmwareCluster(default_basic.name())
            .config(VMwareNodePoolConfigArgs.builder()
                .replicas(3)
                .imageType("ubuntu_containerd")
                .enableLoadBalancer(true)
                .build())
            .build());
    }
}
# Fix: restored the indentation lost in extraction (flat YAML is invalid),
# and made manualAssign a boolean instead of the quoted string 'true',
# consistent with the other language examples.
resources:
  # Admin-registered VMware user cluster that the node pool below attaches to.
  default-basic:
    type: gcp:gkeonprem:VMwareCluster
    properties:
      name: my-cluster
      location: us-west1
      adminClusterMembership: projects/870316890899/locations/global/memberships/gkeonprem-terraform-test
      description: test cluster
      onPremVersion: 1.13.1-gke.35
      networkConfig:
        serviceAddressCidrBlocks:
          - 10.96.0.0/12
        podAddressCidrBlocks:
          - 192.168.0.0/16
        dhcpIpConfig:
          enabled: true
      controlPlaneNode:
        cpus: 4
        memory: 8192
        replicas: 1
      loadBalancer:
        vipConfig:
          controlPlaneVip: 10.251.133.5
          ingressVip: 10.251.135.19
        metalLbConfig:
          addressPools:
            - pool: ingress-ip
              manualAssign: true
              addresses:
                - 10.251.135.19
            - pool: lb-test-ip
              manualAssign: true
              addresses:
                - 10.251.135.19
  # Node pool of 3 ubuntu_containerd workers attached to the cluster above.
  nodepool-basic:
    type: gcp:gkeonprem:VMwareNodePool
    properties:
      name: my-nodepool
      location: us-west1
      vmwareCluster: ${["default-basic"].name}
      config:
        replicas: 3
        imageType: ubuntu_containerd
        enableLoadBalancer: true
Gkeonprem Vmware Node Pool Full
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Admin-registered VMware user cluster that the node pool below attaches to.
const default_full = new gcp.gkeonprem.VMwareCluster("default-full", {
name: "my-cluster",
location: "us-west1",
adminClusterMembership: "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
description: "test cluster",
onPremVersion: "1.13.1-gke.35",
// Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
networkConfig: {
serviceAddressCidrBlocks: ["10.96.0.0/12"],
podAddressCidrBlocks: ["192.168.0.0/16"],
dhcpIpConfig: {
enabled: true,
},
},
controlPlaneNode: {
cpus: 4,
memory: 8192,
replicas: 1,
},
// Bundled MetalLB load balancer with control-plane and ingress VIPs.
loadBalancer: {
vipConfig: {
controlPlaneVip: "10.251.133.5",
ingressVip: "10.251.135.19",
},
metalLbConfig: {
addressPools: [
{
pool: "ingress-ip",
manualAssign: true,
addresses: ["10.251.135.19"],
},
{
pool: "lb-test-ip",
manualAssign: true,
addresses: ["10.251.135.19"],
},
],
},
},
});
// Fully-specified node pool: explicit machine shape, taints, labels,
// vSphere placement, and autoscaling bounds.
const nodepool_full = new gcp.gkeonprem.VMwareNodePool("nodepool-full", {
name: "my-nodepool",
location: "us-west1",
vmwareCluster: default_full.name,
annotations: {},
config: {
cpus: 4,
memoryMb: 8196,
replicas: 3,
imageType: "ubuntu_containerd",
image: "image",
bootDiskSizeGb: 10,
taints: [
{
key: "key",
value: "value",
},
{
key: "key",
value: "value",
effect: "NO_SCHEDULE",
},
],
labels: {},
vsphereConfig: {
datastore: "test-datastore",
tags: [
{
category: "test-category-1",
tag: "tag-1",
},
{
category: "test-category-2",
tag: "tag-2",
},
],
hostGroups: [
"host1",
"host2",
],
},
enableLoadBalancer: true,
},
// Autoscaler may vary replicas between 1 and 5.
nodePoolAutoscaling: {
minReplicas: 1,
maxReplicas: 5,
},
});
import pulumi
import pulumi_gcp as gcp

# Admin-registered VMware user cluster that the node pool below attaches to.
default_full = gcp.gkeonprem.VMwareCluster("default-full",
    name="my-cluster",
    location="us-west1",
    admin_cluster_membership="projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
    description="test cluster",
    on_prem_version="1.13.1-gke.35",
    # Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
    network_config={
        "service_address_cidr_blocks": ["10.96.0.0/12"],
        "pod_address_cidr_blocks": ["192.168.0.0/16"],
        "dhcp_ip_config": {
            "enabled": True,
        },
    },
    control_plane_node={
        "cpus": 4,
        "memory": 8192,
        "replicas": 1,
    },
    # Bundled MetalLB load balancer with control-plane and ingress VIPs.
    load_balancer={
        "vip_config": {
            "control_plane_vip": "10.251.133.5",
            "ingress_vip": "10.251.135.19",
        },
        "metal_lb_config": {
            "address_pools": [
                {
                    "pool": "ingress-ip",
                    "manual_assign": True,
                    "addresses": ["10.251.135.19"],
                },
                {
                    "pool": "lb-test-ip",
                    "manual_assign": True,
                    "addresses": ["10.251.135.19"],
                },
            ],
        },
    })
# Fully-specified node pool: explicit machine shape, taints, labels,
# vSphere placement, and autoscaling bounds.
nodepool_full = gcp.gkeonprem.VMwareNodePool("nodepool-full",
    name="my-nodepool",
    location="us-west1",
    vmware_cluster=default_full.name,
    annotations={},
    config={
        "cpus": 4,
        "memory_mb": 8196,
        "replicas": 3,
        "image_type": "ubuntu_containerd",
        "image": "image",
        "boot_disk_size_gb": 10,
        "taints": [
            {
                "key": "key",
                "value": "value",
            },
            {
                "key": "key",
                "value": "value",
                "effect": "NO_SCHEDULE",
            },
        ],
        "labels": {},
        "vsphere_config": {
            "datastore": "test-datastore",
            "tags": [
                {
                    "category": "test-category-1",
                    "tag": "tag-1",
                },
                {
                    "category": "test-category-2",
                    "tag": "tag-2",
                },
            ],
            "host_groups": [
                "host1",
                "host2",
            ],
        },
        "enable_load_balancer": True,
    },
    # Autoscaler may vary replicas between 1 and 5.
    node_pool_autoscaling={
        "min_replicas": 1,
        "max_replicas": 5,
    })
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkeonprem"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := gkeonprem.NewVMwareCluster(ctx, "default-full", &gkeonprem.VMwareClusterArgs{
Name: pulumi.String("my-cluster"),
Location: pulumi.String("us-west1"),
AdminClusterMembership: pulumi.String("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test"),
Description: pulumi.String("test cluster"),
OnPremVersion: pulumi.String("1.13.1-gke.35"),
NetworkConfig: &gkeonprem.VMwareClusterNetworkConfigArgs{
ServiceAddressCidrBlocks: pulumi.StringArray{
pulumi.String("10.96.0.0/12"),
},
PodAddressCidrBlocks: pulumi.StringArray{
pulumi.String("192.168.0.0/16"),
},
DhcpIpConfig: &gkeonprem.VMwareClusterNetworkConfigDhcpIpConfigArgs{
Enabled: pulumi.Bool(true),
},
},
ControlPlaneNode: &gkeonprem.VMwareClusterControlPlaneNodeArgs{
Cpus: pulumi.Int(4),
Memory: pulumi.Int(8192),
Replicas: pulumi.Int(1),
},
LoadBalancer: &gkeonprem.VMwareClusterLoadBalancerArgs{
VipConfig: &gkeonprem.VMwareClusterLoadBalancerVipConfigArgs{
ControlPlaneVip: pulumi.String("10.251.133.5"),
IngressVip: pulumi.String("10.251.135.19"),
},
MetalLbConfig: &gkeonprem.VMwareClusterLoadBalancerMetalLbConfigArgs{
AddressPools: gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArray{
&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
Pool: pulumi.String("ingress-ip"),
ManualAssign: pulumi.Bool(true),
Addresses: pulumi.StringArray{
pulumi.String("10.251.135.19"),
},
},
&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
Pool: pulumi.String("lb-test-ip"),
ManualAssign: pulumi.Bool(true),
Addresses: pulumi.StringArray{
pulumi.String("10.251.135.19"),
},
},
},
},
},
})
if err != nil {
return err
}
_, err = gkeonprem.NewVMwareNodePool(ctx, "nodepool-full", &gkeonprem.VMwareNodePoolArgs{
Name: pulumi.String("my-nodepool"),
Location: pulumi.String("us-west1"),
VmwareCluster: default_full.Name,
Annotations: pulumi.StringMap{},
Config: &gkeonprem.VMwareNodePoolConfigArgs{
Cpus: pulumi.Int(4),
MemoryMb: pulumi.Int(8196),
Replicas: pulumi.Int(3),
ImageType: pulumi.String("ubuntu_containerd"),
Image: pulumi.String("image"),
BootDiskSizeGb: pulumi.Int(10),
Taints: gkeonprem.VMwareNodePoolConfigTaintArray{
&gkeonprem.VMwareNodePoolConfigTaintArgs{
Key: pulumi.String("key"),
Value: pulumi.String("value"),
},
&gkeonprem.VMwareNodePoolConfigTaintArgs{
Key: pulumi.String("key"),
Value: pulumi.String("value"),
Effect: pulumi.String("NO_SCHEDULE"),
},
},
Labels: pulumi.StringMap{},
VsphereConfig: &gkeonprem.VMwareNodePoolConfigVsphereConfigArgs{
Datastore: pulumi.String("test-datastore"),
Tags: gkeonprem.VMwareNodePoolConfigVsphereConfigTagArray{
&gkeonprem.VMwareNodePoolConfigVsphereConfigTagArgs{
Category: pulumi.String("test-category-1"),
Tag: pulumi.String("tag-1"),
},
&gkeonprem.VMwareNodePoolConfigVsphereConfigTagArgs{
Category: pulumi.String("test-category-2"),
Tag: pulumi.String("tag-2"),
},
},
HostGroups: pulumi.StringArray{
pulumi.String("host1"),
pulumi.String("host2"),
},
},
EnableLoadBalancer: pulumi.Bool(true),
},
NodePoolAutoscaling: &gkeonprem.VMwareNodePoolNodePoolAutoscalingArgs{
MinReplicas: pulumi.Int(1),
MaxReplicas: pulumi.Int(5),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Admin-registered VMware user cluster that the node pool below attaches to.
var default_full = new Gcp.GkeOnPrem.VMwareCluster("default-full", new()
{
Name = "my-cluster",
Location = "us-west1",
AdminClusterMembership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
Description = "test cluster",
OnPremVersion = "1.13.1-gke.35",
// Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
NetworkConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigArgs
{
ServiceAddressCidrBlocks = new[]
{
"10.96.0.0/12",
},
PodAddressCidrBlocks = new[]
{
"192.168.0.0/16",
},
DhcpIpConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs
{
Enabled = true,
},
},
ControlPlaneNode = new Gcp.GkeOnPrem.Inputs.VMwareClusterControlPlaneNodeArgs
{
Cpus = 4,
Memory = 8192,
Replicas = 1,
},
// Bundled MetalLB load balancer with control-plane and ingress VIPs.
LoadBalancer = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerArgs
{
VipConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerVipConfigArgs
{
ControlPlaneVip = "10.251.133.5",
IngressVip = "10.251.135.19",
},
MetalLbConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigArgs
{
AddressPools = new[]
{
new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
{
Pool = "ingress-ip",
ManualAssign = true,
Addresses = new[]
{
"10.251.135.19",
},
},
new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
{
Pool = "lb-test-ip",
ManualAssign = true,
Addresses = new[]
{
"10.251.135.19",
},
},
},
},
},
});
// Fully-specified node pool: explicit machine shape, taints, labels,
// vSphere placement, and autoscaling bounds.
var nodepool_full = new Gcp.GkeOnPrem.VMwareNodePool("nodepool-full", new()
{
Name = "my-nodepool",
Location = "us-west1",
VmwareCluster = default_full.Name,
// NOTE(review): the other language examples pass an empty map ({}) here;
// null leaves the property unset — presumably equivalent, confirm before relying on it.
Annotations = null,
Config = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigArgs
{
Cpus = 4,
MemoryMb = 8196,
Replicas = 3,
ImageType = "ubuntu_containerd",
Image = "image",
BootDiskSizeGb = 10,
Taints = new[]
{
new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigTaintArgs
{
Key = "key",
Value = "value",
},
new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigTaintArgs
{
Key = "key",
Value = "value",
Effect = "NO_SCHEDULE",
},
},
Labels = null,
VsphereConfig = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigArgs
{
Datastore = "test-datastore",
Tags = new[]
{
new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigTagArgs
{
Category = "test-category-1",
Tag = "tag-1",
},
new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigTagArgs
{
Category = "test-category-2",
Tag = "tag-2",
},
},
HostGroups = new[]
{
"host1",
"host2",
},
},
EnableLoadBalancer = true,
},
// Autoscaler may vary replicas between 1 and 5.
NodePoolAutoscaling = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolNodePoolAutoscalingArgs
{
MinReplicas = 1,
MaxReplicas = 5,
},
});
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.gkeonprem.VMwareCluster;
import com.pulumi.gcp.gkeonprem.VMwareClusterArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterControlPlaneNodeArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerVipConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigArgs;
// Fix: these input types are used below but were missing from the imports.
import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs;
import com.pulumi.gcp.gkeonprem.VMwareNodePool;
import com.pulumi.gcp.gkeonprem.VMwareNodePoolArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigTaintArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigVsphereConfigArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigVsphereConfigTagArgs;
import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolNodePoolAutoscalingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Admin-registered VMware user cluster that the node pool below attaches to.
        var default_full = new VMwareCluster("default-full", VMwareClusterArgs.builder()
            .name("my-cluster")
            .location("us-west1")
            .adminClusterMembership("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test")
            .description("test cluster")
            .onPremVersion("1.13.1-gke.35")
            // Pod/service CIDRs; node IPs come from DHCP rather than a static pool.
            .networkConfig(VMwareClusterNetworkConfigArgs.builder()
                .serviceAddressCidrBlocks("10.96.0.0/12")
                .podAddressCidrBlocks("192.168.0.0/16")
                .dhcpIpConfig(VMwareClusterNetworkConfigDhcpIpConfigArgs.builder()
                    .enabled(true)
                    .build())
                .build())
            .controlPlaneNode(VMwareClusterControlPlaneNodeArgs.builder()
                .cpus(4)
                .memory(8192)
                .replicas(1)
                .build())
            // Bundled MetalLB load balancer with control-plane and ingress VIPs.
            .loadBalancer(VMwareClusterLoadBalancerArgs.builder()
                .vipConfig(VMwareClusterLoadBalancerVipConfigArgs.builder()
                    .controlPlaneVip("10.251.133.5")
                    .ingressVip("10.251.135.19")
                    .build())
                .metalLbConfig(VMwareClusterLoadBalancerMetalLbConfigArgs.builder()
                    .addressPools(
                        VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                            .pool("ingress-ip")
                            // Fix: manualAssign takes a boolean, not the string "true".
                            .manualAssign(true)
                            .addresses("10.251.135.19")
                            .build(),
                        VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                            .pool("lb-test-ip")
                            .manualAssign(true)
                            .addresses("10.251.135.19")
                            .build())
                    .build())
                .build())
            .build());

        // Fully-specified node pool: explicit machine shape, taints, labels,
        // vSphere placement, and autoscaling bounds.
        var nodepool_full = new VMwareNodePool("nodepool-full", VMwareNodePoolArgs.builder()
            .name("my-nodepool")
            .location("us-west1")
            .vmwareCluster(default_full.name())
            // Fix: annotations()/labels() with no argument do not exist on the
            // builder; pass an explicit empty map as in the other languages.
            .annotations(Map.of())
            .config(VMwareNodePoolConfigArgs.builder()
                .cpus(4)
                .memoryMb(8196)
                .replicas(3)
                .imageType("ubuntu_containerd")
                .image("image")
                .bootDiskSizeGb(10)
                .taints(
                    VMwareNodePoolConfigTaintArgs.builder()
                        .key("key")
                        .value("value")
                        .build(),
                    VMwareNodePoolConfigTaintArgs.builder()
                        .key("key")
                        .value("value")
                        .effect("NO_SCHEDULE")
                        .build())
                .labels(Map.of())
                .vsphereConfig(VMwareNodePoolConfigVsphereConfigArgs.builder()
                    .datastore("test-datastore")
                    .tags(
                        VMwareNodePoolConfigVsphereConfigTagArgs.builder()
                            .category("test-category-1")
                            .tag("tag-1")
                            .build(),
                        VMwareNodePoolConfigVsphereConfigTagArgs.builder()
                            .category("test-category-2")
                            .tag("tag-2")
                            .build())
                    .hostGroups(
                        "host1",
                        "host2")
                    .build())
                .enableLoadBalancer(true)
                .build())
            // Autoscaler may vary replicas between 1 and 5.
            .nodePoolAutoscaling(VMwareNodePoolNodePoolAutoscalingArgs.builder()
                .minReplicas(1)
                .maxReplicas(5)
                .build())
            .build());
    }
}
# Fix: restored the indentation lost in extraction (flat YAML is invalid),
# and made manualAssign a boolean instead of the quoted string 'true',
# consistent with the other language examples.
resources:
  # Admin-registered VMware user cluster that the node pool below attaches to.
  default-full:
    type: gcp:gkeonprem:VMwareCluster
    properties:
      name: my-cluster
      location: us-west1
      adminClusterMembership: projects/870316890899/locations/global/memberships/gkeonprem-terraform-test
      description: test cluster
      onPremVersion: 1.13.1-gke.35
      networkConfig:
        serviceAddressCidrBlocks:
          - 10.96.0.0/12
        podAddressCidrBlocks:
          - 192.168.0.0/16
        dhcpIpConfig:
          enabled: true
      controlPlaneNode:
        cpus: 4
        memory: 8192
        replicas: 1
      loadBalancer:
        vipConfig:
          controlPlaneVip: 10.251.133.5
          ingressVip: 10.251.135.19
        metalLbConfig:
          addressPools:
            - pool: ingress-ip
              manualAssign: true
              addresses:
                - 10.251.135.19
            - pool: lb-test-ip
              manualAssign: true
              addresses:
                - 10.251.135.19
  # Fully-specified node pool: machine shape, taints, labels, vSphere
  # placement, and autoscaling bounds.
  nodepool-full:
    type: gcp:gkeonprem:VMwareNodePool
    properties:
      name: my-nodepool
      location: us-west1
      vmwareCluster: ${["default-full"].name}
      annotations: {}
      config:
        cpus: 4
        memoryMb: 8196
        replicas: 3
        imageType: ubuntu_containerd
        image: image
        bootDiskSizeGb: 10
        taints:
          - key: key
            value: value
          - key: key
            value: value
            effect: NO_SCHEDULE
        labels: {}
        vsphereConfig:
          datastore: test-datastore
          tags:
            - category: test-category-1
              tag: tag-1
            - category: test-category-2
              tag: tag-2
          hostGroups:
            - host1
            - host2
        enableLoadBalancer: true
      nodePoolAutoscaling:
        minReplicas: 1
        maxReplicas: 5
Create VMwareNodePool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new VMwareNodePool(name: string, args: VMwareNodePoolArgs, opts?: CustomResourceOptions);
@overload
def VMwareNodePool(resource_name: str,
args: VMwareNodePoolArgs,
opts: Optional[ResourceOptions] = None)
@overload
def VMwareNodePool(resource_name: str,
opts: Optional[ResourceOptions] = None,
config: Optional[VMwareNodePoolConfigArgs] = None,
location: Optional[str] = None,
vmware_cluster: Optional[str] = None,
annotations: Optional[Mapping[str, str]] = None,
display_name: Optional[str] = None,
name: Optional[str] = None,
node_pool_autoscaling: Optional[VMwareNodePoolNodePoolAutoscalingArgs] = None,
project: Optional[str] = None)
func NewVMwareNodePool(ctx *Context, name string, args VMwareNodePoolArgs, opts ...ResourceOption) (*VMwareNodePool, error)
public VMwareNodePool(string name, VMwareNodePoolArgs args, CustomResourceOptions? opts = null)
public VMwareNodePool(String name, VMwareNodePoolArgs args)
public VMwareNodePool(String name, VMwareNodePoolArgs args, CustomResourceOptions options)
type: gcp:gkeonprem:VMwareNodePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args VMwareNodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args VMwareNodePoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args VMwareNodePoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args VMwareNodePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args VMwareNodePoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var vmwareNodePoolResource = new Gcp.GkeOnPrem.VMwareNodePool("vmwareNodePoolResource", new()
{
Config = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigArgs
{
ImageType = "string",
BootDiskSizeGb = 0,
Cpus = 0,
EnableLoadBalancer = false,
Image = "string",
Labels =
{
{ "string", "string" },
},
MemoryMb = 0,
Replicas = 0,
Taints = new[]
{
new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigTaintArgs
{
Key = "string",
Value = "string",
Effect = "string",
},
},
VsphereConfig = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigArgs
{
Datastore = "string",
HostGroups = new[]
{
"string",
},
Tags = new[]
{
new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigTagArgs
{
Category = "string",
Tag = "string",
},
},
},
},
Location = "string",
VmwareCluster = "string",
Annotations =
{
{ "string", "string" },
},
DisplayName = "string",
Name = "string",
NodePoolAutoscaling = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolNodePoolAutoscalingArgs
{
MaxReplicas = 0,
MinReplicas = 0,
},
Project = "string",
});
example, err := gkeonprem.NewVMwareNodePool(ctx, "vmwareNodePoolResource", &gkeonprem.VMwareNodePoolArgs{
Config: &gkeonprem.VMwareNodePoolConfigArgs{
ImageType: pulumi.String("string"),
BootDiskSizeGb: pulumi.Int(0),
Cpus: pulumi.Int(0),
EnableLoadBalancer: pulumi.Bool(false),
Image: pulumi.String("string"),
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
MemoryMb: pulumi.Int(0),
Replicas: pulumi.Int(0),
Taints: gkeonprem.VMwareNodePoolConfigTaintArray{
&gkeonprem.VMwareNodePoolConfigTaintArgs{
Key: pulumi.String("string"),
Value: pulumi.String("string"),
Effect: pulumi.String("string"),
},
},
VsphereConfig: &gkeonprem.VMwareNodePoolConfigVsphereConfigArgs{
Datastore: pulumi.String("string"),
HostGroups: pulumi.StringArray{
pulumi.String("string"),
},
Tags: gkeonprem.VMwareNodePoolConfigVsphereConfigTagArray{
&gkeonprem.VMwareNodePoolConfigVsphereConfigTagArgs{
Category: pulumi.String("string"),
Tag: pulumi.String("string"),
},
},
},
},
Location: pulumi.String("string"),
VmwareCluster: pulumi.String("string"),
Annotations: pulumi.StringMap{
"string": pulumi.String("string"),
},
DisplayName: pulumi.String("string"),
Name: pulumi.String("string"),
NodePoolAutoscaling: &gkeonprem.VMwareNodePoolNodePoolAutoscalingArgs{
MaxReplicas: pulumi.Int(0),
MinReplicas: pulumi.Int(0),
},
Project: pulumi.String("string"),
})
var vmwareNodePoolResource = new VMwareNodePool("vmwareNodePoolResource", VMwareNodePoolArgs.builder()
.config(VMwareNodePoolConfigArgs.builder()
.imageType("string")
.bootDiskSizeGb(0)
.cpus(0)
.enableLoadBalancer(false)
.image("string")
.labels(Map.of("string", "string"))
.memoryMb(0)
.replicas(0)
.taints(VMwareNodePoolConfigTaintArgs.builder()
.key("string")
.value("string")
.effect("string")
.build())
.vsphereConfig(VMwareNodePoolConfigVsphereConfigArgs.builder()
.datastore("string")
.hostGroups("string")
.tags(VMwareNodePoolConfigVsphereConfigTagArgs.builder()
.category("string")
.tag("string")
.build())
.build())
.build())
.location("string")
.vmwareCluster("string")
.annotations(Map.of("string", "string"))
.displayName("string")
.name("string")
.nodePoolAutoscaling(VMwareNodePoolNodePoolAutoscalingArgs.builder()
.maxReplicas(0)
.minReplicas(0)
.build())
.project("string")
.build());
vmware_node_pool_resource = gcp.gkeonprem.VMwareNodePool("vmwareNodePoolResource",
config={
"image_type": "string",
"boot_disk_size_gb": 0,
"cpus": 0,
"enable_load_balancer": False,
"image": "string",
"labels": {
"string": "string",
},
"memory_mb": 0,
"replicas": 0,
"taints": [{
"key": "string",
"value": "string",
"effect": "string",
}],
"vsphere_config": {
"datastore": "string",
"host_groups": ["string"],
"tags": [{
"category": "string",
"tag": "string",
}],
},
},
location="string",
vmware_cluster="string",
annotations={
"string": "string",
},
display_name="string",
name="string",
node_pool_autoscaling={
"max_replicas": 0,
"min_replicas": 0,
},
project="string")
const vmwareNodePoolResource = new gcp.gkeonprem.VMwareNodePool("vmwareNodePoolResource", {
config: {
imageType: "string",
bootDiskSizeGb: 0,
cpus: 0,
enableLoadBalancer: false,
image: "string",
labels: {
string: "string",
},
memoryMb: 0,
replicas: 0,
taints: [{
key: "string",
value: "string",
effect: "string",
}],
vsphereConfig: {
datastore: "string",
hostGroups: ["string"],
tags: [{
category: "string",
tag: "string",
}],
},
},
location: "string",
vmwareCluster: "string",
annotations: {
string: "string",
},
displayName: "string",
name: "string",
nodePoolAutoscaling: {
maxReplicas: 0,
minReplicas: 0,
},
project: "string",
});
type: gcp:gkeonprem:VMwareNodePool
properties:
annotations:
string: string
config:
bootDiskSizeGb: 0
cpus: 0
enableLoadBalancer: false
image: string
imageType: string
labels:
string: string
memoryMb: 0
replicas: 0
taints:
- effect: string
key: string
value: string
vsphereConfig:
datastore: string
hostGroups:
- string
tags:
- category: string
tag: string
displayName: string
location: string
name: string
nodePoolAutoscaling:
maxReplicas: 0
minReplicas: 0
project: string
vmwareCluster: string
VMwareNodePool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The VMwareNodePool resource accepts the following input properties:
- Config VMwareNodePoolConfig
- The node configuration of the node pool. Structure is documented below.
- Location string
- The location of the resource.
- VmwareCluster string
- The cluster this node pool belongs to.
- Annotations Dictionary<string, string>
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- DisplayName string
- The display name for the node pool.
- Name string
- The vmware node pool name.
- NodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
- Node Pool autoscaling config for the node pool.
- Project string
- Config VMwareNodePoolConfigArgs
- The node configuration of the node pool. Structure is documented below.
- Location string
- The location of the resource.
- VmwareCluster string
- The cluster this node pool belongs to.
- Annotations map[string]string
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- DisplayName string
- The display name for the node pool.
- Name string
- The vmware node pool name.
- NodePoolAutoscaling VMwareNodePoolNodePoolAutoscalingArgs
- Node Pool autoscaling config for the node pool.
- Project string
- config VMwareNodePoolConfig
- The node configuration of the node pool. Structure is documented below.
- location String
- The location of the resource.
- vmwareCluster String
- The cluster this node pool belongs to.
- annotations Map<String,String>
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- displayName String
- The display name for the node pool.
- name String
- The vmware node pool name.
- nodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
- Node Pool autoscaling config for the node pool.
- project String
- config VMwareNodePoolConfig
- The node configuration of the node pool. Structure is documented below.
- location string
- The location of the resource.
- vmwareCluster string
- The cluster this node pool belongs to.
- annotations {[key: string]: string}
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- displayName string
- The display name for the node pool.
- name string
- The vmware node pool name.
- nodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
- Node Pool autoscaling config for the node pool.
- project string
- config VMwareNodePoolConfigArgs
- The node configuration of the node pool. Structure is documented below.
- location str
- The location of the resource.
- vmware_cluster str
- The cluster this node pool belongs to.
- annotations Mapping[str, str]
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- display_name str
- The display name for the node pool.
- name str
- The vmware node pool name.
- node_pool_autoscaling VMwareNodePoolNodePoolAutoscalingArgs
- Node Pool autoscaling config for the node pool.
- project str
- config Property Map
- The node configuration of the node pool. Structure is documented below.
- location String
- The location of the resource.
- vmwareCluster String
- The cluster this node pool belongs to.
- annotations Map<String>
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- displayName String
- The display name for the node pool.
- name String
- The vmware node pool name.
- nodePoolAutoscaling Property Map
- Node Pool autoscaling config for the node pool.
- project String
Outputs
All input properties are implicitly available as output properties. Additionally, the VMwareNodePool resource produces the following output properties:
- CreateTime string
- The time the cluster was created, in RFC3339 text format.
- DeleteTime string
- The time the cluster was deleted, in RFC3339 text format.
- EffectiveAnnotations Dictionary<string, string>
- Etag string
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- Id string
- The provider-assigned unique ID for this managed resource.
- OnPremVersion string
- Anthos version for the node pool. Defaults to the user cluster version.
- Reconciling bool
- If set, there are currently changes in flight to the node pool.
- State string
- (Output) The lifecycle state of the condition.
- Statuses List<VMwareNodePoolStatus>
- ResourceStatus representing detailed cluster state. Structure is documented below.
- Uid string
- The unique identifier of the node pool.
- UpdateTime string
- The time the cluster was last updated, in RFC3339 text format.
- CreateTime string
- The time the cluster was created, in RFC3339 text format.
- DeleteTime string
- The time the cluster was deleted, in RFC3339 text format.
- EffectiveAnnotations map[string]string
- Etag string
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- Id string
- The provider-assigned unique ID for this managed resource.
- OnPremVersion string
- Anthos version for the node pool. Defaults to the user cluster version.
- Reconciling bool
- If set, there are currently changes in flight to the node pool.
- State string
- (Output) The lifecycle state of the condition.
- Statuses []VMwareNodePoolStatus
- ResourceStatus representing detailed cluster state. Structure is documented below.
- Uid string
- The unique identifier of the node pool.
- UpdateTime string
- The time the cluster was last updated, in RFC3339 text format.
- createTime String
- The time the cluster was created, in RFC3339 text format.
- deleteTime String
- The time the cluster was deleted, in RFC3339 text format.
- effectiveAnnotations Map<String,String>
- etag String
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- id String
- The provider-assigned unique ID for this managed resource.
- onPremVersion String
- Anthos version for the node pool. Defaults to the user cluster version.
- reconciling Boolean
- If set, there are currently changes in flight to the node pool.
- state String
- (Output) The lifecycle state of the condition.
- statuses List<VMwareNodePoolStatus>
- ResourceStatus representing detailed cluster state. Structure is documented below.
- uid String
- The unique identifier of the node pool.
- updateTime String
- The time the cluster was last updated, in RFC3339 text format.
- createTime string
- The time the cluster was created, in RFC3339 text format.
- deleteTime string
- The time the cluster was deleted, in RFC3339 text format.
- effectiveAnnotations {[key: string]: string}
- etag string
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- id string
- The provider-assigned unique ID for this managed resource.
- onPremVersion string
- Anthos version for the node pool. Defaults to the user cluster version.
- reconciling boolean
- If set, there are currently changes in flight to the node pool.
- state string
- (Output) The lifecycle state of the condition.
- statuses VMwareNodePoolStatus[]
- ResourceStatus representing detailed cluster state. Structure is documented below.
- uid string
- The unique identifier of the node pool.
- updateTime string
- The time the cluster was last updated, in RFC3339 text format.
- create_time str
- The time the cluster was created, in RFC3339 text format.
- delete_time str
- The time the cluster was deleted, in RFC3339 text format.
- effective_annotations Mapping[str, str]
- etag str
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- id str
- The provider-assigned unique ID for this managed resource.
- on_prem_version str
- Anthos version for the node pool. Defaults to the user cluster version.
- reconciling bool
- If set, there are currently changes in flight to the node pool.
- state str
- (Output) The lifecycle state of the condition.
- statuses Sequence[VMwareNodePoolStatus]
- ResourceStatus representing detailed cluster state. Structure is documented below.
- uid str
- The unique identifier of the node pool.
- update_time str
- The time the cluster was last updated, in RFC3339 text format.
- createTime String
- The time the cluster was created, in RFC3339 text format.
- deleteTime String
- The time the cluster was deleted, in RFC3339 text format.
- effectiveAnnotations Map<String>
- etag String
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- id String
- The provider-assigned unique ID for this managed resource.
- onPremVersion String
- Anthos version for the node pool. Defaults to the user cluster version.
- reconciling Boolean
- If set, there are currently changes in flight to the node pool.
- state String
- (Output) The lifecycle state of the condition.
- statuses List<Property Map>
- ResourceStatus representing detailed cluster state. Structure is documented below.
- uid String
- The unique identifier of the node pool.
- updateTime String
- The time the cluster was last updated, in RFC3339 text format.
Look up Existing VMwareNodePool Resource
Get an existing VMwareNodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: VMwareNodePoolState, opts?: CustomResourceOptions): VMwareNodePool
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
annotations: Optional[Mapping[str, str]] = None,
config: Optional[VMwareNodePoolConfigArgs] = None,
create_time: Optional[str] = None,
delete_time: Optional[str] = None,
display_name: Optional[str] = None,
effective_annotations: Optional[Mapping[str, str]] = None,
etag: Optional[str] = None,
location: Optional[str] = None,
name: Optional[str] = None,
node_pool_autoscaling: Optional[VMwareNodePoolNodePoolAutoscalingArgs] = None,
on_prem_version: Optional[str] = None,
project: Optional[str] = None,
reconciling: Optional[bool] = None,
state: Optional[str] = None,
statuses: Optional[Sequence[VMwareNodePoolStatusArgs]] = None,
uid: Optional[str] = None,
update_time: Optional[str] = None,
vmware_cluster: Optional[str] = None) -> VMwareNodePool
func GetVMwareNodePool(ctx *Context, name string, id IDInput, state *VMwareNodePoolState, opts ...ResourceOption) (*VMwareNodePool, error)
public static VMwareNodePool Get(string name, Input<string> id, VMwareNodePoolState? state, CustomResourceOptions? opts = null)
public static VMwareNodePool get(String name, Output<String> id, VMwareNodePoolState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Annotations Dictionary<string, string>
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- Config
VMware
Node Pool Config - The node configuration of the node pool. Structure is documented below.
- Create
Time string - The time the cluster was created, in RFC3339 text format.
- Delete
Time string - The time the cluster was deleted, in RFC3339 text format.
- Display
Name string - The display name for the node pool.
- Effective
Annotations Dictionary<string, string> - Etag string
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- Location string
- The location of the resource.
- Name string
- The vmware node pool name.
- Node
Pool VMwareAutoscaling Node Pool Node Pool Autoscaling - Node Pool autoscaling config for the node pool.
- On
Prem stringVersion - Anthos version for the node pool. Defaults to the user cluster version.
- Project string
- Reconciling bool
- If set, there are currently changes in flight to the node pool.
- State string
- (Output) The lifecycle state of the condition.
- Statuses
List<VMware
Node Pool Status> - ResourceStatus representing detailed cluster state. Structure is documented below.
- Uid string
- The unique identifier of the node pool.
- Update
Time string - The time the cluster was last updated, in RFC3339 text format.
- Vmware
Cluster string - The cluster this node pool belongs to.
- Annotations map[string]string
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- Config
VMware
Node Pool Config Args - The node configuration of the node pool. Structure is documented below.
- Create
Time string - The time the cluster was created, in RFC3339 text format.
- Delete
Time string - The time the cluster was deleted, in RFC3339 text format.
- Display
Name string - The display name for the node pool.
- Effective
Annotations map[string]string - Etag string
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- Location string
- The location of the resource.
- Name string
- The vmware node pool name.
- Node
Pool VMwareAutoscaling Node Pool Node Pool Autoscaling Args - Node Pool autoscaling config for the node pool.
- On
Prem stringVersion - Anthos version for the node pool. Defaults to the user cluster version.
- Project string
- Reconciling bool
- If set, there are currently changes in flight to the node pool.
- State string
- (Output) The lifecycle state of the condition.
- Statuses
[]VMware
Node Pool Status Args - ResourceStatus representing detailed cluster state. Structure is documented below.
- Uid string
- The unique identifier of the node pool.
- Update
Time string - The time the cluster was last updated, in RFC3339 text format.
- Vmware
Cluster string - The cluster this node pool belongs to.
- annotations Map<String,String>
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- config
VMware
Node Pool Config - The node configuration of the node pool. Structure is documented below.
- create
Time String - The time the cluster was created, in RFC3339 text format.
- delete
Time String - The time the cluster was deleted, in RFC3339 text format.
- display
Name String - The display name for the node pool.
- effective
Annotations Map<String,String> - etag String
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- location String
- The location of the resource.
- name String
- The vmware node pool name.
- node
Pool VMwareAutoscaling Node Pool Node Pool Autoscaling - Node Pool autoscaling config for the node pool.
- on
Prem StringVersion - Anthos version for the node pool. Defaults to the user cluster version.
- project String
- reconciling Boolean
- If set, there are currently changes in flight to the node pool.
- state String
- (Output) The lifecycle state of the condition.
- statuses
List<VMware
Node Pool Status> - ResourceStatus representing detailed cluster state. Structure is documented below.
- uid String
- The unique identifier of the node pool.
- update
Time String - The time the cluster was last updated, in RFC3339 text format.
- vmware
Cluster String - The cluster this node pool belongs to.
- annotations {[key: string]: string}
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- config
VMware
Node Pool Config - The node configuration of the node pool. Structure is documented below.
- create
Time string - The time the cluster was created, in RFC3339 text format.
- delete
Time string - The time the cluster was deleted, in RFC3339 text format.
- display
Name string - The display name for the node pool.
- effective
Annotations {[key: string]: string} - etag string
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- location string
- The location of the resource.
- name string
- The vmware node pool name.
- node
Pool VMwareAutoscaling Node Pool Node Pool Autoscaling - Node Pool autoscaling config for the node pool.
- on
Prem stringVersion - Anthos version for the node pool. Defaults to the user cluster version.
- project string
- reconciling boolean
- If set, there are currently changes in flight to the node pool.
- state string
- (Output) The lifecycle state of the condition.
- statuses
VMware
Node Pool Status[] - ResourceStatus representing detailed cluster state. Structure is documented below.
- uid string
- The unique identifier of the node pool.
- update
Time string - The time the cluster was last updated, in RFC3339 text format.
- vmware
Cluster string - The cluster this node pool belongs to.
- annotations Mapping[str, str]
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- config
VMware
Node Pool Config Args - The node configuration of the node pool. Structure is documented below.
- create_
time str - The time the cluster was created, in RFC3339 text format.
- delete_
time str - The time the cluster was deleted, in RFC3339 text format.
- display_
name str - The display name for the node pool.
- effective_
annotations Mapping[str, str] - etag str
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- location str
- The location of the resource.
- name str
- The vmware node pool name.
- node_
pool_ VMwareautoscaling Node Pool Node Pool Autoscaling Args - Node Pool autoscaling config for the node pool.
- on_
prem_ strversion - Anthos version for the node pool. Defaults to the user cluster version.
- project str
- reconciling bool
- If set, there are currently changes in flight to the node pool.
- state str
- (Output) The lifecycle state of the condition.
- statuses
Sequence[VMware
Node Pool Status Args] - ResourceStatus representing detailed cluster state. Structure is documented below.
- uid str
- The unique identifier of the node pool.
- update_
time str - The time the cluster was last updated, in RFC3339 text format.
- vmware_
cluster str - The cluster this node pool belongs to.
- annotations Map<String>
- Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
- config Property Map
- The node configuration of the node pool. Structure is documented below.
- create
Time String - The time the cluster was created, in RFC3339 text format.
- delete
Time String - The time the cluster was deleted, in RFC3339 text format.
- display
Name String - The display name for the node pool.
- effective
Annotations Map<String> - etag String
- This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
- location String
- The location of the resource.
- name String
- The vmware node pool name.
- node
Pool Property MapAutoscaling - Node Pool autoscaling config for the node pool.
- on
Prem StringVersion - Anthos version for the node pool. Defaults to the user cluster version.
- project String
- reconciling Boolean
- If set, there are currently changes in flight to the node pool.
- state String
- (Output) The lifecycle state of the condition.
- statuses List<Property Map>
- ResourceStatus representing detailed cluster state. Structure is documented below.
- uid String
- The unique identifier of the node pool.
- update
Time String - The time the cluster was last updated, in RFC3339 text format.
- vmware
Cluster String - The cluster this node pool belongs to.
Supporting Types
VMwareNodePoolConfig, VMwareNodePoolConfigArgs
- ImageType string
- The OS image to be used for each node in a node pool. Currently `cos`, `cos_cgv2`, `ubuntu`, `ubuntu_cgv2`, `ubuntu_containerd` and `windows` are supported.
- BootDiskSizeGb int
- VMware disk size to be used during creation.
- Cpus int
- The number of CPUs for each node in the node pool.
- EnableLoadBalancer bool
- Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
- Image string
- The OS image name in vCenter, only valid when using Windows.
- Labels Dictionary<string, string>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
- MemoryMb int
- The megabytes of memory for each node in the node pool.
- Replicas int
- The number of nodes in the node pool.
- Taints List<VMwareNodePoolConfigTaint>
- The initial taints assigned to nodes of this node pool. Structure is documented below.
- VsphereConfig VMwareNodePoolConfigVsphereConfig
- Specifies the vSphere config for node pool. Structure is documented below.
- Image
Type string - The OS image to be used for each node in a node pool.
Currently
cos
,cos_cgv2
,ubuntu
,ubuntu_cgv2
,ubuntu_containerd
andwindows
are supported. - Boot
Disk intSize Gb - VMware disk size to be used during creation.
- Cpus int
- The number of CPUs for each node in the node pool.
- Enable
Load boolBalancer - Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
- Image string
- The OS image name in vCenter, only valid when using Windows.
- Labels map[string]string
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
- Memory
Mb int - The megabytes of memory for each node in the node pool.
- Replicas int
- The number of nodes in the node pool.
- Taints
[]VMware
Node Pool Config Taint - The initial taints assigned to nodes of this node pool. Structure is documented below.
- Vsphere
Config VMwareNode Pool Config Vsphere Config - Specifies the vSphere config for node pool. Structure is documented below.
- image
Type String - The OS image to be used for each node in a node pool.
Currently
cos
,cos_cgv2
,ubuntu
,ubuntu_cgv2
,ubuntu_containerd
andwindows
are supported. - boot
Disk IntegerSize Gb - VMware disk size to be used during creation.
- cpus Integer
- The number of CPUs for each node in the node pool.
- enable
Load BooleanBalancer - Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
- image String
- The OS image name in vCenter, only valid when using Windows.
- labels Map<String,String>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
- memory
Mb Integer - The megabytes of memory for each node in the node pool.
- replicas Integer
- The number of nodes in the node pool.
- taints
List<VMware
Node Pool Config Taint> - The initial taints assigned to nodes of this node pool. Structure is documented below.
- vsphere
Config VMwareNode Pool Config Vsphere Config - Specifies the vSphere config for node pool. Structure is documented below.
- image
Type string - The OS image to be used for each node in a node pool.
Currently
cos
,cos_cgv2
,ubuntu
,ubuntu_cgv2
,ubuntu_containerd
andwindows
are supported. - boot
Disk numberSize Gb - VMware disk size to be used during creation.
- cpus number
- The number of CPUs for each node in the node pool.
- enable
Load booleanBalancer - Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
- image string
- The OS image name in vCenter, only valid when using Windows.
- labels {[key: string]: string}
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
- memory
Mb number - The megabytes of memory for each node in the node pool.
- replicas number
- The number of nodes in the node pool.
- taints
VMware
Node Pool Config Taint[] - The initial taints assigned to nodes of this node pool. Structure is documented below.
- vsphere
Config VMwareNode Pool Config Vsphere Config - Specifies the vSphere config for node pool. Structure is documented below.
- image_type str - The OS image to be used for each node in a node pool.
Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
- boot_disk_size_gb int - VMware disk size to be used during creation.
- cpus int
- The number of CPUs for each node in the node pool.
- enable_load_balancer bool - Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
- image str
- The OS image name in vCenter, only valid when using Windows.
- labels Mapping[str, str]
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
- memory_mb int - The megabytes of memory for each node in the node pool.
- replicas int
- The number of nodes in the node pool.
- taints
Sequence[VMware
Node Pool Config Taint] - The initial taints assigned to nodes of this node pool. Structure is documented below.
- vsphere_
config VMwareNode Pool Config Vsphere Config - Specifies the vSphere config for node pool. Structure is documented below.
- imageType String - The OS image to be used for each node in a node pool.
Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
- bootDiskSizeGb Number - VMware disk size to be used during creation.
- cpus Number
- The number of CPUs for each node in the node pool.
- enableLoadBalancer Boolean - Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
- image String
- The OS image name in vCenter, only valid when using Windows.
- labels Map<String>
- The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
- memoryMb Number - The megabytes of memory for each node in the node pool.
- replicas Number
- The number of nodes in the node pool.
- taints List<Property Map>
- The initial taints assigned to nodes of this node pool. Structure is documented below.
- vsphere
Config Property Map - Specifies the vSphere config for node pool. Structure is documented below.
VMwareNodePoolConfigTaint, VMwareNodePoolConfigTaintArgs
VMwareNodePoolConfigVsphereConfig, VMwareNodePoolConfigVsphereConfigArgs
- Datastore string
- The name of the vCenter datastore. Inherited from the user cluster.
- Host
Groups List<string> - Vsphere host groups to apply to all VMs in the node pool
- List<VMware
Node Pool Config Vsphere Config Tag> - Tags to apply to VMs. Structure is documented below.
- Datastore string
- The name of the vCenter datastore. Inherited from the user cluster.
- Host
Groups []string - Vsphere host groups to apply to all VMs in the node pool
- []VMware
Node Pool Config Vsphere Config Tag - Tags to apply to VMs. Structure is documented below.
- datastore String
- The name of the vCenter datastore. Inherited from the user cluster.
- host
Groups List<String> - Vsphere host groups to apply to all VMs in the node pool
- List<VMware
Node Pool Config Vsphere Config Tag> - Tags to apply to VMs. Structure is documented below.
- datastore string
- The name of the vCenter datastore. Inherited from the user cluster.
- host
Groups string[] - Vsphere host groups to apply to all VMs in the node pool
- VMware
Node Pool Config Vsphere Config Tag[] - Tags to apply to VMs. Structure is documented below.
- datastore str
- The name of the vCenter datastore. Inherited from the user cluster.
- host_
groups Sequence[str] - Vsphere host groups to apply to all VMs in the node pool
- Sequence[VMware
Node Pool Config Vsphere Config Tag] - Tags to apply to VMs. Structure is documented below.
- datastore String
- The name of the vCenter datastore. Inherited from the user cluster.
- host
Groups List<String> - Vsphere host groups to apply to all VMs in the node pool
- List<Property Map>
- Tags to apply to VMs. Structure is documented below.
VMwareNodePoolConfigVsphereConfigTag, VMwareNodePoolConfigVsphereConfigTagArgs
VMwareNodePoolNodePoolAutoscaling, VMwareNodePoolNodePoolAutoscalingArgs
- Max
Replicas int - Maximum number of replicas in the NodePool.
- Min
Replicas int - Minimum number of replicas in the NodePool.
- Max
Replicas int - Maximum number of replicas in the NodePool.
- Min
Replicas int - Minimum number of replicas in the NodePool.
- max
Replicas Integer - Maximum number of replicas in the NodePool.
- min
Replicas Integer - Minimum number of replicas in the NodePool.
- max
Replicas number - Maximum number of replicas in the NodePool.
- min
Replicas number - Minimum number of replicas in the NodePool.
- max_
replicas int - Maximum number of replicas in the NodePool.
- min_
replicas int - Minimum number of replicas in the NodePool.
- max
Replicas Number - Maximum number of replicas in the NodePool.
- min
Replicas Number - Minimum number of replicas in the NodePool.
VMwareNodePoolStatus, VMwareNodePoolStatusArgs
- Conditions
List<VMware
Node Pool Status Condition> - (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. Structure is documented below.
- Error
Message string - (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can be used to surface error message to indicate real problems requiring user intervention.
- Conditions
[]VMware
Node Pool Status Condition - (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. Structure is documented below.
- Error
Message string - (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can be used to surface error message to indicate real problems requiring user intervention.
- conditions
List<VMware
Node Pool Status Condition> - (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. Structure is documented below.
- error
Message String - (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can be used to surface error message to indicate real problems requiring user intervention.
- conditions
VMware
Node Pool Status Condition[] - (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. Structure is documented below.
- error
Message string - (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can be used to surface error message to indicate real problems requiring user intervention.
- conditions
Sequence[VMware
Node Pool Status Condition] - (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. Structure is documented below.
- error_
message str - (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can be used to surface error message to indicate real problems requiring user intervention.
- conditions List<Property Map>
- (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from user cluster controller. Structure is documented below.
- error
Message String - (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can be used to surface error message to indicate real problems requiring user intervention.
VMwareNodePoolStatusCondition, VMwareNodePoolStatusConditionArgs
- LastTransitionTime string - (Output) Last time the condition transitioned from one status to another.
- Message string
- (Output) Human-readable message indicating details about last transition.
- Reason string
- (Output) Machine-readable message indicating details about last transition.
- State string
- (Output) The lifecycle state of the condition.
- Type string
- (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
- LastTransitionTime string - (Output) Last time the condition transitioned from one status to another.
- Message string
- (Output) Human-readable message indicating details about last transition.
- Reason string
- (Output) Machine-readable message indicating details about last transition.
- State string
- (Output) The lifecycle state of the condition.
- Type string
- (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
- lastTransitionTime String - (Output) Last time the condition transitioned from one status to another.
- message String
- (Output) Human-readable message indicating details about last transition.
- reason String
- (Output) Machine-readable message indicating details about last transition.
- state String
- (Output) The lifecycle state of the condition.
- type String
- (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
- lastTransitionTime string - (Output) Last time the condition transitioned from one status to another.
- message string
- (Output) Human-readable message indicating details about last transition.
- reason string
- (Output) Machine-readable message indicating details about last transition.
- state string
- (Output) The lifecycle state of the condition.
- type string
- (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
- last_transition_time str - (Output) Last time the condition transitioned from one status to another.
- message str
- (Output) Human-readable message indicating details about last transition.
- reason str
- (Output) Machine-readable message indicating details about last transition.
- state str
- (Output) The lifecycle state of the condition.
- type str
- (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
- lastTransitionTime String - (Output) Last time the condition transitioned from one status to another.
- message String
- (Output) Human-readable message indicating details about last transition.
- reason String
- (Output) Machine-readable message indicating details about last transition.
- state String
- (Output) The lifecycle state of the condition.
- type String
- (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
Import
VmwareNodePool can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/vmwareClusters/{{vmware_cluster}}/vmwareNodePools/{{name}}
{{project}}/{{location}}/{{vmware_cluster}}/{{name}}
{{location}}/{{vmware_cluster}}/{{name}}
When using the pulumi import
command, VmwareNodePool can be imported using one of the formats above. For example:
$ pulumi import gcp:gkeonprem/vMwareNodePool:VMwareNodePool default projects/{{project}}/locations/{{location}}/vmwareClusters/{{vmware_cluster}}/vmwareNodePools/{{name}}
$ pulumi import gcp:gkeonprem/vMwareNodePool:VMwareNodePool default {{project}}/{{location}}/{{vmware_cluster}}/{{name}}
$ pulumi import gcp:gkeonprem/vMwareNodePool:VMwareNodePool default {{location}}/{{vmware_cluster}}/{{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.