
gcp.gkeonprem.VMwareNodePool

Google Cloud Classic v8.9.3 published on Monday, Nov 18, 2024 by Pulumi

    A Google VMware node pool.

    Example Usage

    Gkeonprem Vmware Node Pool Basic

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const default_basic = new gcp.gkeonprem.VMwareCluster("default-basic", {
        name: "my-cluster",
        location: "us-west1",
        adminClusterMembership: "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
        description: "test cluster",
        onPremVersion: "1.13.1-gke.35",
        networkConfig: {
            serviceAddressCidrBlocks: ["10.96.0.0/12"],
            podAddressCidrBlocks: ["192.168.0.0/16"],
            dhcpIpConfig: {
                enabled: true,
            },
        },
        controlPlaneNode: {
            cpus: 4,
            memory: 8192,
            replicas: 1,
        },
        loadBalancer: {
            vipConfig: {
                controlPlaneVip: "10.251.133.5",
                ingressVip: "10.251.135.19",
            },
            metalLbConfig: {
                addressPools: [
                    {
                        pool: "ingress-ip",
                        manualAssign: true,
                        addresses: ["10.251.135.19"],
                    },
                    {
                        pool: "lb-test-ip",
                        manualAssign: true,
                        addresses: ["10.251.135.19"],
                    },
                ],
            },
        },
    });
    const nodepool_basic = new gcp.gkeonprem.VMwareNodePool("nodepool-basic", {
        name: "my-nodepool",
        location: "us-west1",
        vmwareCluster: default_basic.name,
        config: {
            replicas: 3,
            imageType: "ubuntu_containerd",
            enableLoadBalancer: true,
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default_basic = gcp.gkeonprem.VMwareCluster("default-basic",
        name="my-cluster",
        location="us-west1",
        admin_cluster_membership="projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
        description="test cluster",
        on_prem_version="1.13.1-gke.35",
        network_config={
            "service_address_cidr_blocks": ["10.96.0.0/12"],
            "pod_address_cidr_blocks": ["192.168.0.0/16"],
            "dhcp_ip_config": {
                "enabled": True,
            },
        },
        control_plane_node={
            "cpus": 4,
            "memory": 8192,
            "replicas": 1,
        },
        load_balancer={
            "vip_config": {
                "control_plane_vip": "10.251.133.5",
                "ingress_vip": "10.251.135.19",
            },
            "metal_lb_config": {
                "address_pools": [
                    {
                        "pool": "ingress-ip",
                        "manual_assign": True,
                        "addresses": ["10.251.135.19"],
                    },
                    {
                        "pool": "lb-test-ip",
                        "manual_assign": True,
                        "addresses": ["10.251.135.19"],
                    },
                ],
            },
        })
    nodepool_basic = gcp.gkeonprem.VMwareNodePool("nodepool-basic",
        name="my-nodepool",
        location="us-west1",
        vmware_cluster=default_basic.name,
        config={
            "replicas": 3,
            "image_type": "ubuntu_containerd",
            "enable_load_balancer": True,
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkeonprem"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := gkeonprem.NewVMwareCluster(ctx, "default-basic", &gkeonprem.VMwareClusterArgs{
    			Name:                   pulumi.String("my-cluster"),
    			Location:               pulumi.String("us-west1"),
    			AdminClusterMembership: pulumi.String("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test"),
    			Description:            pulumi.String("test cluster"),
    			OnPremVersion:          pulumi.String("1.13.1-gke.35"),
    			NetworkConfig: &gkeonprem.VMwareClusterNetworkConfigArgs{
    				ServiceAddressCidrBlocks: pulumi.StringArray{
    					pulumi.String("10.96.0.0/12"),
    				},
    				PodAddressCidrBlocks: pulumi.StringArray{
    					pulumi.String("192.168.0.0/16"),
    				},
    				DhcpIpConfig: &gkeonprem.VMwareClusterNetworkConfigDhcpIpConfigArgs{
    					Enabled: pulumi.Bool(true),
    				},
    			},
    			ControlPlaneNode: &gkeonprem.VMwareClusterControlPlaneNodeArgs{
    				Cpus:     pulumi.Int(4),
    				Memory:   pulumi.Int(8192),
    				Replicas: pulumi.Int(1),
    			},
    			LoadBalancer: &gkeonprem.VMwareClusterLoadBalancerArgs{
    				VipConfig: &gkeonprem.VMwareClusterLoadBalancerVipConfigArgs{
    					ControlPlaneVip: pulumi.String("10.251.133.5"),
    					IngressVip:      pulumi.String("10.251.135.19"),
    				},
    				MetalLbConfig: &gkeonprem.VMwareClusterLoadBalancerMetalLbConfigArgs{
    					AddressPools: gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArray{
    						&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
    							Pool:         pulumi.String("ingress-ip"),
    							ManualAssign: pulumi.Bool(true),
    							Addresses: pulumi.StringArray{
    								pulumi.String("10.251.135.19"),
    							},
    						},
    						&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
    							Pool:         pulumi.String("lb-test-ip"),
    							ManualAssign: pulumi.Bool(true),
    							Addresses: pulumi.StringArray{
    								pulumi.String("10.251.135.19"),
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = gkeonprem.NewVMwareNodePool(ctx, "nodepool-basic", &gkeonprem.VMwareNodePoolArgs{
    			Name:          pulumi.String("my-nodepool"),
    			Location:      pulumi.String("us-west1"),
    			VmwareCluster: defaultBasic.Name,
    			Config: &gkeonprem.VMwareNodePoolConfigArgs{
    				Replicas:           pulumi.Int(3),
    				ImageType:          pulumi.String("ubuntu_containerd"),
    				EnableLoadBalancer: pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var default_basic = new Gcp.GkeOnPrem.VMwareCluster("default-basic", new()
        {
            Name = "my-cluster",
            Location = "us-west1",
            AdminClusterMembership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
            Description = "test cluster",
            OnPremVersion = "1.13.1-gke.35",
            NetworkConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigArgs
            {
                ServiceAddressCidrBlocks = new[]
                {
                    "10.96.0.0/12",
                },
                PodAddressCidrBlocks = new[]
                {
                    "192.168.0.0/16",
                },
                DhcpIpConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs
                {
                    Enabled = true,
                },
            },
            ControlPlaneNode = new Gcp.GkeOnPrem.Inputs.VMwareClusterControlPlaneNodeArgs
            {
                Cpus = 4,
                Memory = 8192,
                Replicas = 1,
            },
            LoadBalancer = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerArgs
            {
                VipConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerVipConfigArgs
                {
                    ControlPlaneVip = "10.251.133.5",
                    IngressVip = "10.251.135.19",
                },
                MetalLbConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigArgs
                {
                    AddressPools = new[]
                    {
                        new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
                        {
                            Pool = "ingress-ip",
                            ManualAssign = true,
                            Addresses = new[]
                            {
                                "10.251.135.19",
                            },
                        },
                        new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
                        {
                            Pool = "lb-test-ip",
                            ManualAssign = true,
                            Addresses = new[]
                            {
                                "10.251.135.19",
                            },
                        },
                    },
                },
            },
        });
    
        var nodepool_basic = new Gcp.GkeOnPrem.VMwareNodePool("nodepool-basic", new()
        {
            Name = "my-nodepool",
            Location = "us-west1",
            VmwareCluster = default_basic.Name,
            Config = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigArgs
            {
                Replicas = 3,
                ImageType = "ubuntu_containerd",
                EnableLoadBalancer = true,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.gkeonprem.VMwareCluster;
    import com.pulumi.gcp.gkeonprem.VMwareClusterArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterControlPlaneNodeArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerVipConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs;
    import com.pulumi.gcp.gkeonprem.VMwareNodePool;
    import com.pulumi.gcp.gkeonprem.VMwareNodePoolArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_basic = new VMwareCluster("default-basic", VMwareClusterArgs.builder()
                .name("my-cluster")
                .location("us-west1")
                .adminClusterMembership("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test")
                .description("test cluster")
                .onPremVersion("1.13.1-gke.35")
                .networkConfig(VMwareClusterNetworkConfigArgs.builder()
                    .serviceAddressCidrBlocks("10.96.0.0/12")
                    .podAddressCidrBlocks("192.168.0.0/16")
                    .dhcpIpConfig(VMwareClusterNetworkConfigDhcpIpConfigArgs.builder()
                        .enabled(true)
                        .build())
                    .build())
                .controlPlaneNode(VMwareClusterControlPlaneNodeArgs.builder()
                    .cpus(4)
                    .memory(8192)
                    .replicas(1)
                    .build())
                .loadBalancer(VMwareClusterLoadBalancerArgs.builder()
                    .vipConfig(VMwareClusterLoadBalancerVipConfigArgs.builder()
                        .controlPlaneVip("10.251.133.5")
                        .ingressVip("10.251.135.19")
                        .build())
                    .metalLbConfig(VMwareClusterLoadBalancerMetalLbConfigArgs.builder()
                        .addressPools(                    
                            VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                                .pool("ingress-ip")
                                .manualAssign("true")
                                .addresses("10.251.135.19")
                                .build(),
                            VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                                .pool("lb-test-ip")
                                .manualAssign("true")
                                .addresses("10.251.135.19")
                                .build())
                        .build())
                    .build())
                .build());
    
            var nodepool_basic = new VMwareNodePool("nodepool-basic", VMwareNodePoolArgs.builder()
                .name("my-nodepool")
                .location("us-west1")
                .vmwareCluster(default_basic.name())
                .config(VMwareNodePoolConfigArgs.builder()
                    .replicas(3)
                    .imageType("ubuntu_containerd")
                    .enableLoadBalancer(true)
                    .build())
                .build());
    
        }
    }
    
    resources:
      default-basic:
        type: gcp:gkeonprem:VMwareCluster
        properties:
          name: my-cluster
          location: us-west1
          adminClusterMembership: projects/870316890899/locations/global/memberships/gkeonprem-terraform-test
          description: test cluster
          onPremVersion: 1.13.1-gke.35
          networkConfig:
            serviceAddressCidrBlocks:
              - 10.96.0.0/12
            podAddressCidrBlocks:
              - 192.168.0.0/16
            dhcpIpConfig:
              enabled: true
          controlPlaneNode:
            cpus: 4
            memory: 8192
            replicas: 1
          loadBalancer:
            vipConfig:
              controlPlaneVip: 10.251.133.5
              ingressVip: 10.251.135.19
            metalLbConfig:
              addressPools:
                - pool: ingress-ip
                  manualAssign: true
                  addresses:
                    - 10.251.135.19
                - pool: lb-test-ip
                  manualAssign: true
                  addresses:
                    - 10.251.135.19
      nodepool-basic:
        type: gcp:gkeonprem:VMwareNodePool
        properties:
          name: my-nodepool
          location: us-west1
          vmwareCluster: ${["default-basic"].name}
          config:
            replicas: 3
            imageType: ubuntu_containerd
            enableLoadBalancer: true
    

    Gkeonprem Vmware Node Pool Full

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const default_full = new gcp.gkeonprem.VMwareCluster("default-full", {
        name: "my-cluster",
        location: "us-west1",
        adminClusterMembership: "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
        description: "test cluster",
        onPremVersion: "1.13.1-gke.35",
        networkConfig: {
            serviceAddressCidrBlocks: ["10.96.0.0/12"],
            podAddressCidrBlocks: ["192.168.0.0/16"],
            dhcpIpConfig: {
                enabled: true,
            },
        },
        controlPlaneNode: {
            cpus: 4,
            memory: 8192,
            replicas: 1,
        },
        loadBalancer: {
            vipConfig: {
                controlPlaneVip: "10.251.133.5",
                ingressVip: "10.251.135.19",
            },
            metalLbConfig: {
                addressPools: [
                    {
                        pool: "ingress-ip",
                        manualAssign: true,
                        addresses: ["10.251.135.19"],
                    },
                    {
                        pool: "lb-test-ip",
                        manualAssign: true,
                        addresses: ["10.251.135.19"],
                    },
                ],
            },
        },
    });
    const nodepool_full = new gcp.gkeonprem.VMwareNodePool("nodepool-full", {
        name: "my-nodepool",
        location: "us-west1",
        vmwareCluster: default_full.name,
        annotations: {},
        config: {
            cpus: 4,
            memoryMb: 8196,
            replicas: 3,
            imageType: "ubuntu_containerd",
            image: "image",
            bootDiskSizeGb: 10,
            taints: [
                {
                    key: "key",
                    value: "value",
                },
                {
                    key: "key",
                    value: "value",
                    effect: "NO_SCHEDULE",
                },
            ],
            labels: {},
            vsphereConfig: {
                datastore: "test-datastore",
                tags: [
                    {
                        category: "test-category-1",
                        tag: "tag-1",
                    },
                    {
                        category: "test-category-2",
                        tag: "tag-2",
                    },
                ],
                hostGroups: [
                    "host1",
                    "host2",
                ],
            },
            enableLoadBalancer: true,
        },
        nodePoolAutoscaling: {
            minReplicas: 1,
            maxReplicas: 5,
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default_full = gcp.gkeonprem.VMwareCluster("default-full",
        name="my-cluster",
        location="us-west1",
        admin_cluster_membership="projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
        description="test cluster",
        on_prem_version="1.13.1-gke.35",
        network_config={
            "service_address_cidr_blocks": ["10.96.0.0/12"],
            "pod_address_cidr_blocks": ["192.168.0.0/16"],
            "dhcp_ip_config": {
                "enabled": True,
            },
        },
        control_plane_node={
            "cpus": 4,
            "memory": 8192,
            "replicas": 1,
        },
        load_balancer={
            "vip_config": {
                "control_plane_vip": "10.251.133.5",
                "ingress_vip": "10.251.135.19",
            },
            "metal_lb_config": {
                "address_pools": [
                    {
                        "pool": "ingress-ip",
                        "manual_assign": True,
                        "addresses": ["10.251.135.19"],
                    },
                    {
                        "pool": "lb-test-ip",
                        "manual_assign": True,
                        "addresses": ["10.251.135.19"],
                    },
                ],
            },
        })
    nodepool_full = gcp.gkeonprem.VMwareNodePool("nodepool-full",
        name="my-nodepool",
        location="us-west1",
        vmware_cluster=default_full.name,
        annotations={},
        config={
            "cpus": 4,
            "memory_mb": 8196,
            "replicas": 3,
            "image_type": "ubuntu_containerd",
            "image": "image",
            "boot_disk_size_gb": 10,
            "taints": [
                {
                    "key": "key",
                    "value": "value",
                },
                {
                    "key": "key",
                    "value": "value",
                    "effect": "NO_SCHEDULE",
                },
            ],
            "labels": {},
            "vsphere_config": {
                "datastore": "test-datastore",
                "tags": [
                    {
                        "category": "test-category-1",
                        "tag": "tag-1",
                    },
                    {
                        "category": "test-category-2",
                        "tag": "tag-2",
                    },
                ],
                "host_groups": [
                    "host1",
                    "host2",
                ],
            },
            "enable_load_balancer": True,
        },
        node_pool_autoscaling={
            "min_replicas": 1,
            "max_replicas": 5,
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkeonprem"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := gkeonprem.NewVMwareCluster(ctx, "default-full", &gkeonprem.VMwareClusterArgs{
    			Name:                   pulumi.String("my-cluster"),
    			Location:               pulumi.String("us-west1"),
    			AdminClusterMembership: pulumi.String("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test"),
    			Description:            pulumi.String("test cluster"),
    			OnPremVersion:          pulumi.String("1.13.1-gke.35"),
    			NetworkConfig: &gkeonprem.VMwareClusterNetworkConfigArgs{
    				ServiceAddressCidrBlocks: pulumi.StringArray{
    					pulumi.String("10.96.0.0/12"),
    				},
    				PodAddressCidrBlocks: pulumi.StringArray{
    					pulumi.String("192.168.0.0/16"),
    				},
    				DhcpIpConfig: &gkeonprem.VMwareClusterNetworkConfigDhcpIpConfigArgs{
    					Enabled: pulumi.Bool(true),
    				},
    			},
    			ControlPlaneNode: &gkeonprem.VMwareClusterControlPlaneNodeArgs{
    				Cpus:     pulumi.Int(4),
    				Memory:   pulumi.Int(8192),
    				Replicas: pulumi.Int(1),
    			},
    			LoadBalancer: &gkeonprem.VMwareClusterLoadBalancerArgs{
    				VipConfig: &gkeonprem.VMwareClusterLoadBalancerVipConfigArgs{
    					ControlPlaneVip: pulumi.String("10.251.133.5"),
    					IngressVip:      pulumi.String("10.251.135.19"),
    				},
    				MetalLbConfig: &gkeonprem.VMwareClusterLoadBalancerMetalLbConfigArgs{
    					AddressPools: gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArray{
    						&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
    							Pool:         pulumi.String("ingress-ip"),
    							ManualAssign: pulumi.Bool(true),
    							Addresses: pulumi.StringArray{
    								pulumi.String("10.251.135.19"),
    							},
    						},
    						&gkeonprem.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs{
    							Pool:         pulumi.String("lb-test-ip"),
    							ManualAssign: pulumi.Bool(true),
    							Addresses: pulumi.StringArray{
    								pulumi.String("10.251.135.19"),
    							},
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = gkeonprem.NewVMwareNodePool(ctx, "nodepool-full", &gkeonprem.VMwareNodePoolArgs{
    			Name:          pulumi.String("my-nodepool"),
    			Location:      pulumi.String("us-west1"),
    			VmwareCluster: defaultFull.Name,
    			Annotations:   pulumi.StringMap{},
    			Config: &gkeonprem.VMwareNodePoolConfigArgs{
    				Cpus:           pulumi.Int(4),
    				MemoryMb:       pulumi.Int(8196),
    				Replicas:       pulumi.Int(3),
    				ImageType:      pulumi.String("ubuntu_containerd"),
    				Image:          pulumi.String("image"),
    				BootDiskSizeGb: pulumi.Int(10),
    				Taints: gkeonprem.VMwareNodePoolConfigTaintArray{
    					&gkeonprem.VMwareNodePoolConfigTaintArgs{
    						Key:   pulumi.String("key"),
    						Value: pulumi.String("value"),
    					},
    					&gkeonprem.VMwareNodePoolConfigTaintArgs{
    						Key:    pulumi.String("key"),
    						Value:  pulumi.String("value"),
    						Effect: pulumi.String("NO_SCHEDULE"),
    					},
    				},
    				Labels: pulumi.StringMap{},
    				VsphereConfig: &gkeonprem.VMwareNodePoolConfigVsphereConfigArgs{
    					Datastore: pulumi.String("test-datastore"),
    					Tags: gkeonprem.VMwareNodePoolConfigVsphereConfigTagArray{
    						&gkeonprem.VMwareNodePoolConfigVsphereConfigTagArgs{
    							Category: pulumi.String("test-category-1"),
    							Tag:      pulumi.String("tag-1"),
    						},
    						&gkeonprem.VMwareNodePoolConfigVsphereConfigTagArgs{
    							Category: pulumi.String("test-category-2"),
    							Tag:      pulumi.String("tag-2"),
    						},
    					},
    					HostGroups: pulumi.StringArray{
    						pulumi.String("host1"),
    						pulumi.String("host2"),
    					},
    				},
    				EnableLoadBalancer: pulumi.Bool(true),
    			},
    			NodePoolAutoscaling: &gkeonprem.VMwareNodePoolNodePoolAutoscalingArgs{
    				MinReplicas: pulumi.Int(1),
    				MaxReplicas: pulumi.Int(5),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var default_full = new Gcp.GkeOnPrem.VMwareCluster("default-full", new()
        {
            Name = "my-cluster",
            Location = "us-west1",
            AdminClusterMembership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test",
            Description = "test cluster",
            OnPremVersion = "1.13.1-gke.35",
            NetworkConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigArgs
            {
                ServiceAddressCidrBlocks = new[]
                {
                    "10.96.0.0/12",
                },
                PodAddressCidrBlocks = new[]
                {
                    "192.168.0.0/16",
                },
                DhcpIpConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs
                {
                    Enabled = true,
                },
            },
            ControlPlaneNode = new Gcp.GkeOnPrem.Inputs.VMwareClusterControlPlaneNodeArgs
            {
                Cpus = 4,
                Memory = 8192,
                Replicas = 1,
            },
            LoadBalancer = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerArgs
            {
                VipConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerVipConfigArgs
                {
                    ControlPlaneVip = "10.251.133.5",
                    IngressVip = "10.251.135.19",
                },
                MetalLbConfig = new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigArgs
                {
                    AddressPools = new[]
                    {
                        new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
                        {
                            Pool = "ingress-ip",
                            ManualAssign = true,
                            Addresses = new[]
                            {
                                "10.251.135.19",
                            },
                        },
                        new Gcp.GkeOnPrem.Inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs
                        {
                            Pool = "lb-test-ip",
                            ManualAssign = true,
                            Addresses = new[]
                            {
                                "10.251.135.19",
                            },
                        },
                    },
                },
            },
        });
    
        var nodepool_full = new Gcp.GkeOnPrem.VMwareNodePool("nodepool-full", new()
        {
            Name = "my-nodepool",
            Location = "us-west1",
            VmwareCluster = default_full.Name,
            Annotations = null,
            Config = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigArgs
            {
                Cpus = 4,
                MemoryMb = 8196,
                Replicas = 3,
                ImageType = "ubuntu_containerd",
                Image = "image",
                BootDiskSizeGb = 10,
                Taints = new[]
                {
                    new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigTaintArgs
                    {
                        Key = "key",
                        Value = "value",
                    },
                    new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigTaintArgs
                    {
                        Key = "key",
                        Value = "value",
                        Effect = "NO_SCHEDULE",
                    },
                },
                Labels = null,
                VsphereConfig = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigArgs
                {
                    Datastore = "test-datastore",
                    Tags = new[]
                    {
                        new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigTagArgs
                        {
                            Category = "test-category-1",
                            Tag = "tag-1",
                        },
                        new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigTagArgs
                        {
                            Category = "test-category-2",
                            Tag = "tag-2",
                        },
                    },
                    HostGroups = new[]
                    {
                        "host1",
                        "host2",
                    },
                },
                EnableLoadBalancer = true,
            },
            NodePoolAutoscaling = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolNodePoolAutoscalingArgs
            {
                MinReplicas = 1,
                MaxReplicas = 5,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.gkeonprem.VMwareCluster;
    import com.pulumi.gcp.gkeonprem.VMwareClusterArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterNetworkConfigDhcpIpConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterControlPlaneNodeArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerVipConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs;
    import com.pulumi.gcp.gkeonprem.VMwareNodePool;
    import com.pulumi.gcp.gkeonprem.VMwareNodePoolArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigTaintArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigVsphereConfigArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolConfigVsphereConfigTagArgs;
    import com.pulumi.gcp.gkeonprem.inputs.VMwareNodePoolNodePoolAutoscalingArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_full = new VMwareCluster("default-full", VMwareClusterArgs.builder()
                .name("my-cluster")
                .location("us-west1")
                .adminClusterMembership("projects/870316890899/locations/global/memberships/gkeonprem-terraform-test")
                .description("test cluster")
                .onPremVersion("1.13.1-gke.35")
                .networkConfig(VMwareClusterNetworkConfigArgs.builder()
                    .serviceAddressCidrBlocks("10.96.0.0/12")
                    .podAddressCidrBlocks("192.168.0.0/16")
                    .dhcpIpConfig(VMwareClusterNetworkConfigDhcpIpConfigArgs.builder()
                        .enabled(true)
                        .build())
                    .build())
                .controlPlaneNode(VMwareClusterControlPlaneNodeArgs.builder()
                    .cpus(4)
                    .memory(8192)
                    .replicas(1)
                    .build())
                .loadBalancer(VMwareClusterLoadBalancerArgs.builder()
                    .vipConfig(VMwareClusterLoadBalancerVipConfigArgs.builder()
                        .controlPlaneVip("10.251.133.5")
                        .ingressVip("10.251.135.19")
                        .build())
                    .metalLbConfig(VMwareClusterLoadBalancerMetalLbConfigArgs.builder()
                        .addressPools(                    
                            VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                                .pool("ingress-ip")
                                .manualAssign("true")
                                .addresses("10.251.135.19")
                                .build(),
                            VMwareClusterLoadBalancerMetalLbConfigAddressPoolArgs.builder()
                                .pool("lb-test-ip")
                                .manualAssign("true")
                                .addresses("10.251.135.19")
                                .build())
                        .build())
                    .build())
                .build());
    
            var nodepool_full = new VMwareNodePool("nodepool-full", VMwareNodePoolArgs.builder()
                .name("my-nodepool")
                .location("us-west1")
                .vmwareCluster(default_full.name())
                .annotations(Map.of())
                .config(VMwareNodePoolConfigArgs.builder()
                    .cpus(4)
                    .memoryMb(8196)
                    .replicas(3)
                    .imageType("ubuntu_containerd")
                    .image("image")
                    .bootDiskSizeGb(10)
                    .taints(                
                        VMwareNodePoolConfigTaintArgs.builder()
                            .key("key")
                            .value("value")
                            .build(),
                        VMwareNodePoolConfigTaintArgs.builder()
                            .key("key")
                            .value("value")
                            .effect("NO_SCHEDULE")
                            .build())
                    .labels(Map.of())
                    .vsphereConfig(VMwareNodePoolConfigVsphereConfigArgs.builder()
                        .datastore("test-datastore")
                        .tags(                    
                            VMwareNodePoolConfigVsphereConfigTagArgs.builder()
                                .category("test-category-1")
                                .tag("tag-1")
                                .build(),
                            VMwareNodePoolConfigVsphereConfigTagArgs.builder()
                                .category("test-category-2")
                                .tag("tag-2")
                                .build())
                        .hostGroups(                    
                            "host1",
                            "host2")
                        .build())
                    .enableLoadBalancer(true)
                    .build())
                .nodePoolAutoscaling(VMwareNodePoolNodePoolAutoscalingArgs.builder()
                    .minReplicas(1)
                    .maxReplicas(5)
                    .build())
                .build());
    
        }
    }
    
    resources:
      default-full:
        type: gcp:gkeonprem:VMwareCluster
        properties:
          name: my-cluster
          location: us-west1
          adminClusterMembership: projects/870316890899/locations/global/memberships/gkeonprem-terraform-test
          description: test cluster
          onPremVersion: 1.13.1-gke.35
          networkConfig:
            serviceAddressCidrBlocks:
              - 10.96.0.0/12
            podAddressCidrBlocks:
              - 192.168.0.0/16
            dhcpIpConfig:
              enabled: true
          controlPlaneNode:
            cpus: 4
            memory: 8192
            replicas: 1
          loadBalancer:
            vipConfig:
              controlPlaneVip: 10.251.133.5
              ingressVip: 10.251.135.19
            metalLbConfig:
              addressPools:
                - pool: ingress-ip
                  manualAssign: true
                  addresses:
                    - 10.251.135.19
                - pool: lb-test-ip
                  manualAssign: true
                  addresses:
                    - 10.251.135.19
      nodepool-full:
        type: gcp:gkeonprem:VMwareNodePool
        properties:
          name: my-nodepool
          location: us-west1
          vmwareCluster: ${["default-full"].name}
          annotations: {}
          config:
            cpus: 4
            memoryMb: 8196
            replicas: 3
            imageType: ubuntu_containerd
            image: image
            bootDiskSizeGb: 10
            taints:
              - key: key
                value: value
              - key: key
                value: value
                effect: NO_SCHEDULE
            labels: {}
            vsphereConfig:
              datastore: test-datastore
              tags:
                - category: test-category-1
                  tag: tag-1
                - category: test-category-2
                  tag: tag-2
              hostGroups:
                - host1
                - host2
            enableLoadBalancer: true
          nodePoolAutoscaling:
            minReplicas: 1
            maxReplicas: 5
    

    Create VMwareNodePool Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new VMwareNodePool(name: string, args: VMwareNodePoolArgs, opts?: CustomResourceOptions);
    @overload
    def VMwareNodePool(resource_name: str,
                       args: VMwareNodePoolArgs,
                       opts: Optional[ResourceOptions] = None)
    
    @overload
    def VMwareNodePool(resource_name: str,
                       opts: Optional[ResourceOptions] = None,
                       config: Optional[VMwareNodePoolConfigArgs] = None,
                       location: Optional[str] = None,
                       vmware_cluster: Optional[str] = None,
                       annotations: Optional[Mapping[str, str]] = None,
                       display_name: Optional[str] = None,
                       name: Optional[str] = None,
                       node_pool_autoscaling: Optional[VMwareNodePoolNodePoolAutoscalingArgs] = None,
                       project: Optional[str] = None)
    func NewVMwareNodePool(ctx *Context, name string, args VMwareNodePoolArgs, opts ...ResourceOption) (*VMwareNodePool, error)
    public VMwareNodePool(string name, VMwareNodePoolArgs args, CustomResourceOptions? opts = null)
    public VMwareNodePool(String name, VMwareNodePoolArgs args)
    public VMwareNodePool(String name, VMwareNodePoolArgs args, CustomResourceOptions options)
    
    type: gcp:gkeonprem:VMwareNodePool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args VMwareNodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args VMwareNodePoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args VMwareNodePoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args VMwareNodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args VMwareNodePoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var vmwareNodePoolResource = new Gcp.GkeOnPrem.VMwareNodePool("vmwareNodePoolResource", new()
    {
        Config = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigArgs
        {
            ImageType = "string",
            BootDiskSizeGb = 0,
            Cpus = 0,
            EnableLoadBalancer = false,
            Image = "string",
            Labels = 
            {
                { "string", "string" },
            },
            MemoryMb = 0,
            Replicas = 0,
            Taints = new[]
            {
                new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigTaintArgs
                {
                    Key = "string",
                    Value = "string",
                    Effect = "string",
                },
            },
            VsphereConfig = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigArgs
            {
                Datastore = "string",
                HostGroups = new[]
                {
                    "string",
                },
                Tags = new[]
                {
                    new Gcp.GkeOnPrem.Inputs.VMwareNodePoolConfigVsphereConfigTagArgs
                    {
                        Category = "string",
                        Tag = "string",
                    },
                },
            },
        },
        Location = "string",
        VmwareCluster = "string",
        Annotations = 
        {
            { "string", "string" },
        },
        DisplayName = "string",
        Name = "string",
        NodePoolAutoscaling = new Gcp.GkeOnPrem.Inputs.VMwareNodePoolNodePoolAutoscalingArgs
        {
            MaxReplicas = 0,
            MinReplicas = 0,
        },
        Project = "string",
    });
    
    example, err := gkeonprem.NewVMwareNodePool(ctx, "vmwareNodePoolResource", &gkeonprem.VMwareNodePoolArgs{
    	Config: &gkeonprem.VMwareNodePoolConfigArgs{
    		ImageType:          pulumi.String("string"),
    		BootDiskSizeGb:     pulumi.Int(0),
    		Cpus:               pulumi.Int(0),
    		EnableLoadBalancer: pulumi.Bool(false),
    		Image:              pulumi.String("string"),
    		Labels: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		MemoryMb: pulumi.Int(0),
    		Replicas: pulumi.Int(0),
    		Taints: gkeonprem.VMwareNodePoolConfigTaintArray{
    			&gkeonprem.VMwareNodePoolConfigTaintArgs{
    				Key:    pulumi.String("string"),
    				Value:  pulumi.String("string"),
    				Effect: pulumi.String("string"),
    			},
    		},
    		VsphereConfig: &gkeonprem.VMwareNodePoolConfigVsphereConfigArgs{
    			Datastore: pulumi.String("string"),
    			HostGroups: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			Tags: gkeonprem.VMwareNodePoolConfigVsphereConfigTagArray{
    				&gkeonprem.VMwareNodePoolConfigVsphereConfigTagArgs{
    					Category: pulumi.String("string"),
    					Tag:      pulumi.String("string"),
    				},
    			},
    		},
    	},
    	Location:      pulumi.String("string"),
    	VmwareCluster: pulumi.String("string"),
    	Annotations: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	DisplayName: pulumi.String("string"),
    	Name:        pulumi.String("string"),
    	NodePoolAutoscaling: &gkeonprem.VMwareNodePoolNodePoolAutoscalingArgs{
    		MaxReplicas: pulumi.Int(0),
    		MinReplicas: pulumi.Int(0),
    	},
    	Project: pulumi.String("string"),
    })
    
    var vmwareNodePoolResource = new VMwareNodePool("vmwareNodePoolResource", VMwareNodePoolArgs.builder()
        .config(VMwareNodePoolConfigArgs.builder()
            .imageType("string")
            .bootDiskSizeGb(0)
            .cpus(0)
            .enableLoadBalancer(false)
            .image("string")
            .labels(Map.of("string", "string"))
            .memoryMb(0)
            .replicas(0)
            .taints(VMwareNodePoolConfigTaintArgs.builder()
                .key("string")
                .value("string")
                .effect("string")
                .build())
            .vsphereConfig(VMwareNodePoolConfigVsphereConfigArgs.builder()
                .datastore("string")
                .hostGroups("string")
                .tags(VMwareNodePoolConfigVsphereConfigTagArgs.builder()
                    .category("string")
                    .tag("string")
                    .build())
                .build())
            .build())
        .location("string")
        .vmwareCluster("string")
        .annotations(Map.of("string", "string"))
        .displayName("string")
        .name("string")
        .nodePoolAutoscaling(VMwareNodePoolNodePoolAutoscalingArgs.builder()
            .maxReplicas(0)
            .minReplicas(0)
            .build())
        .project("string")
        .build());
    
    vmware_node_pool_resource = gcp.gkeonprem.VMwareNodePool("vmwareNodePoolResource",
        config={
            "image_type": "string",
            "boot_disk_size_gb": 0,
            "cpus": 0,
            "enable_load_balancer": False,
            "image": "string",
            "labels": {
                "string": "string",
            },
            "memory_mb": 0,
            "replicas": 0,
            "taints": [{
                "key": "string",
                "value": "string",
                "effect": "string",
            }],
            "vsphere_config": {
                "datastore": "string",
                "host_groups": ["string"],
                "tags": [{
                    "category": "string",
                    "tag": "string",
                }],
            },
        },
        location="string",
        vmware_cluster="string",
        annotations={
            "string": "string",
        },
        display_name="string",
        name="string",
        node_pool_autoscaling={
            "max_replicas": 0,
            "min_replicas": 0,
        },
        project="string")
    
    const vmwareNodePoolResource = new gcp.gkeonprem.VMwareNodePool("vmwareNodePoolResource", {
        config: {
            imageType: "string",
            bootDiskSizeGb: 0,
            cpus: 0,
            enableLoadBalancer: false,
            image: "string",
            labels: {
                string: "string",
            },
            memoryMb: 0,
            replicas: 0,
            taints: [{
                key: "string",
                value: "string",
                effect: "string",
            }],
            vsphereConfig: {
                datastore: "string",
                hostGroups: ["string"],
                tags: [{
                    category: "string",
                    tag: "string",
                }],
            },
        },
        location: "string",
        vmwareCluster: "string",
        annotations: {
            string: "string",
        },
        displayName: "string",
        name: "string",
        nodePoolAutoscaling: {
            maxReplicas: 0,
            minReplicas: 0,
        },
        project: "string",
    });
    
    type: gcp:gkeonprem:VMwareNodePool
    properties:
        annotations:
            string: string
        config:
            bootDiskSizeGb: 0
            cpus: 0
            enableLoadBalancer: false
            image: string
            imageType: string
            labels:
                string: string
            memoryMb: 0
            replicas: 0
            taints:
                - effect: string
                  key: string
                  value: string
            vsphereConfig:
                datastore: string
                hostGroups:
                    - string
                tags:
                    - category: string
                      tag: string
        displayName: string
        location: string
        name: string
        nodePoolAutoscaling:
            maxReplicas: 0
            minReplicas: 0
        project: string
        vmwareCluster: string
    

    VMwareNodePool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
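
    For example, here is a minimal sketch (placeholder names and values; it assumes an existing user cluster named "my-cluster" in us-west1) of the two equivalent ways to pass the config input in Python:

    import pulumi_gcp as gcp

    # Dictionary-literal form: nested inputs are plain dicts with snake_case keys.
    pool_from_dict = gcp.gkeonprem.VMwareNodePool("pool-from-dict",
        location="us-west1",
        vmware_cluster="my-cluster",
        config={
            "replicas": 3,
            "image_type": "ubuntu_containerd",
        })

    # Argument-class form: the same inputs expressed with the generated Args type.
    pool_from_args = gcp.gkeonprem.VMwareNodePool("pool-from-args",
        location="us-west1",
        vmware_cluster="my-cluster",
        config=gcp.gkeonprem.VMwareNodePoolConfigArgs(
            replicas=3,
            image_type="ubuntu_containerd",
        ))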

    The VMwareNodePool resource accepts the following input properties:

    Config VMwareNodePoolConfig
    The node configuration of the node pool. Structure is documented below.
    Location string
    The location of the resource.
    VmwareCluster string
    The cluster this node pool belongs to.
    Annotations Dictionary<string, string>
    Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    DisplayName string
    The display name for the node pool.
    Name string
    The vmware node pool name.
    NodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
    Node Pool autoscaling config for the node pool.
    Project string
    Config VMwareNodePoolConfigArgs
    The node configuration of the node pool. Structure is documented below.
    Location string
    The location of the resource.
    VmwareCluster string
    The cluster this node pool belongs to.
    Annotations map[string]string
    Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    DisplayName string
    The display name for the node pool.
    Name string
    The vmware node pool name.
    NodePoolAutoscaling VMwareNodePoolNodePoolAutoscalingArgs
    Node Pool autoscaling config for the node pool.
    Project string
    config VMwareNodePoolConfig
    The node configuration of the node pool. Structure is documented below.
    location String
    The location of the resource.
    vmwareCluster String
    The cluster this node pool belongs to.
    annotations Map<String,String>
    Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    displayName String
    The display name for the node pool.
    name String
    The vmware node pool name.
    nodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
    Node Pool autoscaling config for the node pool.
    project String
    config VMwareNodePoolConfig
    The node configuration of the node pool. Structure is documented below.
    location string
    The location of the resource.
    vmwareCluster string
    The cluster this node pool belongs to.
    annotations {[key: string]: string}
    Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    displayName string
    The display name for the node pool.
    name string
    The vmware node pool name.
    nodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
    Node Pool autoscaling config for the node pool.
    project string
    config VMwareNodePoolConfigArgs
    The node configuration of the node pool. Structure is documented below.
    location str
    The location of the resource.
    vmware_cluster str
    The cluster this node pool belongs to.
    annotations Mapping[str, str]
    Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    display_name str
    The display name for the node pool.
    name str
    The vmware node pool name.
    node_pool_autoscaling VMwareNodePoolNodePoolAutoscalingArgs
    Node Pool autoscaling config for the node pool.
    project str
    config Property Map
    The node configuration of the node pool. Structure is documented below.
    location String
    The location of the resource.
    vmwareCluster String
    The cluster this node pool belongs to.
    annotations Map<String>
    Annotations on the node Pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    displayName String
    The display name for the node pool.
    name String
    The vmware node pool name.
    nodePoolAutoscaling Property Map
    Node Pool autoscaling config for the node pool.
    project String
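
    Because the annotations input is non-authoritative, a minimal Python sketch (placeholder names; it assumes an existing user cluster named "my-cluster") of managing a single annotation while reading the full set back through effective_annotations might look like this:

    import pulumi
    import pulumi_gcp as gcp

    # Only the annotations declared here are managed by Pulumi; annotations
    # added to the node pool outside of this configuration are left in place.
    annotated_pool = gcp.gkeonprem.VMwareNodePool("annotated-pool",
        location="us-west1",
        vmware_cluster="my-cluster",
        config={
            "replicas": 3,
            "image_type": "ubuntu_containerd",
        },
        annotations={
            "team": "platform",
        })

    # effective_annotations (an output property, listed below) also includes
    # annotations applied outside of this configuration.
    pulumi.export("allAnnotations", annotated_pool.effective_annotations)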

    Outputs

    All input properties are implicitly available as output properties. Additionally, the VMwareNodePool resource produces the following output properties:

    CreateTime string
    The time the node pool was created, in RFC3339 text format.
    DeleteTime string
    The time the node pool was deleted, in RFC3339 text format.
    EffectiveAnnotations Dictionary<string, string>
    Etag string
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    Id string
    The provider-assigned unique ID for this managed resource.
    OnPremVersion string
    Anthos version for the node pool. Defaults to the user cluster version.
    Reconciling bool
    If set, there are currently changes in flight to the node pool.
    State string
    (Output) The lifecycle state of the condition.
    Statuses List<VMwareNodePoolStatus>
    ResourceStatus representing detailed cluster state. Structure is documented below.
    Uid string
    The unique identifier of the node pool.
    UpdateTime string
    The time the node pool was last updated, in RFC3339 text format.
    CreateTime string
    The time the node pool was created, in RFC3339 text format.
    DeleteTime string
    The time the node pool was deleted, in RFC3339 text format.
    EffectiveAnnotations map[string]string
    Etag string
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    Id string
    The provider-assigned unique ID for this managed resource.
    OnPremVersion string
    Anthos version for the node pool. Defaults to the user cluster version.
    Reconciling bool
    If set, there are currently changes in flight to the node pool.
    State string
    (Output) The lifecycle state of the node pool.
    Statuses []VMwareNodePoolStatus
    ResourceStatus representing detailed cluster state. Structure is documented below.
    Uid string
    The unique identifier of the node pool.
    UpdateTime string
    The time the node pool was last updated, in RFC3339 text format.
    createTime String
    The time the node pool was created, in RFC3339 text format.
    deleteTime String
    The time the node pool was deleted, in RFC3339 text format.
    effectiveAnnotations Map<String,String>
    etag String
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    id String
    The provider-assigned unique ID for this managed resource.
    onPremVersion String
    Anthos version for the node pool. Defaults to the user cluster version.
    reconciling Boolean
    If set, there are currently changes in flight to the node pool.
    state String
    (Output) The lifecycle state of the node pool.
    statuses List<VMwareNodePoolStatus>
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid String
    The unique identifier of the node pool.
    updateTime String
    The time the node pool was last updated, in RFC3339 text format.
    createTime string
    The time the node pool was created, in RFC3339 text format.
    deleteTime string
    The time the node pool was deleted, in RFC3339 text format.
    effectiveAnnotations {[key: string]: string}
    etag string
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    id string
    The provider-assigned unique ID for this managed resource.
    onPremVersion string
    Anthos version for the node pool. Defaults to the user cluster version.
    reconciling boolean
    If set, there are currently changes in flight to the node pool.
    state string
    (Output) The lifecycle state of the node pool.
    statuses VMwareNodePoolStatus[]
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid string
    The unique identifier of the node pool.
    updateTime string
    The time the node pool was last updated, in RFC3339 text format.
    create_time str
    The time the node pool was created, in RFC3339 text format.
    delete_time str
    The time the node pool was deleted, in RFC3339 text format.
    effective_annotations Mapping[str, str]
    etag str
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    id str
    The provider-assigned unique ID for this managed resource.
    on_prem_version str
    Anthos version for the node pool. Defaults to the user cluster version.
    reconciling bool
    If set, there are currently changes in flight to the node pool.
    state str
    (Output) The lifecycle state of the node pool.
    statuses Sequence[VMwareNodePoolStatus]
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid str
    The unique identifier of the node pool.
    update_time str
    The time the node pool was last updated, in RFC3339 text format.
    createTime String
    The time the node pool was created, in RFC3339 text format.
    deleteTime String
    The time the node pool was deleted, in RFC3339 text format.
    effectiveAnnotations Map<String>
    etag String
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    id String
    The provider-assigned unique ID for this managed resource.
    onPremVersion String
    Anthos version for the node pool. Defaults to the user cluster version.
    reconciling Boolean
    If set, there are currently changes in flight to the node pool.
    state String
    (Output) The lifecycle state of the node pool.
    statuses List<Property Map>
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid String
    The unique identifier of the node pool.
    updateTime String
    The time the node pool was last updated, in RFC3339 text format.
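
    As a hedged illustration (not part of the generated reference), the output properties above can be exported from a Pulumi TypeScript program once a node pool has been declared; all resource names and argument values below are placeholders:

    import * as gcp from "@pulumi/gcp";

    // Placeholder node pool; see the Example Usage section for complete arguments.
    const pool = new gcp.gkeonprem.VMwareNodePool("example-pool", {
        location: "us-west1",
        vmwareCluster: "my-cluster",
        config: {
            replicas: 3,
            imageType: "ubuntu_containerd",
        },
    });

    // Output properties resolve after the pool has been created.
    export const poolUid = pool.uid;
    export const poolState = pool.state;
    export const poolCreateTime = pool.createTime;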

    Look up Existing VMwareNodePool Resource

    Get an existing VMwareNodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: VMwareNodePoolState, opts?: CustomResourceOptions): VMwareNodePool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            annotations: Optional[Mapping[str, str]] = None,
            config: Optional[VMwareNodePoolConfigArgs] = None,
            create_time: Optional[str] = None,
            delete_time: Optional[str] = None,
            display_name: Optional[str] = None,
            effective_annotations: Optional[Mapping[str, str]] = None,
            etag: Optional[str] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            node_pool_autoscaling: Optional[VMwareNodePoolNodePoolAutoscalingArgs] = None,
            on_prem_version: Optional[str] = None,
            project: Optional[str] = None,
            reconciling: Optional[bool] = None,
            state: Optional[str] = None,
            statuses: Optional[Sequence[VMwareNodePoolStatusArgs]] = None,
            uid: Optional[str] = None,
            update_time: Optional[str] = None,
            vmware_cluster: Optional[str] = None) -> VMwareNodePool
    func GetVMwareNodePool(ctx *Context, name string, id IDInput, state *VMwareNodePoolState, opts ...ResourceOption) (*VMwareNodePool, error)
    public static VMwareNodePool Get(string name, Input<string> id, VMwareNodePoolState? state, CustomResourceOptions? opts = null)
    public static VMwareNodePool get(String name, Output<String> id, VMwareNodePoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
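
    For example, a minimal TypeScript sketch of the lookup; the resource ID below is a placeholder that follows the fully-qualified format shown in the Import section:

    import * as gcp from "@pulumi/gcp";

    // Look up an existing node pool by its fully-qualified resource ID (placeholder values).
    const existing = gcp.gkeonprem.VMwareNodePool.get(
        "existing-nodepool",
        "projects/my-project/locations/us-west1/vmwareClusters/my-cluster/vmwareNodePools/my-nodepool");

    export const existingOnPremVersion = existing.onPremVersion;
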
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Annotations Dictionary<string, string>
    Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. A key can have two segments: prefix (optional) and name (required), separated by a slash (/). The prefix must be a DNS subdomain. The name must be 63 characters or less, begin and end with alphanumerics, and may contain dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    Config VMwareNodePoolConfig
    The node configuration of the node pool. Structure is documented below.
    CreateTime string
    The time the node pool was created, in RFC3339 text format.
    DeleteTime string
    The time the node pool was deleted, in RFC3339 text format.
    DisplayName string
    The display name for the node pool.
    EffectiveAnnotations Dictionary<string, string>
    Etag string
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    Location string
    The location of the resource.
    Name string
    The VMware node pool name.
    NodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
    Node Pool autoscaling config for the node pool.
    OnPremVersion string
    Anthos version for the node pool. Defaults to the user cluster version.
    Project string
    Reconciling bool
    If set, there are currently changes in flight to the node pool.
    State string
    (Output) The lifecycle state of the node pool.
    Statuses List<VMwareNodePoolStatus>
    ResourceStatus representing detailed cluster state. Structure is documented below.
    Uid string
    The unique identifier of the node pool.
    UpdateTime string
    The time the node pool was last updated, in RFC3339 text format.
    VmwareCluster string
    The cluster this node pool belongs to.
    Annotations map[string]string
    Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. A key can have two segments: prefix (optional) and name (required), separated by a slash (/). The prefix must be a DNS subdomain. The name must be 63 characters or less, begin and end with alphanumerics, and may contain dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    Config VMwareNodePoolConfigArgs
    The node configuration of the node pool. Structure is documented below.
    CreateTime string
    The time the node pool was created, in RFC3339 text format.
    DeleteTime string
    The time the node pool was deleted, in RFC3339 text format.
    DisplayName string
    The display name for the node pool.
    EffectiveAnnotations map[string]string
    Etag string
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    Location string
    The location of the resource.
    Name string
    The VMware node pool name.
    NodePoolAutoscaling VMwareNodePoolNodePoolAutoscalingArgs
    Node Pool autoscaling config for the node pool.
    OnPremVersion string
    Anthos version for the node pool. Defaults to the user cluster version.
    Project string
    Reconciling bool
    If set, there are currently changes in flight to the node pool.
    State string
    (Output) The lifecycle state of the node pool.
    Statuses []VMwareNodePoolStatusArgs
    ResourceStatus representing detailed cluster state. Structure is documented below.
    Uid string
    The unique identifier of the node pool.
    UpdateTime string
    The time the node pool was last updated, in RFC3339 text format.
    VmwareCluster string
    The cluster this node pool belongs to.
    annotations Map<String,String>
    Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. A key can have two segments: prefix (optional) and name (required), separated by a slash (/). The prefix must be a DNS subdomain. The name must be 63 characters or less, begin and end with alphanumerics, and may contain dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    config VMwareNodePoolConfig
    The node configuration of the node pool. Structure is documented below.
    createTime String
    The time the node pool was created, in RFC3339 text format.
    deleteTime String
    The time the node pool was deleted, in RFC3339 text format.
    displayName String
    The display name for the node pool.
    effectiveAnnotations Map<String,String>
    etag String
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    location String
    The location of the resource.
    name String
    The VMware node pool name.
    nodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
    Node Pool autoscaling config for the node pool.
    onPremVersion String
    Anthos version for the node pool. Defaults to the user cluster version.
    project String
    reconciling Boolean
    If set, there are currently changes in flight to the node pool.
    state String
    (Output) The lifecycle state of the node pool.
    statuses List<VMwareNodePoolStatus>
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid String
    The unique identifier of the node pool.
    updateTime String
    The time the node pool was last updated, in RFC3339 text format.
    vmwareCluster String
    The cluster this node pool belongs to.
    annotations {[key: string]: string}
    Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. A key can have two segments: prefix (optional) and name (required), separated by a slash (/). The prefix must be a DNS subdomain. The name must be 63 characters or less, begin and end with alphanumerics, and may contain dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    config VMwareNodePoolConfig
    The node configuration of the node pool. Structure is documented below.
    createTime string
    The time the node pool was created, in RFC3339 text format.
    deleteTime string
    The time the node pool was deleted, in RFC3339 text format.
    displayName string
    The display name for the node pool.
    effectiveAnnotations {[key: string]: string}
    etag string
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    location string
    The location of the resource.
    name string
    The VMware node pool name.
    nodePoolAutoscaling VMwareNodePoolNodePoolAutoscaling
    Node Pool autoscaling config for the node pool.
    onPremVersion string
    Anthos version for the node pool. Defaults to the user cluster version.
    project string
    reconciling boolean
    If set, there are currently changes in flight to the node pool.
    state string
    (Output) The lifecycle state of the node pool.
    statuses VMwareNodePoolStatus[]
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid string
    The unique identifier of the node pool.
    updateTime string
    The time the node pool was last updated, in RFC3339 text format.
    vmwareCluster string
    The cluster this node pool belongs to.
    annotations Mapping[str, str]
    Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. A key can have two segments: prefix (optional) and name (required), separated by a slash (/). The prefix must be a DNS subdomain. The name must be 63 characters or less, begin and end with alphanumerics, and may contain dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    config VMwareNodePoolConfigArgs
    The node configuration of the node pool. Structure is documented below.
    create_time str
    The time the node pool was created, in RFC3339 text format.
    delete_time str
    The time the node pool was deleted, in RFC3339 text format.
    display_name str
    The display name for the node pool.
    effective_annotations Mapping[str, str]
    etag str
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    location str
    The location of the resource.
    name str
    The VMware node pool name.
    node_pool_autoscaling VMwareNodePoolNodePoolAutoscalingArgs
    Node Pool autoscaling config for the node pool.
    on_prem_version str
    Anthos version for the node pool. Defaults to the user cluster version.
    project str
    reconciling bool
    If set, there are currently changes in flight to the node pool.
    state str
    (Output) The lifecycle state of the node pool.
    statuses Sequence[VMwareNodePoolStatusArgs]
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid str
    The unique identifier of the node pool.
    update_time str
    The time the node pool was last updated, in RFC3339 text format.
    vmware_cluster str
    The cluster this node pool belongs to.
    annotations Map<String>
    Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. A key can have two segments: prefix (optional) and name (required), separated by a slash (/). The prefix must be a DNS subdomain. The name must be 63 characters or less, begin and end with alphanumerics, and may contain dashes (-), underscores (_), dots (.), and alphanumerics between. Note: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field 'effective_annotations' for all of the annotations present on the resource.
    config Property Map
    The node configuration of the node pool. Structure is documented below.
    createTime String
    The time the node pool was created, in RFC3339 text format.
    deleteTime String
    The time the node pool was deleted, in RFC3339 text format.
    displayName String
    The display name for the node pool.
    effectiveAnnotations Map<String>
    etag String
    This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. Allows clients to perform consistent read-modify-writes through optimistic concurrency control.
    location String
    The location of the resource.
    name String
    The VMware node pool name.
    nodePoolAutoscaling Property Map
    Node Pool autoscaling config for the node pool.
    onPremVersion String
    Anthos version for the node pool. Defaults to the user cluster version.
    project String
    reconciling Boolean
    If set, there are currently changes in flight to the node pool.
    state String
    (Output) The lifecycle state of the node pool.
    statuses List<Property Map>
    ResourceStatus representing detailed cluster state. Structure is documented below.
    uid String
    The unique identifier of the node pool.
    updateTime String
    The time the node pool was last updated, in RFC3339 text format.
    vmwareCluster String
    The cluster this node pool belongs to.

    Supporting Types

    VMwareNodePoolConfig, VMwareNodePoolConfigArgs

    ImageType string
    The OS image to be used for each node in a node pool. Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
    BootDiskSizeGb int
    VMware disk size to be used during creation.
    Cpus int
    The number of CPUs for each node in the node pool.
    EnableLoadBalancer bool
    Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
    Image string
    The OS image name in vCenter, only valid when using Windows.
    Labels Dictionary<string, string>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
    MemoryMb int
    The megabytes of memory for each node in the node pool.
    Replicas int
    The number of nodes in the node pool.
    Taints List<VMwareNodePoolConfigTaint>
    The initial taints assigned to nodes of this node pool. Structure is documented below.
    VsphereConfig VMwareNodePoolConfigVsphereConfig
    Specifies the vSphere config for the node pool. Structure is documented below.
    ImageType string
    The OS image to be used for each node in a node pool. Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
    BootDiskSizeGb int
    VMware disk size to be used during creation.
    Cpus int
    The number of CPUs for each node in the node pool.
    EnableLoadBalancer bool
    Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
    Image string
    The OS image name in vCenter, only valid when using Windows.
    Labels map[string]string
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
    MemoryMb int
    The megabytes of memory for each node in the node pool.
    Replicas int
    The number of nodes in the node pool.
    Taints []VMwareNodePoolConfigTaint
    The initial taints assigned to nodes of this node pool. Structure is documented below.
    VsphereConfig VMwareNodePoolConfigVsphereConfig
    Specifies the vSphere config for the node pool. Structure is documented below.
    imageType String
    The OS image to be used for each node in a node pool. Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
    bootDiskSizeGb Integer
    VMware disk size to be used during creation.
    cpus Integer
    The number of CPUs for each node in the node pool.
    enableLoadBalancer Boolean
    Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
    image String
    The OS image name in vCenter, only valid when using Windows.
    labels Map<String,String>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
    memoryMb Integer
    The megabytes of memory for each node in the node pool.
    replicas Integer
    The number of nodes in the node pool.
    taints List<VMwareNodePoolConfigTaint>
    The initial taints assigned to nodes of this node pool. Structure is documented below.
    vsphereConfig VMwareNodePoolConfigVsphereConfig
    Specifies the vSphere config for the node pool. Structure is documented below.
    imageType string
    The OS image to be used for each node in a node pool. Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
    bootDiskSizeGb number
    VMware disk size to be used during creation.
    cpus number
    The number of CPUs for each node in the node pool.
    enableLoadBalancer boolean
    Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
    image string
    The OS image name in vCenter, only valid when using Windows.
    labels {[key: string]: string}
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
    memoryMb number
    The megabytes of memory for each node in the node pool.
    replicas number
    The number of nodes in the node pool.
    taints VMwareNodePoolConfigTaint[]
    The initial taints assigned to nodes of this node pool. Structure is documented below.
    vsphereConfig VMwareNodePoolConfigVsphereConfig
    Specifies the vSphere config for the node pool. Structure is documented below.
    image_type str
    The OS image to be used for each node in a node pool. Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
    boot_disk_size_gb int
    VMware disk size to be used during creation.
    cpus int
    The number of CPUs for each node in the node pool.
    enable_load_balancer bool
    Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
    image str
    The OS image name in vCenter, only valid when using Windows.
    labels Mapping[str, str]
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
    memory_mb int
    The megabytes of memory for each node in the node pool.
    replicas int
    The number of nodes in the node pool.
    taints Sequence[VMwareNodePoolConfigTaint]
    The initial taints assigned to nodes of this node pool. Structure is documented below.
    vsphere_config VMwareNodePoolConfigVsphereConfig
    Specifies the vSphere config for the node pool. Structure is documented below.
    imageType String
    The OS image to be used for each node in a node pool. Currently cos, cos_cgv2, ubuntu, ubuntu_cgv2, ubuntu_containerd and windows are supported.
    bootDiskSizeGb Number
    VMware disk size to be used during creation.
    cpus Number
    The number of CPUs for each node in the node pool.
    enableLoadBalancer Boolean
    Allow node pool traffic to be load balanced. Only works for clusters with MetalLB load balancers.
    image String
    The OS image name in vCenter, only valid when using Windows.
    labels Map<String>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node. In case of conflict in label keys, the applied set may differ depending on the Kubernetes version -- it's best to assume the behavior is undefined and conflicts should be avoided.
    memoryMb Number
    The megabytes of memory for each node in the node pool.
    replicas Number
    The number of nodes in the node pool.
    taints List<Property Map>
    The initial taints assigned to nodes of this node pool. Structure is documented below.
    vsphereConfig Property Map
    Specifies the vSphere config for the node pool. Structure is documented below.
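
    As an illustrative sketch only (all values below are placeholders), a config block using the fields above might be populated as follows in TypeScript; the nested taints and vsphereConfig fields are covered by the types documented below:

    import * as gcp from "@pulumi/gcp";

    const pool = new gcp.gkeonprem.VMwareNodePool("configured-pool", {
        location: "us-west1",
        vmwareCluster: "my-cluster",
        config: {
            imageType: "ubuntu_containerd",
            replicas: 3,
            cpus: 4,
            memoryMb: 8192,
            bootDiskSizeGb: 40,
            enableLoadBalancer: true,
            labels: {
                "pool-tier": "general",
            },
        },
    });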

    VMwareNodePoolConfigTaint, VMwareNodePoolConfigTaintArgs

    Key string
    Key associated with the effect.
    Value string
    Value associated with the effect.
    Effect string
    Available taint effects. Possible values are: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE.
    Key string
    Key associated with the effect.
    Value string
    Value associated with the effect.
    Effect string
    Available taint effects. Possible values are: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE.
    key String
    Key associated with the effect.
    value String
    Value associated with the effect.
    effect String
    Available taint effects. Possible values are: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE.
    key string
    Key associated with the effect.
    value string
    Value associated with the effect.
    effect string
    Available taint effects. Possible values are: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE.
    key str
    Key associated with the effect.
    value str
    Value associated with the effect.
    effect str
    Available taint effects. Possible values are: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE.
    key String
    Key associated with the effect.
    value String
    Value associated with the effect.
    effect String
    Available taint effects. Possible values are: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE.

    VMwareNodePoolConfigVsphereConfig, VMwareNodePoolConfigVsphereConfigArgs

    Datastore string
    The name of the vCenter datastore. Inherited from the user cluster.
    HostGroups List<string>
    vSphere host groups to apply to all VMs in the node pool.
    Tags List<VMwareNodePoolConfigVsphereConfigTag>
    Tags to apply to VMs. Structure is documented below.
    Datastore string
    The name of the vCenter datastore. Inherited from the user cluster.
    HostGroups []string
    vSphere host groups to apply to all VMs in the node pool.
    Tags []VMwareNodePoolConfigVsphereConfigTag
    Tags to apply to VMs. Structure is documented below.
    datastore String
    The name of the vCenter datastore. Inherited from the user cluster.
    hostGroups List<String>
    vSphere host groups to apply to all VMs in the node pool.
    tags List<VMwareNodePoolConfigVsphereConfigTag>
    Tags to apply to VMs. Structure is documented below.
    datastore string
    The name of the vCenter datastore. Inherited from the user cluster.
    hostGroups string[]
    vSphere host groups to apply to all VMs in the node pool.
    tags VMwareNodePoolConfigVsphereConfigTag[]
    Tags to apply to VMs. Structure is documented below.
    datastore str
    The name of the vCenter datastore. Inherited from the user cluster.
    host_groups Sequence[str]
    vSphere host groups to apply to all VMs in the node pool.
    tags Sequence[VMwareNodePoolConfigVsphereConfigTag]
    Tags to apply to VMs. Structure is documented below.
    datastore String
    The name of the vCenter datastore. Inherited from the user cluster.
    hostGroups List<String>
    vSphere host groups to apply to all VMs in the node pool.
    tags List<Property Map>
    Tags to apply to VMs. Structure is documented below.

    VMwareNodePoolConfigVsphereConfigTag, VMwareNodePoolConfigVsphereConfigTagArgs

    Category string
    The vSphere tag category.
    Tag string
    The vSphere tag name.


    Category string
    The vSphere tag category.
    Tag string
    The vSphere tag name.


    category String
    The vSphere tag category.
    tag String
    The vSphere tag name.


    category string
    The vSphere tag category.
    tag string
    The vSphere tag name.


    category str
    The vSphere tag category.
    tag str
    The vSphere tag name.


    category String
    The vSphere tag category.
    tag String
    The vSphere tag name.
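
    Taken together, the taint, vsphereConfig, and tag types above nest under the config block; a hedged sketch in TypeScript (placeholder values, assuming the referenced host group and tag already exist in vCenter):

    import * as gcp from "@pulumi/gcp";

    const pool = new gcp.gkeonprem.VMwareNodePool("tainted-pool", {
        location: "us-west1",
        vmwareCluster: "my-cluster",
        config: {
            imageType: "ubuntu_containerd",
            replicas: 3,
            // Keep general workloads off these nodes unless they tolerate the taint.
            taints: [{
                key: "dedicated",
                value: "infra",
                effect: "NO_SCHEDULE",
            }],
            vsphereConfig: {
                hostGroups: ["np-host-group"],
                tags: [{
                    category: "environment",
                    tag: "dev",
                }],
            },
        },
    });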


    VMwareNodePoolNodePoolAutoscaling, VMwareNodePoolNodePoolAutoscalingArgs

    MaxReplicas int
    Maximum number of replicas in the NodePool.
    MinReplicas int
    Minimum number of replicas in the NodePool.
    MaxReplicas int
    Maximum number of replicas in the NodePool.
    MinReplicas int
    Minimum number of replicas in the NodePool.
    maxReplicas Integer
    Maximum number of replicas in the NodePool.
    minReplicas Integer
    Minimum number of replicas in the NodePool.
    maxReplicas number
    Maximum number of replicas in the NodePool.
    minReplicas number
    Minimum number of replicas in the NodePool.
    max_replicas int
    Maximum number of replicas in the NodePool.
    min_replicas int
    Minimum number of replicas in the NodePool.
    maxReplicas Number
    Maximum number of replicas in the NodePool.
    minReplicas Number
    Minimum number of replicas in the NodePool.
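
    A hedged sketch of attaching autoscaling to a node pool (the replica bounds below are placeholders):

    import * as gcp from "@pulumi/gcp";

    const pool = new gcp.gkeonprem.VMwareNodePool("autoscaled-pool", {
        location: "us-west1",
        vmwareCluster: "my-cluster",
        config: {
            imageType: "ubuntu_containerd",
        },
        nodePoolAutoscaling: {
            minReplicas: 1,
            maxReplicas: 5,
        },
    });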

    VMwareNodePoolStatus, VMwareNodePoolStatusArgs

    Conditions List<VMwareNodePoolStatusCondition>
    (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from the user cluster controller. Structure is documented below.
    ErrorMessage string
    (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can indicate a real problem requiring user intervention.
    Conditions []VMwareNodePoolStatusCondition
    (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from the user cluster controller. Structure is documented below.
    ErrorMessage string
    (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can indicate a real problem requiring user intervention.
    conditions List<VMwareNodePoolStatusCondition>
    (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from the user cluster controller. Structure is documented below.
    errorMessage String
    (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can indicate a real problem requiring user intervention.
    conditions VMwareNodePoolStatusCondition[]
    (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from the user cluster controller. Structure is documented below.
    errorMessage string
    (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can indicate a real problem requiring user intervention.
    conditions Sequence[VMwareNodePoolStatusCondition]
    (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from the user cluster controller. Structure is documented below.
    error_message str
    (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can indicate a real problem requiring user intervention.
    conditions List<Property Map>
    (Output) ResourceConditions provide a standard mechanism for higher-level status reporting from the user cluster controller. Structure is documented below.
    errorMessage String
    (Output) Human-friendly representation of the error message from the user cluster controller. The error message can be temporary as the user cluster controller creates a cluster or node pool. If the error message persists for a longer period of time, it can indicate a real problem requiring user intervention.

    VMwareNodePoolStatusCondition, VMwareNodePoolStatusConditionArgs

    LastTransitionTime string
    (Output) Last time the condition transitioned from one status to another.
    Message string
    (Output) Human-readable message indicating details about last transition.
    Reason string
    (Output) Machine-readable message indicating details about last transition.
    State string
    (Output) The lifecycle state of the condition.
    Type string
    (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
    LastTransitionTime string
    (Output) Last time the condition transitioned from one status to another.
    Message string
    (Output) Human-readable message indicating details about last transition.
    Reason string
    (Output) Machine-readable message indicating details about last transition.
    State string
    (Output) The lifecycle state of the condition.
    Type string
    (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
    lastTransitionTime String
    (Output) Last time the condition transitioned from one status to another.
    message String
    (Output) Human-readable message indicating details about last transition.
    reason String
    (Output) Machine-readable message indicating details about last transition.
    state String
    (Output) The lifecycle state of the condition.
    type String
    (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
    lastTransitionTime string
    (Output) Last time the condition transitioned from one status to another.
    message string
    (Output) Human-readable message indicating details about last transition.
    reason string
    (Output) Machine-readable message indicating details about last transition.
    state string
    (Output) The lifecycle state of the condition.
    type string
    (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
    last_transition_time str
    (Output) Last time the condition transitioned from one status to another.
    message str
    (Output) Human-readable message indicating details about last transition.
    reason str
    (Output) Machine-readable message indicating details about last transition.
    state str
    (Output) The lifecycle state of the condition.
    type str
    (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
    lastTransitionTime String
    (Output) Last time the condition transitioned from one status to another.
    message String
    (Output) Human-readable message indicating details about last transition.
    reason String
    (Output) Machine-readable message indicating details about last transition.
    state String
    (Output) The lifecycle state of the condition.
    type String
    (Output) Type of the condition. (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
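
    Because statuses and their conditions are output-only, they are read rather than set; a hedged TypeScript sketch that summarizes each reported condition (pool arguments below are placeholders):

    import * as gcp from "@pulumi/gcp";

    // Placeholder pool; see the Example Usage section for complete arguments.
    const pool = new gcp.gkeonprem.VMwareNodePool("status-demo", {
        location: "us-west1",
        vmwareCluster: "my-cluster",
        config: { imageType: "ubuntu_containerd" },
    });

    // Summarize each output-only status as "<type>: <state>" pairs per condition.
    export const conditionSummaries = pool.statuses.apply(statuses =>
        (statuses ?? []).map(s =>
            (s.conditions ?? []).map(c => `${c.type}: ${c.state}`).join(", ")));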

    Import

    VMwareNodePool can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/vmwareClusters/{{vmware_cluster}}/vmwareNodePools/{{name}}

    • {{project}}/{{location}}/{{vmware_cluster}}/{{name}}

    • {{location}}/{{vmware_cluster}}/{{name}}

    When using the pulumi import command, VMwareNodePool can be imported using one of the formats above. For example:

    $ pulumi import gcp:gkeonprem/vMwareNodePool:VMwareNodePool default projects/{{project}}/locations/{{location}}/vmwareClusters/{{vmware_cluster}}/vmwareNodePools/{{name}}
    
    $ pulumi import gcp:gkeonprem/vMwareNodePool:VMwareNodePool default {{project}}/{{location}}/{{vmware_cluster}}/{{name}}
    
    $ pulumi import gcp:gkeonprem/vMwareNodePool:VMwareNodePool default {{location}}/{{vmware_cluster}}/{{name}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.