
gcp.edgecontainer.NodePool

Google Cloud Classic v8.9.3 published on Monday, Nov 18, 2024 by Pulumi

    A set of Kubernetes nodes in a cluster with common configuration and specification.

    To get more information about NodePool, see the Google Distributed Cloud Edge API documentation and the "Create and manage node pools" how-to guide.

    Example Usage

    Edgecontainer Node Pool

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const project = gcp.organizations.getProject({});
    const cluster = new gcp.edgecontainer.Cluster("cluster", {
        name: "default",
        location: "us-central1",
        authorization: {
            adminUsers: {
                username: "admin@hashicorptest.com",
            },
        },
        networking: {
            clusterIpv4CidrBlocks: ["10.0.0.0/16"],
            servicesIpv4CidrBlocks: ["10.1.0.0/16"],
        },
        fleet: {
            project: project.then(project => `projects/${project.number}`),
        },
    });
    const _default = new gcp.edgecontainer.NodePool("default", {
        name: "nodepool-1",
        cluster: cluster.name,
        location: "us-central1",
        nodeLocation: "us-central1-edge-example-edgesite",
        nodeCount: 3,
        labels: {
            my_key: "my_val",
            other_key: "other_val",
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    project = gcp.organizations.get_project()
    cluster = gcp.edgecontainer.Cluster("cluster",
        name="default",
        location="us-central1",
        authorization={
            "admin_users": {
                "username": "admin@hashicorptest.com",
            },
        },
        networking={
            "cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
            "services_ipv4_cidr_blocks": ["10.1.0.0/16"],
        },
        fleet={
            "project": f"projects/{project.number}",
        })
    default = gcp.edgecontainer.NodePool("default",
        name="nodepool-1",
        cluster=cluster.name,
        location="us-central1",
        node_location="us-central1-edge-example-edgesite",
        node_count=3,
        labels={
            "my_key": "my_val",
            "other_key": "other_val",
        })
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/edgecontainer"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
    		if err != nil {
    			return err
    		}
    		cluster, err := edgecontainer.NewCluster(ctx, "cluster", &edgecontainer.ClusterArgs{
    			Name:     pulumi.String("default"),
    			Location: pulumi.String("us-central1"),
    			Authorization: &edgecontainer.ClusterAuthorizationArgs{
    				AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
    					Username: pulumi.String("admin@hashicorptest.com"),
    				},
    			},
    			Networking: &edgecontainer.ClusterNetworkingArgs{
    				ClusterIpv4CidrBlocks: pulumi.StringArray{
    					pulumi.String("10.0.0.0/16"),
    				},
    				ServicesIpv4CidrBlocks: pulumi.StringArray{
    					pulumi.String("10.1.0.0/16"),
    				},
    			},
    			Fleet: &edgecontainer.ClusterFleetArgs{
    				Project: pulumi.Sprintf("projects/%v", project.Number),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = edgecontainer.NewNodePool(ctx, "default", &edgecontainer.NodePoolArgs{
    			Name:         pulumi.String("nodepool-1"),
    			Cluster:      cluster.Name,
    			Location:     pulumi.String("us-central1"),
    			NodeLocation: pulumi.String("us-central1-edge-example-edgesite"),
    			NodeCount:    pulumi.Int(3),
    			Labels: pulumi.StringMap{
    				"my_key":    pulumi.String("my_val"),
    				"other_key": pulumi.String("other_val"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var project = Gcp.Organizations.GetProject.Invoke();
    
        var cluster = new Gcp.EdgeContainer.Cluster("cluster", new()
        {
            Name = "default",
            Location = "us-central1",
            Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
            {
                AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
                {
                    Username = "admin@hashicorptest.com",
                },
            },
            Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
            {
                ClusterIpv4CidrBlocks = new[]
                {
                    "10.0.0.0/16",
                },
                ServicesIpv4CidrBlocks = new[]
                {
                    "10.1.0.0/16",
                },
            },
            Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
            {
                Project = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}",
            },
        });
    
        var @default = new Gcp.EdgeContainer.NodePool("default", new()
        {
            Name = "nodepool-1",
            Cluster = cluster.Name,
            Location = "us-central1",
            NodeLocation = "us-central1-edge-example-edgesite",
            NodeCount = 3,
            Labels = 
            {
                { "my_key", "my_val" },
                { "other_key", "other_val" },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.organizations.OrganizationsFunctions;
    import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
    import com.pulumi.gcp.edgecontainer.Cluster;
    import com.pulumi.gcp.edgecontainer.ClusterArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
    import com.pulumi.gcp.edgecontainer.NodePool;
    import com.pulumi.gcp.edgecontainer.NodePoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var project = OrganizationsFunctions.getProject();
    
            var cluster = new Cluster("cluster", ClusterArgs.builder()
                .name("default")
                .location("us-central1")
                .authorization(ClusterAuthorizationArgs.builder()
                    .adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
                        .username("admin@hashicorptest.com")
                        .build())
                    .build())
                .networking(ClusterNetworkingArgs.builder()
                    .clusterIpv4CidrBlocks("10.0.0.0/16")
                    .servicesIpv4CidrBlocks("10.1.0.0/16")
                    .build())
                .fleet(ClusterFleetArgs.builder()
                    .project(String.format("projects/%s", project.applyValue(getProjectResult -> getProjectResult.number())))
                    .build())
                .build());
    
            var default_ = new NodePool("default", NodePoolArgs.builder()
                .name("nodepool-1")
                .cluster(cluster.name())
                .location("us-central1")
                .nodeLocation("us-central1-edge-example-edgesite")
                .nodeCount(3)
                .labels(Map.ofEntries(
                    Map.entry("my_key", "my_val"),
                    Map.entry("other_key", "other_val")
                ))
                .build());
    
        }
    }
    
    resources:
      cluster:
        type: gcp:edgecontainer:Cluster
        properties:
          name: default
          location: us-central1
          authorization:
            adminUsers:
              username: admin@hashicorptest.com
          networking:
            clusterIpv4CidrBlocks:
              - 10.0.0.0/16
            servicesIpv4CidrBlocks:
              - 10.1.0.0/16
          fleet:
            project: projects/${project.number}
      default:
        type: gcp:edgecontainer:NodePool
        properties:
          name: nodepool-1
          cluster: ${cluster.name}
          location: us-central1
          nodeLocation: us-central1-edge-example-edgesite
          nodeCount: 3
          labels:
            my_key: my_val
            other_key: other_val
    variables:
      project:
        fn::invoke:
          Function: gcp:organizations:getProject
          Arguments: {}
    

    Edgecontainer Node Pool With Cmek

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const project = gcp.organizations.getProject({});
    const cluster = new gcp.edgecontainer.Cluster("cluster", {
        name: "default",
        location: "us-central1",
        authorization: {
            adminUsers: {
                username: "admin@hashicorptest.com",
            },
        },
        networking: {
            clusterIpv4CidrBlocks: ["10.0.0.0/16"],
            servicesIpv4CidrBlocks: ["10.1.0.0/16"],
        },
        fleet: {
            project: project.then(project => `projects/${project.number}`),
        },
    });
    const keyRing = new gcp.kms.KeyRing("key_ring", {
        name: "keyring",
        location: "us-central1",
    });
    const cryptoKeyCryptoKey = new gcp.kms.CryptoKey("crypto_key", {
        name: "key",
        keyRing: keyRing.id,
    });
    const cryptoKey = new gcp.kms.CryptoKeyIAMMember("crypto_key", {
        cryptoKeyId: cryptoKeyCryptoKey.id,
        role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        member: project.then(project => `serviceAccount:service-${project.number}@gcp-sa-edgecontainer.iam.gserviceaccount.com`),
    });
    const _default = new gcp.edgecontainer.NodePool("default", {
        name: "nodepool-1",
        cluster: cluster.name,
        location: "us-central1",
        nodeLocation: "us-central1-edge-example-edgesite",
        nodeCount: 3,
        localDiskEncryption: {
            kmsKey: cryptoKeyCryptoKey.id,
        },
    }, {
        dependsOn: [cryptoKey],
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    project = gcp.organizations.get_project()
    cluster = gcp.edgecontainer.Cluster("cluster",
        name="default",
        location="us-central1",
        authorization={
            "admin_users": {
                "username": "admin@hashicorptest.com",
            },
        },
        networking={
            "cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
            "services_ipv4_cidr_blocks": ["10.1.0.0/16"],
        },
        fleet={
            "project": f"projects/{project.number}",
        })
    key_ring = gcp.kms.KeyRing("key_ring",
        name="keyring",
        location="us-central1")
    crypto_key_crypto_key = gcp.kms.CryptoKey("crypto_key",
        name="key",
        key_ring=key_ring.id)
    crypto_key = gcp.kms.CryptoKeyIAMMember("crypto_key",
        crypto_key_id=crypto_key_crypto_key.id,
        role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
        member=f"serviceAccount:service-{project.number}@gcp-sa-edgecontainer.iam.gserviceaccount.com")
    default = gcp.edgecontainer.NodePool("default",
        name="nodepool-1",
        cluster=cluster.name,
        location="us-central1",
        node_location="us-central1-edge-example-edgesite",
        node_count=3,
        local_disk_encryption={
            "kms_key": crypto_key_crypto_key.id,
        },
        opts=pulumi.ResourceOptions(depends_on=[crypto_key]))
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/edgecontainer"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
    		if err != nil {
    			return err
    		}
    		cluster, err := edgecontainer.NewCluster(ctx, "cluster", &edgecontainer.ClusterArgs{
    			Name:     pulumi.String("default"),
    			Location: pulumi.String("us-central1"),
    			Authorization: &edgecontainer.ClusterAuthorizationArgs{
    				AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
    					Username: pulumi.String("admin@hashicorptest.com"),
    				},
    			},
    			Networking: &edgecontainer.ClusterNetworkingArgs{
    				ClusterIpv4CidrBlocks: pulumi.StringArray{
    					pulumi.String("10.0.0.0/16"),
    				},
    				ServicesIpv4CidrBlocks: pulumi.StringArray{
    					pulumi.String("10.1.0.0/16"),
    				},
    			},
    			Fleet: &edgecontainer.ClusterFleetArgs{
    				Project: pulumi.Sprintf("projects/%v", project.Number),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{
    			Name:     pulumi.String("keyring"),
    			Location: pulumi.String("us-central1"),
    		})
    		if err != nil {
    			return err
    		}
    		cryptoKeyCryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{
    			Name:    pulumi.String("key"),
    			KeyRing: keyRing.ID(),
    		})
    		if err != nil {
    			return err
    		}
    		cryptoKey, err := kms.NewCryptoKeyIAMMember(ctx, "crypto_key", &kms.CryptoKeyIAMMemberArgs{
    			CryptoKeyId: cryptoKeyCryptoKey.ID(),
    			Role:        pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
    			Member:      pulumi.Sprintf("serviceAccount:service-%v@gcp-sa-edgecontainer.iam.gserviceaccount.com", project.Number),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = edgecontainer.NewNodePool(ctx, "default", &edgecontainer.NodePoolArgs{
    			Name:         pulumi.String("nodepool-1"),
    			Cluster:      cluster.Name,
    			Location:     pulumi.String("us-central1"),
    			NodeLocation: pulumi.String("us-central1-edge-example-edgesite"),
    			NodeCount:    pulumi.Int(3),
    			LocalDiskEncryption: &edgecontainer.NodePoolLocalDiskEncryptionArgs{
    				KmsKey: cryptoKeyCryptoKey.ID(),
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			cryptoKey,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var project = Gcp.Organizations.GetProject.Invoke();
    
        var cluster = new Gcp.EdgeContainer.Cluster("cluster", new()
        {
            Name = "default",
            Location = "us-central1",
            Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
            {
                AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
                {
                    Username = "admin@hashicorptest.com",
                },
            },
            Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
            {
                ClusterIpv4CidrBlocks = new[]
                {
                    "10.0.0.0/16",
                },
                ServicesIpv4CidrBlocks = new[]
                {
                    "10.1.0.0/16",
                },
            },
            Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
            {
                Project = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}",
            },
        });
    
        var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
        {
            Name = "keyring",
            Location = "us-central1",
        });
    
        var cryptoKeyCryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
        {
            Name = "key",
            KeyRing = keyRing.Id,
        });
    
        var cryptoKey = new Gcp.Kms.CryptoKeyIAMMember("crypto_key", new()
        {
            CryptoKeyId = cryptoKeyCryptoKey.Id,
            Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
            Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@gcp-sa-edgecontainer.iam.gserviceaccount.com",
        });
    
        var @default = new Gcp.EdgeContainer.NodePool("default", new()
        {
            Name = "nodepool-1",
            Cluster = cluster.Name,
            Location = "us-central1",
            NodeLocation = "us-central1-edge-example-edgesite",
            NodeCount = 3,
            LocalDiskEncryption = new Gcp.EdgeContainer.Inputs.NodePoolLocalDiskEncryptionArgs
            {
                KmsKey = cryptoKeyCryptoKey.Id,
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                cryptoKey,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.organizations.OrganizationsFunctions;
    import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
    import com.pulumi.gcp.edgecontainer.Cluster;
    import com.pulumi.gcp.edgecontainer.ClusterArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
    import com.pulumi.gcp.kms.KeyRing;
    import com.pulumi.gcp.kms.KeyRingArgs;
    import com.pulumi.gcp.kms.CryptoKey;
    import com.pulumi.gcp.kms.CryptoKeyArgs;
    import com.pulumi.gcp.kms.CryptoKeyIAMMember;
    import com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;
    import com.pulumi.gcp.edgecontainer.NodePool;
    import com.pulumi.gcp.edgecontainer.NodePoolArgs;
    import com.pulumi.gcp.edgecontainer.inputs.NodePoolLocalDiskEncryptionArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var project = OrganizationsFunctions.getProject();
    
            var cluster = new Cluster("cluster", ClusterArgs.builder()
                .name("default")
                .location("us-central1")
                .authorization(ClusterAuthorizationArgs.builder()
                    .adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
                        .username("admin@hashicorptest.com")
                        .build())
                    .build())
                .networking(ClusterNetworkingArgs.builder()
                    .clusterIpv4CidrBlocks("10.0.0.0/16")
                    .servicesIpv4CidrBlocks("10.1.0.0/16")
                    .build())
                .fleet(ClusterFleetArgs.builder()
                    .project(String.format("projects/%s", project.applyValue(getProjectResult -> getProjectResult.number())))
                    .build())
                .build());
    
            var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()
                .name("keyring")
                .location("us-central1")
                .build());
    
            var cryptoKeyCryptoKey = new CryptoKey("cryptoKeyCryptoKey", CryptoKeyArgs.builder()
                .name("key")
                .keyRing(keyRing.id())
                .build());
    
            var cryptoKey = new CryptoKeyIAMMember("cryptoKey", CryptoKeyIAMMemberArgs.builder()
                .cryptoKeyId(cryptoKeyCryptoKey.id())
                .role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
                .member(String.format("serviceAccount:service-%s@gcp-sa-edgecontainer.iam.gserviceaccount.com", project.applyValue(getProjectResult -> getProjectResult.number())))
                .build());
    
            var default_ = new NodePool("default", NodePoolArgs.builder()
                .name("nodepool-1")
                .cluster(cluster.name())
                .location("us-central1")
                .nodeLocation("us-central1-edge-example-edgesite")
                .nodeCount(3)
                .localDiskEncryption(NodePoolLocalDiskEncryptionArgs.builder()
                    .kmsKey(cryptoKeyCryptoKey.id())
                    .build())
                .build(), CustomResourceOptions.builder()
                    .dependsOn(cryptoKey)
                    .build());
    
        }
    }
    
    resources:
      cluster:
        type: gcp:edgecontainer:Cluster
        properties:
          name: default
          location: us-central1
          authorization:
            adminUsers:
              username: admin@hashicorptest.com
          networking:
            clusterIpv4CidrBlocks:
              - 10.0.0.0/16
            servicesIpv4CidrBlocks:
              - 10.1.0.0/16
          fleet:
            project: projects/${project.number}
      cryptoKey:
        type: gcp:kms:CryptoKeyIAMMember
        name: crypto_key
        properties:
          cryptoKeyId: ${cryptoKeyCryptoKey.id}
          role: roles/cloudkms.cryptoKeyEncrypterDecrypter
          member: serviceAccount:service-${project.number}@gcp-sa-edgecontainer.iam.gserviceaccount.com
      cryptoKeyCryptoKey:
        type: gcp:kms:CryptoKey
        name: crypto_key
        properties:
          name: key
          keyRing: ${keyRing.id}
      keyRing:
        type: gcp:kms:KeyRing
        name: key_ring
        properties:
          name: keyring
          location: us-central1
      default:
        type: gcp:edgecontainer:NodePool
        properties:
          name: nodepool-1
          cluster: ${cluster.name}
          location: us-central1
          nodeLocation: us-central1-edge-example-edgesite
          nodeCount: 3
          localDiskEncryption:
            kmsKey: ${cryptoKeyCryptoKey.id}
        options:
          dependsOn:
            - ${cryptoKey}
    variables:
      project:
        fn::invoke:
          Function: gcp:organizations:getProject
          Arguments: {}
    

    Edgecontainer Local Control Plane Node Pool

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const project = gcp.organizations.getProject({});
    const _default = new gcp.edgecontainer.Cluster("default", {
        name: "",
        location: "us-central1",
        authorization: {
            adminUsers: {
                username: "admin@hashicorptest.com",
            },
        },
        networking: {
            clusterIpv4CidrBlocks: ["10.0.0.0/16"],
            servicesIpv4CidrBlocks: ["10.1.0.0/16"],
        },
        fleet: {
            project: project.then(project => `projects/${project.number}`),
        },
        externalLoadBalancerIpv4AddressPools: ["10.100.0.0-10.100.0.10"],
        controlPlane: {
            local: {
                nodeLocation: "us-central1-edge-example-edgesite",
                nodeCount: 1,
                machineFilter: "machine-name",
                sharedDeploymentPolicy: "ALLOWED",
            },
        },
    });
    const defaultNodePool = new gcp.edgecontainer.NodePool("default", {
        name: "nodepool-1",
        cluster: _default.name,
        location: "us-central1",
        nodeLocation: "us-central1-edge-example-edgesite",
        nodeCount: 3,
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    project = gcp.organizations.get_project()
    default = gcp.edgecontainer.Cluster("default",
        name="",
        location="us-central1",
        authorization={
            "admin_users": {
                "username": "admin@hashicorptest.com",
            },
        },
        networking={
            "cluster_ipv4_cidr_blocks": ["10.0.0.0/16"],
            "services_ipv4_cidr_blocks": ["10.1.0.0/16"],
        },
        fleet={
            "project": f"projects/{project.number}",
        },
        external_load_balancer_ipv4_address_pools=["10.100.0.0-10.100.0.10"],
        control_plane={
            "local": {
                "node_location": "us-central1-edge-example-edgesite",
                "node_count": 1,
                "machine_filter": "machine-name",
                "shared_deployment_policy": "ALLOWED",
            },
        })
    default_node_pool = gcp.edgecontainer.NodePool("default",
        name="nodepool-1",
        cluster=cluster["name"],
        location="us-central1",
        node_location="us-central1-edge-example-edgesite",
        node_count=3)
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/edgecontainer"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
    		if err != nil {
    			return err
    		}
    		defaultCluster, err := edgecontainer.NewCluster(ctx, "default", &edgecontainer.ClusterArgs{
    			Name:     pulumi.String(""),
    			Location: pulumi.String("us-central1"),
    			Authorization: &edgecontainer.ClusterAuthorizationArgs{
    				AdminUsers: &edgecontainer.ClusterAuthorizationAdminUsersArgs{
    					Username: pulumi.String("admin@hashicorptest.com"),
    				},
    			},
    			Networking: &edgecontainer.ClusterNetworkingArgs{
    				ClusterIpv4CidrBlocks: pulumi.StringArray{
    					pulumi.String("10.0.0.0/16"),
    				},
    				ServicesIpv4CidrBlocks: pulumi.StringArray{
    					pulumi.String("10.1.0.0/16"),
    				},
    			},
    			Fleet: &edgecontainer.ClusterFleetArgs{
    				Project: pulumi.Sprintf("projects/%v", project.Number),
    			},
    			ExternalLoadBalancerIpv4AddressPools: pulumi.StringArray{
    				pulumi.String("10.100.0.0-10.100.0.10"),
    			},
    			ControlPlane: &edgecontainer.ClusterControlPlaneArgs{
    				Local: &edgecontainer.ClusterControlPlaneLocalArgs{
    					NodeLocation:           pulumi.String("us-central1-edge-example-edgesite"),
    					NodeCount:              pulumi.Int(1),
    					MachineFilter:          pulumi.String("machine-name"),
    					SharedDeploymentPolicy: pulumi.String("ALLOWED"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = edgecontainer.NewNodePool(ctx, "default", &edgecontainer.NodePoolArgs{
    			Name:         pulumi.String("nodepool-1"),
    			Cluster:      defaultCluster.Name,
    			Location:     pulumi.String("us-central1"),
    			NodeLocation: pulumi.String("us-central1-edge-example-edgesite"),
    			NodeCount:    pulumi.Int(3),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var project = Gcp.Organizations.GetProject.Invoke();
    
        var @default = new Gcp.EdgeContainer.Cluster("default", new()
        {
            Name = "",
            Location = "us-central1",
            Authorization = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationArgs
            {
                AdminUsers = new Gcp.EdgeContainer.Inputs.ClusterAuthorizationAdminUsersArgs
                {
                    Username = "admin@hashicorptest.com",
                },
            },
            Networking = new Gcp.EdgeContainer.Inputs.ClusterNetworkingArgs
            {
                ClusterIpv4CidrBlocks = new[]
                {
                    "10.0.0.0/16",
                },
                ServicesIpv4CidrBlocks = new[]
                {
                    "10.1.0.0/16",
                },
            },
            Fleet = new Gcp.EdgeContainer.Inputs.ClusterFleetArgs
            {
                Project = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}",
            },
            ExternalLoadBalancerIpv4AddressPools = new[]
            {
                "10.100.0.0-10.100.0.10",
            },
            ControlPlane = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneArgs
            {
                Local = new Gcp.EdgeContainer.Inputs.ClusterControlPlaneLocalArgs
                {
                    NodeLocation = "us-central1-edge-example-edgesite",
                    NodeCount = 1,
                    MachineFilter = "machine-name",
                    SharedDeploymentPolicy = "ALLOWED",
                },
            },
        });
    
        var defaultNodePool = new Gcp.EdgeContainer.NodePool("default", new()
        {
            Name = "nodepool-1",
            Cluster = @default.Name,
            Location = "us-central1",
            NodeLocation = "us-central1-edge-example-edgesite",
            NodeCount = 3,
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.organizations.OrganizationsFunctions;
    import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
    import com.pulumi.gcp.edgecontainer.Cluster;
    import com.pulumi.gcp.edgecontainer.ClusterArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterAuthorizationAdminUsersArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterNetworkingArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterFleetArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneArgs;
    import com.pulumi.gcp.edgecontainer.inputs.ClusterControlPlaneLocalArgs;
    import com.pulumi.gcp.edgecontainer.NodePool;
    import com.pulumi.gcp.edgecontainer.NodePoolArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var project = OrganizationsFunctions.getProject();
    
            var default_ = new Cluster("default", ClusterArgs.builder()
                .name("")
                .location("us-central1")
                .authorization(ClusterAuthorizationArgs.builder()
                    .adminUsers(ClusterAuthorizationAdminUsersArgs.builder()
                        .username("admin@hashicorptest.com")
                        .build())
                    .build())
                .networking(ClusterNetworkingArgs.builder()
                    .clusterIpv4CidrBlocks("10.0.0.0/16")
                    .servicesIpv4CidrBlocks("10.1.0.0/16")
                    .build())
                .fleet(ClusterFleetArgs.builder()
                    .project(String.format("projects/%s", project.applyValue(getProjectResult -> getProjectResult.number())))
                    .build())
                .externalLoadBalancerIpv4AddressPools("10.100.0.0-10.100.0.10")
                .controlPlane(ClusterControlPlaneArgs.builder()
                    .local(ClusterControlPlaneLocalArgs.builder()
                        .nodeLocation("us-central1-edge-example-edgesite")
                        .nodeCount(1)
                        .machineFilter("machine-name")
                        .sharedDeploymentPolicy("ALLOWED")
                        .build())
                    .build())
                .build());
    
            var defaultNodePool = new NodePool("defaultNodePool", NodePoolArgs.builder()
                .name("nodepool-1")
                .cluster(default_.name())
                .location("us-central1")
                .nodeLocation("us-central1-edge-example-edgesite")
                .nodeCount(3)
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:edgecontainer:Cluster
        properties:
          name: ""
          location: us-central1
          authorization:
            adminUsers:
              username: admin@hashicorptest.com
          networking:
            clusterIpv4CidrBlocks:
              - 10.0.0.0/16
            servicesIpv4CidrBlocks:
              - 10.1.0.0/16
          fleet:
            project: projects/${project.number}
          externalLoadBalancerIpv4AddressPools:
            - 10.100.0.0-10.100.0.10
          controlPlane:
            local:
              nodeLocation: us-central1-edge-example-edgesite
              nodeCount: 1
              machineFilter: machine-name
              sharedDeploymentPolicy: ALLOWED
      defaultNodePool:
        type: gcp:edgecontainer:NodePool
        name: default
        properties:
          name: nodepool-1
          cluster: ${default.name}
          location: us-central1
          nodeLocation: us-central1-edge-example-edgesite
          nodeCount: 3
    variables:
      project:
        fn::invoke:
          Function: gcp:organizations:getProject
          Arguments: {}
    

    Create NodePool Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
    @overload
    def NodePool(resource_name: str,
                 args: NodePoolArgs,
                 opts: Optional[ResourceOptions] = None)
    
    @overload
    def NodePool(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 cluster: Optional[str] = None,
                 location: Optional[str] = None,
                 node_count: Optional[int] = None,
                 node_location: Optional[str] = None,
                 labels: Optional[Mapping[str, str]] = None,
                 local_disk_encryption: Optional[NodePoolLocalDiskEncryptionArgs] = None,
                 machine_filter: Optional[str] = None,
                 name: Optional[str] = None,
                 node_config: Optional[NodePoolNodeConfigArgs] = None,
                 project: Optional[str] = None)
    func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
    public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
    public NodePool(String name, NodePoolArgs args)
    public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
    
    type: gcp:edgecontainer:NodePool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var gcpNodePoolResource = new Gcp.EdgeContainer.NodePool("gcpNodePoolResource", new()
    {
        Cluster = "string",
        Location = "string",
        NodeCount = 0,
        NodeLocation = "string",
        Labels = 
        {
            { "string", "string" },
        },
        LocalDiskEncryption = new Gcp.EdgeContainer.Inputs.NodePoolLocalDiskEncryptionArgs
        {
            KmsKey = "string",
            KmsKeyActiveVersion = "string",
            KmsKeyState = "string",
        },
        MachineFilter = "string",
        Name = "string",
        NodeConfig = new Gcp.EdgeContainer.Inputs.NodePoolNodeConfigArgs
        {
            Labels = 
            {
                { "string", "string" },
            },
        },
        Project = "string",
    });
    
    example, err := edgecontainer.NewNodePool(ctx, "gcpNodePoolResource", &edgecontainer.NodePoolArgs{
    	Cluster:      pulumi.String("string"),
    	Location:     pulumi.String("string"),
    	NodeCount:    pulumi.Int(0),
    	NodeLocation: pulumi.String("string"),
    	Labels: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	LocalDiskEncryption: &edgecontainer.NodePoolLocalDiskEncryptionArgs{
    		KmsKey:              pulumi.String("string"),
    		KmsKeyActiveVersion: pulumi.String("string"),
    		KmsKeyState:         pulumi.String("string"),
    	},
    	MachineFilter: pulumi.String("string"),
    	Name:          pulumi.String("string"),
    	NodeConfig: &edgecontainer.NodePoolNodeConfigArgs{
    		Labels: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    	},
    	Project: pulumi.String("string"),
    })
    
    var gcpNodePoolResource = new NodePool("gcpNodePoolResource", NodePoolArgs.builder()
        .cluster("string")
        .location("string")
        .nodeCount(0)
        .nodeLocation("string")
        .labels(Map.of("string", "string"))
        .localDiskEncryption(NodePoolLocalDiskEncryptionArgs.builder()
            .kmsKey("string")
            .kmsKeyActiveVersion("string")
            .kmsKeyState("string")
            .build())
        .machineFilter("string")
        .name("string")
        .nodeConfig(NodePoolNodeConfigArgs.builder()
            .labels(Map.of("string", "string"))
            .build())
        .project("string")
        .build());
    
    gcp_node_pool_resource = gcp.edgecontainer.NodePool("gcpNodePoolResource",
        cluster="string",
        location="string",
        node_count=0,
        node_location="string",
        labels={
            "string": "string",
        },
        local_disk_encryption={
            "kms_key": "string",
            "kms_key_active_version": "string",
            "kms_key_state": "string",
        },
        machine_filter="string",
        name="string",
        node_config={
            "labels": {
                "string": "string",
            },
        },
        project="string")
    
    const gcpNodePoolResource = new gcp.edgecontainer.NodePool("gcpNodePoolResource", {
        cluster: "string",
        location: "string",
        nodeCount: 0,
        nodeLocation: "string",
        labels: {
            string: "string",
        },
        localDiskEncryption: {
            kmsKey: "string",
            kmsKeyActiveVersion: "string",
            kmsKeyState: "string",
        },
        machineFilter: "string",
        name: "string",
        nodeConfig: {
            labels: {
                string: "string",
            },
        },
        project: "string",
    });
    
    type: gcp:edgecontainer:NodePool
    properties:
        cluster: string
        labels:
            string: string
        localDiskEncryption:
            kmsKey: string
            kmsKeyActiveVersion: string
            kmsKeyState: string
        location: string
        machineFilter: string
        name: string
        nodeConfig:
            labels:
                string: string
        nodeCount: 0
        nodeLocation: string
        project: string
    

    NodePool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
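    For example, the localDiskEncryption input below can be given either as a plain dictionary or as the generated NodePoolLocalDiskEncryptionArgs class; the two forms are equivalent (a minimal sketch, where the cluster name and KMS key path are placeholders):

    import pulumi_gcp as gcp

    # Dictionary-literal form of the nested input.
    pool_a = gcp.edgecontainer.NodePool("pool-a",
        cluster="my-cluster",  # placeholder cluster name
        location="us-central1",
        node_location="us-central1-edge-example-edgesite",
        node_count=3,
        local_disk_encryption={
            "kms_key": "projects/my-project/locations/us-central1/keyRings/keyring/cryptoKeys/key",
        })

    # Equivalent argument-class form.
    pool_b = gcp.edgecontainer.NodePool("pool-b",
        cluster="my-cluster",
        location="us-central1",
        node_location="us-central1-edge-example-edgesite",
        node_count=3,
        local_disk_encryption=gcp.edgecontainer.NodePoolLocalDiskEncryptionArgs(
            kms_key="projects/my-project/locations/us-central1/keyRings/keyring/cryptoKeys/key",
        ))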

    The NodePool resource accepts the following input properties:

    Cluster string
    The name of the target Distributed Cloud Edge Cluster.


    Location string
    The location of the resource.
    NodeCount int
    The number of nodes in the pool.
    NodeLocation string
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    Labels Dictionary<string, string>
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    LocalDiskEncryption NodePoolLocalDiskEncryption
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    MachineFilter string
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    Name string
    The resource name of the node pool.
    NodeConfig NodePoolNodeConfig
    Configuration for each node in the NodePool. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Cluster string
    The name of the target Distributed Cloud Edge Cluster.


    Location string
    The location of the resource.
    NodeCount int
    The number of nodes in the pool.
    NodeLocation string
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    Labels map[string]string
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    LocalDiskEncryption NodePoolLocalDiskEncryptionArgs
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    MachineFilter string
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    Name string
    The resource name of the node pool.
    NodeConfig NodePoolNodeConfigArgs
    Configuration for each node in the NodePool. Structure is documented below.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster String
    The name of the target Distributed Cloud Edge Cluster.


    location String
    The location of the resource.
    nodeCount Integer
    The number of nodes in the pool.
    nodeLocation String
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    labels Map<String,String>
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    localDiskEncryption NodePoolLocalDiskEncryption
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    machineFilter String
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name String
    The resource name of the node pool.
    nodeConfig NodePoolNodeConfig
    Configuration for each node in the NodePool. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster string
    The name of the target Distributed Cloud Edge Cluster.


    location string
    The location of the resource.
    nodeCount number
    The number of nodes in the pool.
    nodeLocation string
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    labels {[key: string]: string}
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    localDiskEncryption NodePoolLocalDiskEncryption
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    machineFilter string
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name string
    The resource name of the node pool.
    nodeConfig NodePoolNodeConfig
    Configuration for each node in the NodePool. Structure is documented below.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster str
    The name of the target Distributed Cloud Edge Cluster.


    location str
    The location of the resource.
    node_count int
    The number of nodes in the pool.
    node_location str
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    labels Mapping[str, str]
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    local_disk_encryption NodePoolLocalDiskEncryptionArgs
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    machine_filter str
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name str
    The resource name of the node pool.
    node_config NodePoolNodeConfigArgs
    Configuration for each node in the NodePool. Structure is documented below.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster String
    The name of the target Distributed Cloud Edge Cluster.


    location String
    The location of the resource.
    nodeCount Number
    The number of nodes in the pool.
    nodeLocation String
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    labels Map<String>
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    localDiskEncryption Property Map
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    machineFilter String
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name String
    The resource name of the node pool.
    nodeConfig Property Map
    Configuration for each node in the NodePool. Structure is documented below.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
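
    As a quick illustration of the machineFilter and nodeConfig inputs above, a minimal Python sketch (the cluster name and filter value are placeholders, not taken from the examples):

    import pulumi_gcp as gcp

    # Restrict which edge machines may join the pool and set per-node Kubernetes labels.
    filtered_pool = gcp.edgecontainer.NodePool("filtered-pool",
        cluster="my-cluster",  # placeholder cluster name
        location="us-central1",
        node_location="us-central1-edge-example-edgesite",
        node_count=3,
        machine_filter="name=machine-1",  # AIP-160 style filter; placeholder machine name
        node_config={
            "labels": {
                "pool-tier": "edge-workers",
            },
        })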

    Outputs

    All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:

    CreateTime string
    The time when the node pool was created.
    EffectiveLabels Dictionary<string, string>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    NodeVersion string
    The lowest release version among all worker nodes.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    The time when the node pool was last updated.
    CreateTime string
    The time when the node pool was created.
    EffectiveLabels map[string]string
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    NodeVersion string
    The lowest release version among all worker nodes.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    The time when the node pool was last updated.
    createTime String
    The time when the node pool was created.
    effectiveLabels Map<String,String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id String
    The provider-assigned unique ID for this managed resource.
    nodeVersion String
    The lowest release version among all worker nodes.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    The time when the node pool was last updated.
    createTime string
    The time when the node pool was created.
    effectiveLabels {[key: string]: string}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id string
    The provider-assigned unique ID for this managed resource.
    nodeVersion string
    The lowest release version among all worker nodes.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime string
    The time when the node pool was last updated.
    create_time str
    The time when the node pool was created.
    effective_labels Mapping[str, str]
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id str
    The provider-assigned unique ID for this managed resource.
    node_version str
    The lowest release version among all worker nodes.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    update_time str
    The time when the node pool was last updated.
    createTime String
    The time when the node pool was created.
    effectiveLabels Map<String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    id String
    The provider-assigned unique ID for this managed resource.
    nodeVersion String
    The lowest release version among all worker nodes.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    The time when the node pool was last updated.
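
    For instance, output properties such as nodeVersion and createTime can be exported from a program once the node pool is declared (a minimal Python sketch with placeholder values):

    import pulumi
    import pulumi_gcp as gcp

    pool = gcp.edgecontainer.NodePool("pool",
        cluster="my-cluster",  # placeholder cluster name
        location="us-central1",
        node_location="us-central1-edge-example-edgesite",
        node_count=3)

    # Output properties resolve after the resource has been created.
    pulumi.export("node_version", pool.node_version)
    pulumi.export("create_time", pool.create_time)
    pulumi.export("node_pool_id", pool.id)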

    Look up Existing NodePool Resource

    Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            cluster: Optional[str] = None,
            create_time: Optional[str] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            labels: Optional[Mapping[str, str]] = None,
            local_disk_encryption: Optional[NodePoolLocalDiskEncryptionArgs] = None,
            location: Optional[str] = None,
            machine_filter: Optional[str] = None,
            name: Optional[str] = None,
            node_config: Optional[NodePoolNodeConfigArgs] = None,
            node_count: Optional[int] = None,
            node_location: Optional[str] = None,
            node_version: Optional[str] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            update_time: Optional[str] = None) -> NodePool
    func GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)
    public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)
    public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
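
    For example, an existing node pool can be looked up by its full resource ID and its state read back (a minimal Python sketch; the project, cluster, and pool names in the ID are placeholders):

    import pulumi
    import pulumi_gcp as gcp

    # Look up an existing NodePool by its provider ID.
    existing = gcp.edgecontainer.NodePool.get("existing-pool",
        "projects/my-project/locations/us-central1/clusters/my-cluster/nodePools/nodepool-1")

    pulumi.export("existing_node_count", existing.node_count)
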
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Cluster string
    The name of the target Distributed Cloud Edge Cluster.


    CreateTime string
    The time when the node pool was created.
    EffectiveLabels Dictionary<string, string>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
    Labels Dictionary<string, string>
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    LocalDiskEncryption NodePoolLocalDiskEncryption
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    Location string
    The location of the resource.
    MachineFilter string
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    Name string
    The resource name of the node pool.
    NodeConfig NodePoolNodeConfig
    Configuration for each node in the NodePool. Structure is documented below.
    NodeCount int
    The number of nodes in the pool.
    NodeLocation string
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    NodeVersion string
    The lowest release version among all worker nodes.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    The time when the node pool was last updated.
    Cluster string
    The name of the target Distributed Cloud Edge Cluster.


    CreateTime string
    The time when the node pool was created.
    EffectiveLabels map[string]string
    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    Labels map[string]string
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    LocalDiskEncryption NodePoolLocalDiskEncryptionArgs
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    Location string
    The location of the resource.
    MachineFilter string
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    Name string
    The resource name of the node pool.
    NodeConfig NodePoolNodeConfigArgs
    Configuration for each node in the NodePool. Structure is documented below.
    NodeCount int
    The number of nodes in the pool.
    NodeLocation string
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    NodeVersion string
    The lowest release version among all worker nodes.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    UpdateTime string
    The time when the node pool was last updated.
    cluster String
    The name of the target Distributed Cloud Edge Cluster.


    createTime String
    The time when the node pool was created.
    effectiveLabels Map<String,String>
    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    labels Map<String,String>
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    localDiskEncryption NodePoolLocalDiskEncryption
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    location String
    The location of the resource.
    machineFilter String
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name String
    The resource name of the node pool.
    nodeConfig NodePoolNodeConfig
    Configuration for each node in the NodePool. Structure is documented below.
    nodeCount Integer
    The number of nodes in the pool.
    nodeLocation String
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    nodeVersion String
    The lowest release version among all worker nodes.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    The time when the node pool was last updated.
    cluster string
    The name of the target Distributed Cloud Edge Cluster.


    createTime string
    The time when the node pool was created.
    effectiveLabels {[key: string]: string}
    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    labels {[key: string]: string}
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    localDiskEncryption NodePoolLocalDiskEncryption
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    location string
    The location of the resource.
    machineFilter string
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name string
    The resource name of the node pool.
    nodeConfig NodePoolNodeConfig
    Configuration for each node in the NodePool. Structure is documented below.
    nodeCount number
    The number of nodes in the pool.
    nodeLocation string
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    nodeVersion string
    The lowest release version among all worker nodes.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime string
    The time when the node pool was last updated.
    cluster str
    The name of the target Distributed Cloud Edge Cluster.


    create_time str
    The time when the node pool was created.
    effective_labels Mapping[str, str]
    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    labels Mapping[str, str]
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    local_disk_encryption NodePoolLocalDiskEncryptionArgs
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    location str
    The location of the resource.
    machine_filter str
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name str
    The resource name of the node pool.
    node_config NodePoolNodeConfigArgs
    Configuration for each node in the NodePool. Structure is documented below.
    node_count int
    The number of nodes in the pool.
    node_location str
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    node_version str
    The lowest release version among all worker nodes.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    update_time str
    The time when the node pool was last updated.
    cluster String
    The name of the target Distributed Cloud Edge Cluster.


    createTime String
    The time when the node pool was created.
    effectiveLabels Map<String>
    All labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    labels Map<String>
    Labels associated with this resource. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.
    localDiskEncryption Property Map
    Local disk encryption options. This field is only used when enabling CMEK support. Structure is documented below.
    location String
    The location of the resource.
    machineFilter String
    Only machines matching this filter will be allowed to join the node pool. The filtering language accepts strings like "name=<name>", and is documented in more detail in AIP-160.
    name String
    The resource name of the node pool.
    nodeConfig Property Map
    Configuration for each node in the NodePool. Structure is documented below.
    nodeCount Number
    The number of nodes in the pool.
    nodeLocation String
    Name of the Google Distributed Cloud Edge zone where this node pool will be created. For example: us-central1-edge-customer-a.
    nodeVersion String
    The lowest release version among all worker nodes.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    updateTime String
    The time when the node pool was last updated.
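
    The relationship between labels, effectiveLabels, and pulumiLabels is easiest to see by exporting all three. The sketch below is illustrative only; every name, cluster, and location in it is a placeholder rather than a value taken from this page.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical node pool with one configured label.
    const labelled = new gcp.edgecontainer.NodePool("labelled", {
        name: "nodepool-labelled",
        cluster: "default",
        location: "us-central1",
        nodeLocation: "us-central1-edge-example-edgesite",
        nodeCount: 3,
        labels: {
            team: "storage",
        },
    });

    // `labels` reflects only the configuration above; `effectiveLabels` also includes
    // labels applied by the provider, other clients, and services; `pulumiLabels`
    // combines the configured labels with the provider's default labels.
    export const configuredLabels = labelled.labels;
    export const allLabels = labelled.effectiveLabels;
    export const combinedLabels = labelled.pulumiLabels;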

    Supporting Types

    NodePoolLocalDiskEncryption, NodePoolLocalDiskEncryptionArgs

    KmsKey string
    The Cloud KMS CryptoKey (e.g. projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}) to use for protecting node local disks. If not specified, a Google-managed key will be used instead.
    KmsKeyActiveVersion string
    (Output) The Cloud KMS CryptoKeyVersion currently in use for protecting node local disks. Only applicable if kmsKey is set.
    KmsKeyState string
    (Output) Availability of the Cloud KMS CryptoKey. If not KEY_AVAILABLE, then nodes may go offline as they cannot access their local data. This can be caused by a lack of permissions to use the key, or if the key is disabled or deleted.
    KmsKey string
    The Cloud KMS CryptoKey (e.g. projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}) to use for protecting node local disks. If not specified, a Google-managed key will be used instead.
    KmsKeyActiveVersion string
    (Output) The Cloud KMS CryptoKeyVersion currently in use for protecting node local disks. Only applicable if kmsKey is set.
    KmsKeyState string
    (Output) Availability of the Cloud KMS CryptoKey. If not KEY_AVAILABLE, then nodes may go offline as they cannot access their local data. This can be caused by a lack of permissions to use the key, or if the key is disabled or deleted.
    kmsKey String
    The Cloud KMS CryptoKey (e.g. projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}) to use for protecting node local disks. If not specified, a Google-managed key will be used instead.
    kmsKeyActiveVersion String
    (Output) The Cloud KMS CryptoKeyVersion currently in use for protecting node local disks. Only applicable if kmsKey is set.
    kmsKeyState String
    (Output) Availability of the Cloud KMS CryptoKey. If not KEY_AVAILABLE, then nodes may go offline as they cannot access their local data. This can be caused by a lack of permissions to use the key, or if the key is disabled or deleted.
    kmsKey string
    The Cloud KMS CryptoKey (e.g. projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}) to use for protecting node local disks. If not specified, a Google-managed key will be used instead.
    kmsKeyActiveVersion string
    (Output) The Cloud KMS CryptoKeyVersion currently in use for protecting node local disks. Only applicable if kmsKey is set.
    kmsKeyState string
    (Output) Availability of the Cloud KMS CryptoKey. If not KEY_AVAILABLE, then nodes may go offline as they cannot access their local data. This can be caused by a lack of permissions to use the key, or if the key is disabled or deleted.
    kms_key str
    The Cloud KMS CryptoKey (e.g. projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}) to use for protecting node local disks. If not specified, a Google-managed key will be used instead.
    kms_key_active_version str
    (Output) The Cloud KMS CryptoKeyVersion currently in use for protecting node local disks. Only applicable if kmsKey is set.
    kms_key_state str
    (Output) Availability of the Cloud KMS CryptoKey. If not KEY_AVAILABLE, then nodes may go offline as they cannot access their local data. This can be caused by a lack of permissions to use the key, or if the key is disabled or deleted.
    kmsKey String
    The Cloud KMS CryptoKey (e.g. projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}) to use for protecting node local disks. If not specified, a Google-managed key will be used instead.
    kmsKeyActiveVersion String
    (Output) The Cloud KMS CryptoKeyVersion currently in use for protecting node local disks. Only applicable if kmsKey is set.
    kmsKeyState String
    (Output) Availability of the Cloud KMS CryptoKey. If not KEY_AVAILABLE, then nodes may go offline as they cannot access their local data. This can be caused by a lack of permissions to use the key, or if the key is disabled or deleted.
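
    As a sketch of how these options fit together, the following assumes a pre-existing Cloud KMS key that the Distributed Cloud Edge service is allowed to use; the key path, cluster, and locations are all placeholders.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical CMEK-protected node pool; kmsKeyActiveVersion and kmsKeyState
    // are outputs populated by the service after creation.
    const encrypted = new gcp.edgecontainer.NodePool("encrypted", {
        name: "nodepool-cmek",
        cluster: "default",
        location: "us-central1",
        nodeLocation: "us-central1-edge-example-edgesite",
        nodeCount: 3,
        localDiskEncryption: {
            kmsKey: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
        },
    });

    export const kmsKeyState = encrypted.localDiskEncryption.apply(e => e?.kmsKeyState);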

    NodePoolNodeConfig, NodePoolNodeConfigArgs

    Labels Dictionary<string, string>
    The Kubernetes node labels.
    Labels map[string]string
    The Kubernetes node labels.
    labels Map<String,String>
    The Kubernetes node labels.
    labels {[key: string]: string}
    The Kubernetes node labels.
    labels Mapping[str, str]
    The Kubernetes node labels.
    labels Map<String>
    The Kubernetes node labels.

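    A brief sketch of setting Kubernetes node labels through nodeConfig; as with the other sketches on this page, the names and locations below are placeholders.

    import * as gcp from "@pulumi/gcp";

    // Hypothetical node pool whose nodes carry a Kubernetes node label.
    const withNodeLabels = new gcp.edgecontainer.NodePool("with-node-labels", {
        name: "nodepool-node-labels",
        cluster: "default",
        location: "us-central1",
        nodeLocation: "us-central1-edge-example-edgesite",
        nodeCount: 3,
        nodeConfig: {
            labels: {
                pool: "edge",
            },
        },
    });
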
    Import

    NodePool can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}

    • {{project}}/{{location}}/{{cluster}}/{{name}}

    • {{location}}/{{cluster}}/{{name}}

    When using the pulumi import command, NodePool can be imported using one of the formats above. For example:

    $ pulumi import gcp:edgecontainer/nodePool:NodePool default projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}
    
    $ pulumi import gcp:edgecontainer/nodePool:NodePool default {{project}}/{{location}}/{{cluster}}/{{name}}
    
    $ pulumi import gcp:edgecontainer/nodePool:NodePool default {{location}}/{{cluster}}/{{name}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.