
gcp.container.NodePool

Google Cloud Classic v8.9.3 published on Monday, Nov 18, 2024 by Pulumi

    Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from the cluster control plane. For more information, see the official documentation and the API reference.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const _default = new gcp.serviceaccount.Account("default", {
        accountId: "service-account-id",
        displayName: "Service Account",
    });
    const primary = new gcp.container.Cluster("primary", {
        name: "my-gke-cluster",
        location: "us-central1",
        removeDefaultNodePool: true,
        initialNodeCount: 1,
    });
    const primaryPreemptibleNodes = new gcp.container.NodePool("primary_preemptible_nodes", {
        name: "my-node-pool",
        cluster: primary.id,
        nodeCount: 1,
        nodeConfig: {
            preemptible: true,
            machineType: "e2-medium",
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default = gcp.serviceaccount.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    primary = gcp.container.Cluster("primary",
        name="my-gke-cluster",
        location="us-central1",
        remove_default_node_pool=True,
        initial_node_count=1)
    primary_preemptible_nodes = gcp.container.NodePool("primary_preemptible_nodes",
        name="my-node-pool",
        cluster=primary.id,
        node_count=1,
        node_config={
            "preemptible": True,
            "machine_type": "e2-medium",
            "service_account": default.email,
            "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
    			AccountId:   pulumi.String("service-account-id"),
    			DisplayName: pulumi.String("Service Account"),
    		})
    		if err != nil {
    			return err
    		}
    		primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
    			Name:                  pulumi.String("my-gke-cluster"),
    			Location:              pulumi.String("us-central1"),
    			RemoveDefaultNodePool: pulumi.Bool(true),
    			InitialNodeCount:      pulumi.Int(1),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = container.NewNodePool(ctx, "primary_preemptible_nodes", &container.NodePoolArgs{
    			Name:      pulumi.String("my-node-pool"),
    			Cluster:   primary.ID(),
    			NodeCount: pulumi.Int(1),
    			NodeConfig: &container.NodePoolNodeConfigArgs{
    				Preemptible:    pulumi.Bool(true),
    				MachineType:    pulumi.String("e2-medium"),
    				ServiceAccount: _default.Email,
    				OauthScopes: pulumi.StringArray{
    					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var @default = new Gcp.ServiceAccount.Account("default", new()
        {
            AccountId = "service-account-id",
            DisplayName = "Service Account",
        });
    
        var primary = new Gcp.Container.Cluster("primary", new()
        {
            Name = "my-gke-cluster",
            Location = "us-central1",
            RemoveDefaultNodePool = true,
            InitialNodeCount = 1,
        });
    
        var primaryPreemptibleNodes = new Gcp.Container.NodePool("primary_preemptible_nodes", new()
        {
            Name = "my-node-pool",
            Cluster = primary.Id,
            NodeCount = 1,
            NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
            {
                Preemptible = true,
                MachineType = "e2-medium",
                ServiceAccount = @default.Email,
                OauthScopes = new[]
                {
                    "https://www.googleapis.com/auth/cloud-platform",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.serviceaccount.Account;
    import com.pulumi.gcp.serviceaccount.AccountArgs;
    import com.pulumi.gcp.container.Cluster;
    import com.pulumi.gcp.container.ClusterArgs;
    import com.pulumi.gcp.container.NodePool;
    import com.pulumi.gcp.container.NodePoolArgs;
    import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Account("default", AccountArgs.builder()
                .accountId("service-account-id")
                .displayName("Service Account")
                .build());
    
            var primary = new Cluster("primary", ClusterArgs.builder()
                .name("my-gke-cluster")
                .location("us-central1")
                .removeDefaultNodePool(true)
                .initialNodeCount(1)
                .build());
    
            var primaryPreemptibleNodes = new NodePool("primaryPreemptibleNodes", NodePoolArgs.builder()
                .name("my-node-pool")
                .cluster(primary.id())
                .nodeCount(1)
                .nodeConfig(NodePoolNodeConfigArgs.builder()
                    .preemptible(true)
                    .machineType("e2-medium")
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .build())
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:serviceaccount:Account
        properties:
          accountId: service-account-id
          displayName: Service Account
      primary:
        type: gcp:container:Cluster
        properties:
          name: my-gke-cluster
          location: us-central1
          removeDefaultNodePool: true
          initialNodeCount: 1
      primaryPreemptibleNodes:
        type: gcp:container:NodePool
        name: primary_preemptible_nodes
        properties:
          name: my-node-pool
          cluster: ${primary.id}
          nodeCount: 1
          nodeConfig:
            preemptible: true
            machineType: e2-medium
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
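
    The generated attributes of these resources can be exported as stack outputs. A minimal TypeScript follow-up to the program above, using properties shown on this page (the output names themselves are illustrative):

    // Export identifiers of the cluster and node pool created above.
    export const clusterName = primary.name;
    export const nodePoolId = primaryPreemptibleNodes.id;
    export const nodePoolVersion = primaryPreemptibleNodes.version;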
    

    2 Node Pools, 1 Separately Managed + The Default Node Pool

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const _default = new gcp.serviceaccount.Account("default", {
        accountId: "service-account-id",
        displayName: "Service Account",
    });
    const primary = new gcp.container.Cluster("primary", {
        name: "marcellus-wallace",
        location: "us-central1-a",
        initialNodeCount: 3,
        nodeLocations: ["us-central1-c"],
        nodeConfig: {
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
            guestAccelerators: [{
                type: "nvidia-tesla-k80",
                count: 1,
            }],
        },
    });
    const np = new gcp.container.NodePool("np", {
        name: "my-node-pool",
        cluster: primary.id,
        nodeConfig: {
            machineType: "e2-medium",
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    default = gcp.serviceaccount.Account("default",
        account_id="service-account-id",
        display_name="Service Account")
    primary = gcp.container.Cluster("primary",
        name="marcellus-wallace",
        location="us-central1-a",
        initial_node_count=3,
        node_locations=["us-central1-c"],
        node_config={
            "service_account": default.email,
            "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
            "guest_accelerators": [{
                "type": "nvidia-tesla-k80",
                "count": 1,
            }],
        })
    np = gcp.container.NodePool("np",
        name="my-node-pool",
        cluster=primary.id,
        node_config={
            "machine_type": "e2-medium",
            "service_account": default.email,
            "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
    			AccountId:   pulumi.String("service-account-id"),
    			DisplayName: pulumi.String("Service Account"),
    		})
    		if err != nil {
    			return err
    		}
    		primary, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
    			Name:             pulumi.String("marcellus-wallace"),
    			Location:         pulumi.String("us-central1-a"),
    			InitialNodeCount: pulumi.Int(3),
    			NodeLocations: pulumi.StringArray{
    				pulumi.String("us-central1-c"),
    			},
    			NodeConfig: &container.ClusterNodeConfigArgs{
    				ServiceAccount: _default.Email,
    				OauthScopes: pulumi.StringArray{
    					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
    				},
    				GuestAccelerators: container.ClusterNodeConfigGuestAcceleratorArray{
    					&container.ClusterNodeConfigGuestAcceleratorArgs{
    						Type:  pulumi.String("nvidia-tesla-k80"),
    						Count: pulumi.Int(1),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = container.NewNodePool(ctx, "np", &container.NodePoolArgs{
    			Name:    pulumi.String("my-node-pool"),
    			Cluster: primary.ID(),
    			NodeConfig: &container.NodePoolNodeConfigArgs{
    				MachineType:    pulumi.String("e2-medium"),
    				ServiceAccount: _default.Email,
    				OauthScopes: pulumi.StringArray{
    					pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var @default = new Gcp.ServiceAccount.Account("default", new()
        {
            AccountId = "service-account-id",
            DisplayName = "Service Account",
        });
    
        var primary = new Gcp.Container.Cluster("primary", new()
        {
            Name = "marcellus-wallace",
            Location = "us-central1-a",
            InitialNodeCount = 3,
            NodeLocations = new[]
            {
                "us-central1-c",
            },
            NodeConfig = new Gcp.Container.Inputs.ClusterNodeConfigArgs
            {
                ServiceAccount = @default.Email,
                OauthScopes = new[]
                {
                    "https://www.googleapis.com/auth/cloud-platform",
                },
                GuestAccelerators = new[]
                {
                    new Gcp.Container.Inputs.ClusterNodeConfigGuestAcceleratorArgs
                    {
                        Type = "nvidia-tesla-k80",
                        Count = 1,
                    },
                },
            },
        });
    
        var np = new Gcp.Container.NodePool("np", new()
        {
            Name = "my-node-pool",
            Cluster = primary.Id,
            NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
            {
                MachineType = "e2-medium",
                ServiceAccount = @default.Email,
                OauthScopes = new[]
                {
                    "https://www.googleapis.com/auth/cloud-platform",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.serviceaccount.Account;
    import com.pulumi.gcp.serviceaccount.AccountArgs;
    import com.pulumi.gcp.container.Cluster;
    import com.pulumi.gcp.container.ClusterArgs;
    import com.pulumi.gcp.container.inputs.ClusterNodeConfigArgs;
    import com.pulumi.gcp.container.inputs.ClusterNodeConfigGuestAcceleratorArgs;
    import com.pulumi.gcp.container.NodePool;
    import com.pulumi.gcp.container.NodePoolArgs;
    import com.pulumi.gcp.container.inputs.NodePoolNodeConfigArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var default_ = new Account("default", AccountArgs.builder()
                .accountId("service-account-id")
                .displayName("Service Account")
                .build());
    
            var primary = new Cluster("primary", ClusterArgs.builder()
                .name("marcellus-wallace")
                .location("us-central1-a")
                .initialNodeCount(3)
                .nodeLocations("us-central1-c")
                .nodeConfig(ClusterNodeConfigArgs.builder()
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .guestAccelerators(ClusterNodeConfigGuestAcceleratorArgs.builder()
                        .type("nvidia-tesla-k80")
                        .count(1)
                        .build())
                    .build())
                .build());
    
            var np = new NodePool("np", NodePoolArgs.builder()
                .name("my-node-pool")
                .cluster(primary.id())
                .nodeConfig(NodePoolNodeConfigArgs.builder()
                    .machineType("e2-medium")
                    .serviceAccount(default_.email())
                    .oauthScopes("https://www.googleapis.com/auth/cloud-platform")
                    .build())
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:serviceaccount:Account
        properties:
          accountId: service-account-id
          displayName: Service Account
      np:
        type: gcp:container:NodePool
        properties:
          name: my-node-pool
          cluster: ${primary.id}
          nodeConfig:
            machineType: e2-medium
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
      primary:
        type: gcp:container:Cluster
        properties:
          name: marcellus-wallace
          location: us-central1-a
          initialNodeCount: 3
          nodeLocations:
            - us-central1-c
          nodeConfig:
            serviceAccount: ${default.email}
            oauthScopes:
              - https://www.googleapis.com/auth/cloud-platform
            guestAccelerators:
              - type: nvidia-tesla-k80
                count: 1
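
    Instead of pinning a fixed node count, the separately managed pool can use the autoscaling input documented below. A minimal TypeScript sketch, assuming the primary cluster and default service account from the example above (the resource name autoscaled-np is illustrative):

    const autoscaledNp = new gcp.container.NodePool("autoscaled-np", {
        name: "my-autoscaled-node-pool",
        cluster: primary.id,
        initialNodeCount: 1,
        // Let the cluster autoscaler manage the node count within these bounds.
        autoscaling: {
            minNodeCount: 1,
            maxNodeCount: 3,
        },
        nodeConfig: {
            machineType: "e2-medium",
            serviceAccount: _default.email,
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });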
    

    Create NodePool Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new NodePool(name: string, args: NodePoolArgs, opts?: CustomResourceOptions);
    @overload
    def NodePool(resource_name: str,
                 args: NodePoolArgs,
                 opts: Optional[ResourceOptions] = None)
    
    @overload
    def NodePool(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 cluster: Optional[str] = None,
                 network_config: Optional[NodePoolNetworkConfigArgs] = None,
                 name_prefix: Optional[str] = None,
                 location: Optional[str] = None,
                 management: Optional[NodePoolManagementArgs] = None,
                 node_config: Optional[NodePoolNodeConfigArgs] = None,
                 name: Optional[str] = None,
                 initial_node_count: Optional[int] = None,
                 autoscaling: Optional[NodePoolAutoscalingArgs] = None,
                 max_pods_per_node: Optional[int] = None,
                 node_count: Optional[int] = None,
                 node_locations: Optional[Sequence[str]] = None,
                 placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
                 project: Optional[str] = None,
                 queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
                 upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
                 version: Optional[str] = None)
    func NewNodePool(ctx *Context, name string, args NodePoolArgs, opts ...ResourceOption) (*NodePool, error)
    public NodePool(string name, NodePoolArgs args, CustomResourceOptions? opts = null)
    public NodePool(String name, NodePoolArgs args)
    public NodePool(String name, NodePoolArgs args, CustomResourceOptions options)
    
    type: gcp:container:NodePool
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    TypeScript
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.

    Python
    resource_name str
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.

    Go
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.

    C#
    name string
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.

    Java
    name String
    The unique name of the resource.
    args NodePoolArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
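
    The opts bag accepts standard Pulumi CustomResourceOptions such as dependsOn and protect. A minimal TypeScript sketch, assuming a cluster named primary as in the examples above:

    const np = new gcp.container.NodePool("np", {
        cluster: primary.id,
        nodeCount: 1,
    }, {
        dependsOn: [primary], // create the pool only after the cluster resource
        protect: true,        // guard the node pool against accidental deletion
    });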

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var nodePoolResource = new Gcp.Container.NodePool("nodePoolResource", new()
    {
        Cluster = "string",
        NetworkConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigArgs
        {
            AdditionalNodeNetworkConfigs = new[]
            {
                new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs
                {
                    Network = "string",
                    Subnetwork = "string",
                },
            },
            AdditionalPodNetworkConfigs = new[]
            {
                new Gcp.Container.Inputs.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs
                {
                    MaxPodsPerNode = 0,
                    SecondaryPodRange = "string",
                    Subnetwork = "string",
                },
            },
            CreatePodRange = false,
            EnablePrivateNodes = false,
            NetworkPerformanceConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigNetworkPerformanceConfigArgs
            {
                TotalEgressBandwidthTier = "string",
            },
            PodCidrOverprovisionConfig = new Gcp.Container.Inputs.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs
            {
                Disabled = false,
            },
            PodIpv4CidrBlock = "string",
            PodRange = "string",
        },
        NamePrefix = "string",
        Location = "string",
        Management = new Gcp.Container.Inputs.NodePoolManagementArgs
        {
            AutoRepair = false,
            AutoUpgrade = false,
        },
        NodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigArgs
        {
            AdvancedMachineFeatures = new Gcp.Container.Inputs.NodePoolNodeConfigAdvancedMachineFeaturesArgs
            {
                ThreadsPerCore = 0,
                EnableNestedVirtualization = false,
            },
            BootDiskKmsKey = "string",
            ConfidentialNodes = new Gcp.Container.Inputs.NodePoolNodeConfigConfidentialNodesArgs
            {
                Enabled = false,
            },
            ContainerdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigArgs
            {
                PrivateRegistryAccessConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs
                {
                    Enabled = false,
                    CertificateAuthorityDomainConfigs = new[]
                    {
                        new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs
                        {
                            Fqdns = new[]
                            {
                                "string",
                            },
                            GcpSecretManagerCertificateConfig = new Gcp.Container.Inputs.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs
                            {
                                SecretUri = "string",
                            },
                        },
                    },
                },
            },
            DiskSizeGb = 0,
            DiskType = "string",
            EffectiveTaints = new[]
            {
                new Gcp.Container.Inputs.NodePoolNodeConfigEffectiveTaintArgs
                {
                    Effect = "string",
                    Key = "string",
                    Value = "string",
                },
            },
            EnableConfidentialStorage = false,
            EphemeralStorageConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageConfigArgs
            {
                LocalSsdCount = 0,
            },
            EphemeralStorageLocalSsdConfig = new Gcp.Container.Inputs.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs
            {
                LocalSsdCount = 0,
            },
            FastSocket = new Gcp.Container.Inputs.NodePoolNodeConfigFastSocketArgs
            {
                Enabled = false,
            },
            GcfsConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGcfsConfigArgs
            {
                Enabled = false,
            },
            GuestAccelerators = new[]
            {
                new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorArgs
                {
                    Count = 0,
                    Type = "string",
                    GpuDriverInstallationConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs
                    {
                        GpuDriverVersion = "string",
                    },
                    GpuPartitionSize = "string",
                    GpuSharingConfig = new Gcp.Container.Inputs.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs
                    {
                        GpuSharingStrategy = "string",
                        MaxSharedClientsPerGpu = 0,
                    },
                },
            },
            Gvnic = new Gcp.Container.Inputs.NodePoolNodeConfigGvnicArgs
            {
                Enabled = false,
            },
            HostMaintenancePolicy = new Gcp.Container.Inputs.NodePoolNodeConfigHostMaintenancePolicyArgs
            {
                MaintenanceInterval = "string",
            },
            ImageType = "string",
            KubeletConfig = new Gcp.Container.Inputs.NodePoolNodeConfigKubeletConfigArgs
            {
                CpuCfsQuota = false,
                CpuCfsQuotaPeriod = "string",
                CpuManagerPolicy = "string",
                InsecureKubeletReadonlyPortEnabled = "string",
                PodPidsLimit = 0,
            },
            Labels = 
            {
                { "string", "string" },
            },
            LinuxNodeConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLinuxNodeConfigArgs
            {
                CgroupMode = "string",
                HugepagesConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs
                {
                    HugepageSize1g = 0,
                    HugepageSize2m = 0,
                },
                Sysctls = 
                {
                    { "string", "string" },
                },
            },
            LocalNvmeSsdBlockConfig = new Gcp.Container.Inputs.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs
            {
                LocalSsdCount = 0,
            },
            LocalSsdCount = 0,
            LoggingVariant = "string",
            MachineType = "string",
            Metadata = 
            {
                { "string", "string" },
            },
            MinCpuPlatform = "string",
            NodeGroup = "string",
            OauthScopes = new[]
            {
                "string",
            },
            Preemptible = false,
            ReservationAffinity = new Gcp.Container.Inputs.NodePoolNodeConfigReservationAffinityArgs
            {
                ConsumeReservationType = "string",
                Key = "string",
                Values = new[]
                {
                    "string",
                },
            },
            ResourceLabels = 
            {
                { "string", "string" },
            },
            ResourceManagerTags = 
            {
                { "string", "string" },
            },
            SandboxConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSandboxConfigArgs
            {
                SandboxType = "string",
            },
            SecondaryBootDisks = new[]
            {
                new Gcp.Container.Inputs.NodePoolNodeConfigSecondaryBootDiskArgs
                {
                    DiskImage = "string",
                    Mode = "string",
                },
            },
            ServiceAccount = "string",
            ShieldedInstanceConfig = new Gcp.Container.Inputs.NodePoolNodeConfigShieldedInstanceConfigArgs
            {
                EnableIntegrityMonitoring = false,
                EnableSecureBoot = false,
            },
            SoleTenantConfig = new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigArgs
            {
                NodeAffinities = new[]
                {
                    new Gcp.Container.Inputs.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs
                    {
                        Key = "string",
                        Operator = "string",
                        Values = new[]
                        {
                            "string",
                        },
                    },
                },
            },
            Spot = false,
            StoragePools = new[]
            {
                "string",
            },
            Tags = new[]
            {
                "string",
            },
            Taints = new[]
            {
                new Gcp.Container.Inputs.NodePoolNodeConfigTaintArgs
                {
                    Effect = "string",
                    Key = "string",
                    Value = "string",
                },
            },
            WorkloadMetadataConfig = new Gcp.Container.Inputs.NodePoolNodeConfigWorkloadMetadataConfigArgs
            {
                Mode = "string",
            },
        },
        Name = "string",
        InitialNodeCount = 0,
        Autoscaling = new Gcp.Container.Inputs.NodePoolAutoscalingArgs
        {
            LocationPolicy = "string",
            MaxNodeCount = 0,
            MinNodeCount = 0,
            TotalMaxNodeCount = 0,
            TotalMinNodeCount = 0,
        },
        MaxPodsPerNode = 0,
        NodeCount = 0,
        NodeLocations = new[]
        {
            "string",
        },
        PlacementPolicy = new Gcp.Container.Inputs.NodePoolPlacementPolicyArgs
        {
            Type = "string",
            PolicyName = "string",
            TpuTopology = "string",
        },
        Project = "string",
        QueuedProvisioning = new Gcp.Container.Inputs.NodePoolQueuedProvisioningArgs
        {
            Enabled = false,
        },
        UpgradeSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsArgs
        {
            BlueGreenSettings = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsArgs
            {
                StandardRolloutPolicy = new Gcp.Container.Inputs.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs
                {
                    BatchNodeCount = 0,
                    BatchPercentage = 0,
                    BatchSoakDuration = "string",
                },
                NodePoolSoakDuration = "string",
            },
            MaxSurge = 0,
            MaxUnavailable = 0,
            Strategy = "string",
        },
        Version = "string",
    });
    
    example, err := container.NewNodePool(ctx, "nodePoolResource", &container.NodePoolArgs{
    	Cluster: pulumi.String("string"),
    	NetworkConfig: &container.NodePoolNetworkConfigArgs{
    		AdditionalNodeNetworkConfigs: container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArray{
    			&container.NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs{
    				Network:    pulumi.String("string"),
    				Subnetwork: pulumi.String("string"),
    			},
    		},
    		AdditionalPodNetworkConfigs: container.NodePoolNetworkConfigAdditionalPodNetworkConfigArray{
    			&container.NodePoolNetworkConfigAdditionalPodNetworkConfigArgs{
    				MaxPodsPerNode:    pulumi.Int(0),
    				SecondaryPodRange: pulumi.String("string"),
    				Subnetwork:        pulumi.String("string"),
    			},
    		},
    		CreatePodRange:     pulumi.Bool(false),
    		EnablePrivateNodes: pulumi.Bool(false),
    		NetworkPerformanceConfig: &container.NodePoolNetworkConfigNetworkPerformanceConfigArgs{
    			TotalEgressBandwidthTier: pulumi.String("string"),
    		},
    		PodCidrOverprovisionConfig: &container.NodePoolNetworkConfigPodCidrOverprovisionConfigArgs{
    			Disabled: pulumi.Bool(false),
    		},
    		PodIpv4CidrBlock: pulumi.String("string"),
    		PodRange:         pulumi.String("string"),
    	},
    	NamePrefix: pulumi.String("string"),
    	Location:   pulumi.String("string"),
    	Management: &container.NodePoolManagementArgs{
    		AutoRepair:  pulumi.Bool(false),
    		AutoUpgrade: pulumi.Bool(false),
    	},
    	NodeConfig: &container.NodePoolNodeConfigArgs{
    		AdvancedMachineFeatures: &container.NodePoolNodeConfigAdvancedMachineFeaturesArgs{
    			ThreadsPerCore:             pulumi.Int(0),
    			EnableNestedVirtualization: pulumi.Bool(false),
    		},
    		BootDiskKmsKey: pulumi.String("string"),
    		ConfidentialNodes: &container.NodePoolNodeConfigConfidentialNodesArgs{
    			Enabled: pulumi.Bool(false),
    		},
    		ContainerdConfig: &container.NodePoolNodeConfigContainerdConfigArgs{
    			PrivateRegistryAccessConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs{
    				Enabled: pulumi.Bool(false),
    				CertificateAuthorityDomainConfigs: container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArray{
    					&container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs{
    						Fqdns: pulumi.StringArray{
    							pulumi.String("string"),
    						},
    						GcpSecretManagerCertificateConfig: &container.NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs{
    							SecretUri: pulumi.String("string"),
    						},
    					},
    				},
    			},
    		},
    		DiskSizeGb: pulumi.Int(0),
    		DiskType:   pulumi.String("string"),
    		EffectiveTaints: container.NodePoolNodeConfigEffectiveTaintArray{
    			&container.NodePoolNodeConfigEffectiveTaintArgs{
    				Effect: pulumi.String("string"),
    				Key:    pulumi.String("string"),
    				Value:  pulumi.String("string"),
    			},
    		},
    		EnableConfidentialStorage: pulumi.Bool(false),
    		EphemeralStorageConfig: &container.NodePoolNodeConfigEphemeralStorageConfigArgs{
    			LocalSsdCount: pulumi.Int(0),
    		},
    		EphemeralStorageLocalSsdConfig: &container.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs{
    			LocalSsdCount: pulumi.Int(0),
    		},
    		FastSocket: &container.NodePoolNodeConfigFastSocketArgs{
    			Enabled: pulumi.Bool(false),
    		},
    		GcfsConfig: &container.NodePoolNodeConfigGcfsConfigArgs{
    			Enabled: pulumi.Bool(false),
    		},
    		GuestAccelerators: container.NodePoolNodeConfigGuestAcceleratorArray{
    			&container.NodePoolNodeConfigGuestAcceleratorArgs{
    				Count: pulumi.Int(0),
    				Type:  pulumi.String("string"),
    				GpuDriverInstallationConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs{
    					GpuDriverVersion: pulumi.String("string"),
    				},
    				GpuPartitionSize: pulumi.String("string"),
    				GpuSharingConfig: &container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs{
    					GpuSharingStrategy:     pulumi.String("string"),
    					MaxSharedClientsPerGpu: pulumi.Int(0),
    				},
    			},
    		},
    		Gvnic: &container.NodePoolNodeConfigGvnicArgs{
    			Enabled: pulumi.Bool(false),
    		},
    		HostMaintenancePolicy: &container.NodePoolNodeConfigHostMaintenancePolicyArgs{
    			MaintenanceInterval: pulumi.String("string"),
    		},
    		ImageType: pulumi.String("string"),
    		KubeletConfig: &container.NodePoolNodeConfigKubeletConfigArgs{
    			CpuCfsQuota:                        pulumi.Bool(false),
    			CpuCfsQuotaPeriod:                  pulumi.String("string"),
    			CpuManagerPolicy:                   pulumi.String("string"),
    			InsecureKubeletReadonlyPortEnabled: pulumi.String("string"),
    			PodPidsLimit:                       pulumi.Int(0),
    		},
    		Labels: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		LinuxNodeConfig: &container.NodePoolNodeConfigLinuxNodeConfigArgs{
    			CgroupMode: pulumi.String("string"),
    			HugepagesConfig: &container.NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs{
    				HugepageSize1g: pulumi.Int(0),
    				HugepageSize2m: pulumi.Int(0),
    			},
    			Sysctls: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    		},
    		LocalNvmeSsdBlockConfig: &container.NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs{
    			LocalSsdCount: pulumi.Int(0),
    		},
    		LocalSsdCount:  pulumi.Int(0),
    		LoggingVariant: pulumi.String("string"),
    		MachineType:    pulumi.String("string"),
    		Metadata: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		MinCpuPlatform: pulumi.String("string"),
    		NodeGroup:      pulumi.String("string"),
    		OauthScopes: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Preemptible: pulumi.Bool(false),
    		ReservationAffinity: &container.NodePoolNodeConfigReservationAffinityArgs{
    			ConsumeReservationType: pulumi.String("string"),
    			Key:                    pulumi.String("string"),
    			Values: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    		ResourceLabels: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		ResourceManagerTags: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		SandboxConfig: &container.NodePoolNodeConfigSandboxConfigArgs{
    			SandboxType: pulumi.String("string"),
    		},
    		SecondaryBootDisks: container.NodePoolNodeConfigSecondaryBootDiskArray{
    			&container.NodePoolNodeConfigSecondaryBootDiskArgs{
    				DiskImage: pulumi.String("string"),
    				Mode:      pulumi.String("string"),
    			},
    		},
    		ServiceAccount: pulumi.String("string"),
    		ShieldedInstanceConfig: &container.NodePoolNodeConfigShieldedInstanceConfigArgs{
    			EnableIntegrityMonitoring: pulumi.Bool(false),
    			EnableSecureBoot:          pulumi.Bool(false),
    		},
    		SoleTenantConfig: &container.NodePoolNodeConfigSoleTenantConfigArgs{
    			NodeAffinities: container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArray{
    				&container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs{
    					Key:      pulumi.String("string"),
    					Operator: pulumi.String("string"),
    					Values: pulumi.StringArray{
    						pulumi.String("string"),
    					},
    				},
    			},
    		},
    		Spot: pulumi.Bool(false),
    		StoragePools: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Tags: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Taints: container.NodePoolNodeConfigTaintArray{
    			&container.NodePoolNodeConfigTaintArgs{
    				Effect: pulumi.String("string"),
    				Key:    pulumi.String("string"),
    				Value:  pulumi.String("string"),
    			},
    		},
    		WorkloadMetadataConfig: &container.NodePoolNodeConfigWorkloadMetadataConfigArgs{
    			Mode: pulumi.String("string"),
    		},
    	},
    	Name:             pulumi.String("string"),
    	InitialNodeCount: pulumi.Int(0),
    	Autoscaling: &container.NodePoolAutoscalingArgs{
    		LocationPolicy:    pulumi.String("string"),
    		MaxNodeCount:      pulumi.Int(0),
    		MinNodeCount:      pulumi.Int(0),
    		TotalMaxNodeCount: pulumi.Int(0),
    		TotalMinNodeCount: pulumi.Int(0),
    	},
    	MaxPodsPerNode: pulumi.Int(0),
    	NodeCount:      pulumi.Int(0),
    	NodeLocations: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	PlacementPolicy: &container.NodePoolPlacementPolicyArgs{
    		Type:        pulumi.String("string"),
    		PolicyName:  pulumi.String("string"),
    		TpuTopology: pulumi.String("string"),
    	},
    	Project: pulumi.String("string"),
    	QueuedProvisioning: &container.NodePoolQueuedProvisioningArgs{
    		Enabled: pulumi.Bool(false),
    	},
    	UpgradeSettings: &container.NodePoolUpgradeSettingsArgs{
    		BlueGreenSettings: &container.NodePoolUpgradeSettingsBlueGreenSettingsArgs{
    			StandardRolloutPolicy: &container.NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs{
    				BatchNodeCount:    pulumi.Int(0),
    				BatchPercentage:   pulumi.Float64(0),
    				BatchSoakDuration: pulumi.String("string"),
    			},
    			NodePoolSoakDuration: pulumi.String("string"),
    		},
    		MaxSurge:       pulumi.Int(0),
    		MaxUnavailable: pulumi.Int(0),
    		Strategy:       pulumi.String("string"),
    	},
    	Version: pulumi.String("string"),
    })
    
    var nodePoolResource = new NodePool("nodePoolResource", NodePoolArgs.builder()
        .cluster("string")
        .networkConfig(NodePoolNetworkConfigArgs.builder()
            .additionalNodeNetworkConfigs(NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs.builder()
                .network("string")
                .subnetwork("string")
                .build())
            .additionalPodNetworkConfigs(NodePoolNetworkConfigAdditionalPodNetworkConfigArgs.builder()
                .maxPodsPerNode(0)
                .secondaryPodRange("string")
                .subnetwork("string")
                .build())
            .createPodRange(false)
            .enablePrivateNodes(false)
            .networkPerformanceConfig(NodePoolNetworkConfigNetworkPerformanceConfigArgs.builder()
                .totalEgressBandwidthTier("string")
                .build())
            .podCidrOverprovisionConfig(NodePoolNetworkConfigPodCidrOverprovisionConfigArgs.builder()
                .disabled(false)
                .build())
            .podIpv4CidrBlock("string")
            .podRange("string")
            .build())
        .namePrefix("string")
        .location("string")
        .management(NodePoolManagementArgs.builder()
            .autoRepair(false)
            .autoUpgrade(false)
            .build())
        .nodeConfig(NodePoolNodeConfigArgs.builder()
            .advancedMachineFeatures(NodePoolNodeConfigAdvancedMachineFeaturesArgs.builder()
                .threadsPerCore(0)
                .enableNestedVirtualization(false)
                .build())
            .bootDiskKmsKey("string")
            .confidentialNodes(NodePoolNodeConfigConfidentialNodesArgs.builder()
                .enabled(false)
                .build())
            .containerdConfig(NodePoolNodeConfigContainerdConfigArgs.builder()
                .privateRegistryAccessConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs.builder()
                    .enabled(false)
                    .certificateAuthorityDomainConfigs(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs.builder()
                        .fqdns("string")
                        .gcpSecretManagerCertificateConfig(NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs.builder()
                            .secretUri("string")
                            .build())
                        .build())
                    .build())
                .build())
            .diskSizeGb(0)
            .diskType("string")
            .effectiveTaints(NodePoolNodeConfigEffectiveTaintArgs.builder()
                .effect("string")
                .key("string")
                .value("string")
                .build())
            .enableConfidentialStorage(false)
            .ephemeralStorageConfig(NodePoolNodeConfigEphemeralStorageConfigArgs.builder()
                .localSsdCount(0)
                .build())
            .ephemeralStorageLocalSsdConfig(NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs.builder()
                .localSsdCount(0)
                .build())
            .fastSocket(NodePoolNodeConfigFastSocketArgs.builder()
                .enabled(false)
                .build())
            .gcfsConfig(NodePoolNodeConfigGcfsConfigArgs.builder()
                .enabled(false)
                .build())
            .guestAccelerators(NodePoolNodeConfigGuestAcceleratorArgs.builder()
                .count(0)
                .type("string")
                .gpuDriverInstallationConfig(NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs.builder()
                    .gpuDriverVersion("string")
                    .build())
                .gpuPartitionSize("string")
                .gpuSharingConfig(NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs.builder()
                    .gpuSharingStrategy("string")
                    .maxSharedClientsPerGpu(0)
                    .build())
                .build())
            .gvnic(NodePoolNodeConfigGvnicArgs.builder()
                .enabled(false)
                .build())
            .hostMaintenancePolicy(NodePoolNodeConfigHostMaintenancePolicyArgs.builder()
                .maintenanceInterval("string")
                .build())
            .imageType("string")
            .kubeletConfig(NodePoolNodeConfigKubeletConfigArgs.builder()
                .cpuCfsQuota(false)
                .cpuCfsQuotaPeriod("string")
                .cpuManagerPolicy("string")
                .insecureKubeletReadonlyPortEnabled("string")
                .podPidsLimit(0)
                .build())
            .labels(Map.of("string", "string"))
            .linuxNodeConfig(NodePoolNodeConfigLinuxNodeConfigArgs.builder()
                .cgroupMode("string")
                .hugepagesConfig(NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs.builder()
                    .hugepageSize1g(0)
                    .hugepageSize2m(0)
                    .build())
                .sysctls(Map.of("string", "string"))
                .build())
            .localNvmeSsdBlockConfig(NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs.builder()
                .localSsdCount(0)
                .build())
            .localSsdCount(0)
            .loggingVariant("string")
            .machineType("string")
            .metadata(Map.of("string", "string"))
            .minCpuPlatform("string")
            .nodeGroup("string")
            .oauthScopes("string")
            .preemptible(false)
            .reservationAffinity(NodePoolNodeConfigReservationAffinityArgs.builder()
                .consumeReservationType("string")
                .key("string")
                .values("string")
                .build())
            .resourceLabels(Map.of("string", "string"))
            .resourceManagerTags(Map.of("string", "string"))
            .sandboxConfig(NodePoolNodeConfigSandboxConfigArgs.builder()
                .sandboxType("string")
                .build())
            .secondaryBootDisks(NodePoolNodeConfigSecondaryBootDiskArgs.builder()
                .diskImage("string")
                .mode("string")
                .build())
            .serviceAccount("string")
            .shieldedInstanceConfig(NodePoolNodeConfigShieldedInstanceConfigArgs.builder()
                .enableIntegrityMonitoring(false)
                .enableSecureBoot(false)
                .build())
            .soleTenantConfig(NodePoolNodeConfigSoleTenantConfigArgs.builder()
                .nodeAffinities(NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs.builder()
                    .key("string")
                    .operator("string")
                    .values("string")
                    .build())
                .build())
            .spot(false)
            .storagePools("string")
            .tags("string")
            .taints(NodePoolNodeConfigTaintArgs.builder()
                .effect("string")
                .key("string")
                .value("string")
                .build())
            .workloadMetadataConfig(NodePoolNodeConfigWorkloadMetadataConfigArgs.builder()
                .mode("string")
                .build())
            .build())
        .name("string")
        .initialNodeCount(0)
        .autoscaling(NodePoolAutoscalingArgs.builder()
            .locationPolicy("string")
            .maxNodeCount(0)
            .minNodeCount(0)
            .totalMaxNodeCount(0)
            .totalMinNodeCount(0)
            .build())
        .maxPodsPerNode(0)
        .nodeCount(0)
        .nodeLocations("string")
        .placementPolicy(NodePoolPlacementPolicyArgs.builder()
            .type("string")
            .policyName("string")
            .tpuTopology("string")
            .build())
        .project("string")
        .queuedProvisioning(NodePoolQueuedProvisioningArgs.builder()
            .enabled(false)
            .build())
        .upgradeSettings(NodePoolUpgradeSettingsArgs.builder()
            .blueGreenSettings(NodePoolUpgradeSettingsBlueGreenSettingsArgs.builder()
                .standardRolloutPolicy(NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs.builder()
                    .batchNodeCount(0)
                    .batchPercentage(0)
                    .batchSoakDuration("string")
                    .build())
                .nodePoolSoakDuration("string")
                .build())
            .maxSurge(0)
            .maxUnavailable(0)
            .strategy("string")
            .build())
        .version("string")
        .build());
    
    node_pool_resource = gcp.container.NodePool("nodePoolResource",
        cluster="string",
        network_config={
            "additional_node_network_configs": [{
                "network": "string",
                "subnetwork": "string",
            }],
            "additional_pod_network_configs": [{
                "max_pods_per_node": 0,
                "secondary_pod_range": "string",
                "subnetwork": "string",
            }],
            "create_pod_range": False,
            "enable_private_nodes": False,
            "network_performance_config": {
                "total_egress_bandwidth_tier": "string",
            },
            "pod_cidr_overprovision_config": {
                "disabled": False,
            },
            "pod_ipv4_cidr_block": "string",
            "pod_range": "string",
        },
        name_prefix="string",
        location="string",
        management={
            "auto_repair": False,
            "auto_upgrade": False,
        },
        node_config={
            "advanced_machine_features": {
                "threads_per_core": 0,
                "enable_nested_virtualization": False,
            },
            "boot_disk_kms_key": "string",
            "confidential_nodes": {
                "enabled": False,
            },
            "containerd_config": {
                "private_registry_access_config": {
                    "enabled": False,
                    "certificate_authority_domain_configs": [{
                        "fqdns": ["string"],
                        "gcp_secret_manager_certificate_config": {
                            "secret_uri": "string",
                        },
                    }],
                },
            },
            "disk_size_gb": 0,
            "disk_type": "string",
            "effective_taints": [{
                "effect": "string",
                "key": "string",
                "value": "string",
            }],
            "enable_confidential_storage": False,
            "ephemeral_storage_config": {
                "local_ssd_count": 0,
            },
            "ephemeral_storage_local_ssd_config": {
                "local_ssd_count": 0,
            },
            "fast_socket": {
                "enabled": False,
            },
            "gcfs_config": {
                "enabled": False,
            },
            "guest_accelerators": [{
                "count": 0,
                "type": "string",
                "gpu_driver_installation_config": {
                    "gpu_driver_version": "string",
                },
                "gpu_partition_size": "string",
                "gpu_sharing_config": {
                    "gpu_sharing_strategy": "string",
                    "max_shared_clients_per_gpu": 0,
                },
            }],
            "gvnic": {
                "enabled": False,
            },
            "host_maintenance_policy": {
                "maintenance_interval": "string",
            },
            "image_type": "string",
            "kubelet_config": {
                "cpu_cfs_quota": False,
                "cpu_cfs_quota_period": "string",
                "cpu_manager_policy": "string",
                "insecure_kubelet_readonly_port_enabled": "string",
                "pod_pids_limit": 0,
            },
            "labels": {
                "string": "string",
            },
            "linux_node_config": {
                "cgroup_mode": "string",
                "hugepages_config": {
                    "hugepage_size1g": 0,
                    "hugepage_size2m": 0,
                },
                "sysctls": {
                    "string": "string",
                },
            },
            "local_nvme_ssd_block_config": {
                "local_ssd_count": 0,
            },
            "local_ssd_count": 0,
            "logging_variant": "string",
            "machine_type": "string",
            "metadata": {
                "string": "string",
            },
            "min_cpu_platform": "string",
            "node_group": "string",
            "oauth_scopes": ["string"],
            "preemptible": False,
            "reservation_affinity": {
                "consume_reservation_type": "string",
                "key": "string",
                "values": ["string"],
            },
            "resource_labels": {
                "string": "string",
            },
            "resource_manager_tags": {
                "string": "string",
            },
            "sandbox_config": {
                "sandbox_type": "string",
            },
            "secondary_boot_disks": [{
                "disk_image": "string",
                "mode": "string",
            }],
            "service_account": "string",
            "shielded_instance_config": {
                "enable_integrity_monitoring": False,
                "enable_secure_boot": False,
            },
            "sole_tenant_config": {
                "node_affinities": [{
                    "key": "string",
                    "operator": "string",
                    "values": ["string"],
                }],
            },
            "spot": False,
            "storage_pools": ["string"],
            "tags": ["string"],
            "taints": [{
                "effect": "string",
                "key": "string",
                "value": "string",
            }],
            "workload_metadata_config": {
                "mode": "string",
            },
        },
        name="string",
        initial_node_count=0,
        autoscaling={
            "location_policy": "string",
            "max_node_count": 0,
            "min_node_count": 0,
            "total_max_node_count": 0,
            "total_min_node_count": 0,
        },
        max_pods_per_node=0,
        node_count=0,
        node_locations=["string"],
        placement_policy={
            "type": "string",
            "policy_name": "string",
            "tpu_topology": "string",
        },
        project="string",
        queued_provisioning={
            "enabled": False,
        },
        upgrade_settings={
            "blue_green_settings": {
                "standard_rollout_policy": {
                    "batch_node_count": 0,
                    "batch_percentage": 0,
                    "batch_soak_duration": "string",
                },
                "node_pool_soak_duration": "string",
            },
            "max_surge": 0,
            "max_unavailable": 0,
            "strategy": "string",
        },
        version="string")
    
    const nodePoolResource = new gcp.container.NodePool("nodePoolResource", {
        cluster: "string",
        networkConfig: {
            additionalNodeNetworkConfigs: [{
                network: "string",
                subnetwork: "string",
            }],
            additionalPodNetworkConfigs: [{
                maxPodsPerNode: 0,
                secondaryPodRange: "string",
                subnetwork: "string",
            }],
            createPodRange: false,
            enablePrivateNodes: false,
            networkPerformanceConfig: {
                totalEgressBandwidthTier: "string",
            },
            podCidrOverprovisionConfig: {
                disabled: false,
            },
            podIpv4CidrBlock: "string",
            podRange: "string",
        },
        namePrefix: "string",
        location: "string",
        management: {
            autoRepair: false,
            autoUpgrade: false,
        },
        nodeConfig: {
            advancedMachineFeatures: {
                threadsPerCore: 0,
                enableNestedVirtualization: false,
            },
            bootDiskKmsKey: "string",
            confidentialNodes: {
                enabled: false,
            },
            containerdConfig: {
                privateRegistryAccessConfig: {
                    enabled: false,
                    certificateAuthorityDomainConfigs: [{
                        fqdns: ["string"],
                        gcpSecretManagerCertificateConfig: {
                            secretUri: "string",
                        },
                    }],
                },
            },
            diskSizeGb: 0,
            diskType: "string",
            effectiveTaints: [{
                effect: "string",
                key: "string",
                value: "string",
            }],
            enableConfidentialStorage: false,
            ephemeralStorageConfig: {
                localSsdCount: 0,
            },
            ephemeralStorageLocalSsdConfig: {
                localSsdCount: 0,
            },
            fastSocket: {
                enabled: false,
            },
            gcfsConfig: {
                enabled: false,
            },
            guestAccelerators: [{
                count: 0,
                type: "string",
                gpuDriverInstallationConfig: {
                    gpuDriverVersion: "string",
                },
                gpuPartitionSize: "string",
                gpuSharingConfig: {
                    gpuSharingStrategy: "string",
                    maxSharedClientsPerGpu: 0,
                },
            }],
            gvnic: {
                enabled: false,
            },
            hostMaintenancePolicy: {
                maintenanceInterval: "string",
            },
            imageType: "string",
            kubeletConfig: {
                cpuCfsQuota: false,
                cpuCfsQuotaPeriod: "string",
                cpuManagerPolicy: "string",
                insecureKubeletReadonlyPortEnabled: "string",
                podPidsLimit: 0,
            },
            labels: {
                string: "string",
            },
            linuxNodeConfig: {
                cgroupMode: "string",
                hugepagesConfig: {
                    hugepageSize1g: 0,
                    hugepageSize2m: 0,
                },
                sysctls: {
                    string: "string",
                },
            },
            localNvmeSsdBlockConfig: {
                localSsdCount: 0,
            },
            localSsdCount: 0,
            loggingVariant: "string",
            machineType: "string",
            metadata: {
                string: "string",
            },
            minCpuPlatform: "string",
            nodeGroup: "string",
            oauthScopes: ["string"],
            preemptible: false,
            reservationAffinity: {
                consumeReservationType: "string",
                key: "string",
                values: ["string"],
            },
            resourceLabels: {
                string: "string",
            },
            resourceManagerTags: {
                string: "string",
            },
            sandboxConfig: {
                sandboxType: "string",
            },
            secondaryBootDisks: [{
                diskImage: "string",
                mode: "string",
            }],
            serviceAccount: "string",
            shieldedInstanceConfig: {
                enableIntegrityMonitoring: false,
                enableSecureBoot: false,
            },
            soleTenantConfig: {
                nodeAffinities: [{
                    key: "string",
                    operator: "string",
                    values: ["string"],
                }],
            },
            spot: false,
            storagePools: ["string"],
            tags: ["string"],
            taints: [{
                effect: "string",
                key: "string",
                value: "string",
            }],
            workloadMetadataConfig: {
                mode: "string",
            },
        },
        name: "string",
        initialNodeCount: 0,
        autoscaling: {
            locationPolicy: "string",
            maxNodeCount: 0,
            minNodeCount: 0,
            totalMaxNodeCount: 0,
            totalMinNodeCount: 0,
        },
        maxPodsPerNode: 0,
        nodeCount: 0,
        nodeLocations: ["string"],
        placementPolicy: {
            type: "string",
            policyName: "string",
            tpuTopology: "string",
        },
        project: "string",
        queuedProvisioning: {
            enabled: false,
        },
        upgradeSettings: {
            blueGreenSettings: {
                standardRolloutPolicy: {
                    batchNodeCount: 0,
                    batchPercentage: 0,
                    batchSoakDuration: "string",
                },
                nodePoolSoakDuration: "string",
            },
            maxSurge: 0,
            maxUnavailable: 0,
            strategy: "string",
        },
        version: "string",
    });
    
    type: gcp:container:NodePool
    properties:
        autoscaling:
            locationPolicy: string
            maxNodeCount: 0
            minNodeCount: 0
            totalMaxNodeCount: 0
            totalMinNodeCount: 0
        cluster: string
        initialNodeCount: 0
        location: string
        management:
            autoRepair: false
            autoUpgrade: false
        maxPodsPerNode: 0
        name: string
        namePrefix: string
        networkConfig:
            additionalNodeNetworkConfigs:
                - network: string
                  subnetwork: string
            additionalPodNetworkConfigs:
                - maxPodsPerNode: 0
                  secondaryPodRange: string
                  subnetwork: string
            createPodRange: false
            enablePrivateNodes: false
            networkPerformanceConfig:
                totalEgressBandwidthTier: string
            podCidrOverprovisionConfig:
                disabled: false
            podIpv4CidrBlock: string
            podRange: string
        nodeConfig:
            advancedMachineFeatures:
                enableNestedVirtualization: false
                threadsPerCore: 0
            bootDiskKmsKey: string
            confidentialNodes:
                enabled: false
            containerdConfig:
                privateRegistryAccessConfig:
                    certificateAuthorityDomainConfigs:
                        - fqdns:
                            - string
                          gcpSecretManagerCertificateConfig:
                            secretUri: string
                    enabled: false
            diskSizeGb: 0
            diskType: string
            effectiveTaints:
                - effect: string
                  key: string
                  value: string
            enableConfidentialStorage: false
            ephemeralStorageConfig:
                localSsdCount: 0
            ephemeralStorageLocalSsdConfig:
                localSsdCount: 0
            fastSocket:
                enabled: false
            gcfsConfig:
                enabled: false
            guestAccelerators:
                - count: 0
                  gpuDriverInstallationConfig:
                    gpuDriverVersion: string
                  gpuPartitionSize: string
                  gpuSharingConfig:
                    gpuSharingStrategy: string
                    maxSharedClientsPerGpu: 0
                  type: string
            gvnic:
                enabled: false
            hostMaintenancePolicy:
                maintenanceInterval: string
            imageType: string
            kubeletConfig:
                cpuCfsQuota: false
                cpuCfsQuotaPeriod: string
                cpuManagerPolicy: string
                insecureKubeletReadonlyPortEnabled: string
                podPidsLimit: 0
            labels:
                string: string
            linuxNodeConfig:
                cgroupMode: string
                hugepagesConfig:
                    hugepageSize1g: 0
                    hugepageSize2m: 0
                sysctls:
                    string: string
            localNvmeSsdBlockConfig:
                localSsdCount: 0
            localSsdCount: 0
            loggingVariant: string
            machineType: string
            metadata:
                string: string
            minCpuPlatform: string
            nodeGroup: string
            oauthScopes:
                - string
            preemptible: false
            reservationAffinity:
                consumeReservationType: string
                key: string
                values:
                    - string
            resourceLabels:
                string: string
            resourceManagerTags:
                string: string
            sandboxConfig:
                sandboxType: string
            secondaryBootDisks:
                - diskImage: string
                  mode: string
            serviceAccount: string
            shieldedInstanceConfig:
                enableIntegrityMonitoring: false
                enableSecureBoot: false
            soleTenantConfig:
                nodeAffinities:
                    - key: string
                      operator: string
                      values:
                        - string
            spot: false
            storagePools:
                - string
            tags:
                - string
            taints:
                - effect: string
                  key: string
                  value: string
            workloadMetadataConfig:
                mode: string
        nodeCount: 0
        nodeLocations:
            - string
        placementPolicy:
            policyName: string
            tpuTopology: string
            type: string
        project: string
        queuedProvisioning:
            enabled: false
        upgradeSettings:
            blueGreenSettings:
                nodePoolSoakDuration: string
                standardRolloutPolicy:
                    batchNodeCount: 0
                    batchPercentage: 0
                    batchSoakDuration: string
            maxSurge: 0
            maxUnavailable: 0
            strategy: string
        version: string
    

    NodePool Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The NodePool resource accepts the following input properties:

    Cluster string
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    Autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    Location string
    The location (region or zone) of the cluster.


    Management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations List<string>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    PlacementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    Cluster string
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    Autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    Location string
    The location (region or zone) of the cluster.


    Management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations []string

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    PlacementPolicy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioningArgs
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster String
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    initialNodeCount Integer
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    location String
    The location (region or zone) of the cluster.


    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode Integer
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Integer
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster string
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    initialNodeCount number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    location string
    The location (region or zone) of the cluster.


    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations string[]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster str
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    initial_node_count int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    location str
    The location (region or zone) of the cluster.


    management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    max_pods_per_node int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name str
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    name_prefix str
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    network_config NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    node_config NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    node_count int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    node_locations Sequence[str]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placement_policy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    project str
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queued_provisioning NodePoolQueuedProvisioningArgs
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgrade_settings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version str
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    cluster String
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    autoscaling Property Map
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    initialNodeCount Number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    location String
    The location (region or zone) of the cluster.


    management Property Map
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode Number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig Property Map
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig Property Map
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    placementPolicy Property Map
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning Property Map
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings Property Map
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
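    As a non-authoritative illustration of how several of these inputs fit together, the TypeScript sketch below creates a node pool that relies on autoscaling instead of a fixed nodeCount, resolves a concrete node version through the gcp.container.getEngineVersions data source mentioned in the version description, and uses the ignoreChanges resource option referenced in the initialNodeCount warning. The cluster name, location, and version prefix are placeholder assumptions; adjust them to your environment and provider version.

    import * as gcp from "@pulumi/gcp";

    // Assumed to exist already; replace with a reference to your own cluster.
    const clusterName = "my-gke-cluster";
    const location = "us-central1";

    // Resolve a concrete node version from a fuzzy prefix so the provider
    // does not see spurious diffs (see the version input above).
    const engineVersions = gcp.container.getEngineVersions({
        location: location,
        versionPrefix: "1.29.",
    });

    const autoscaledPool = new gcp.container.NodePool("autoscaled-pool", {
        cluster: clusterName,
        location: location,
        version: engineVersions.then(v => v.latestNodeVersion),
        autoscaling: {
            minNodeCount: 1,
            maxNodeCount: 5,
        },
        management: {
            autoRepair: true,
            // auto-upgrade is left off because it would conflict with pinning version.
            autoUpgrade: false,
        },
    }, {
        // The autoscaler changes node counts outside of Pulumi; ignore that drift.
        ignoreChanges: ["nodeCount", "initialNodeCount"],
    });

    Because autoscaling is configured, nodeCount is deliberately not set, in line with the nodeCount guidance above.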

    Outputs

    All input properties are implicitly available as output properties. Additionally, the NodePool resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    InstanceGroupUrls List<string>
    The resource URLs of the managed instance groups associated with this node pool.
    ManagedInstanceGroupUrls List<string>
    List of instance group URLs which have been assigned to this node pool.
    Operation string
    Id string
    The provider-assigned unique ID for this managed resource.
    InstanceGroupUrls []string
    The resource URLs of the managed instance groups associated with this node pool.
    ManagedInstanceGroupUrls []string
    List of instance group URLs which have been assigned to this node pool.
    Operation string
    id String
    The provider-assigned unique ID for this managed resource.
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    operation String
    id string
    The provider-assigned unique ID for this managed resource.
    instanceGroupUrls string[]
    The resource URLs of the managed instance groups associated with this node pool.
    managedInstanceGroupUrls string[]
    List of instance group URLs which have been assigned to this node pool.
    operation string
    id str
    The provider-assigned unique ID for this managed resource.
    instance_group_urls Sequence[str]
    The resource URLs of the managed instance groups associated with this node pool.
    managed_instance_group_urls Sequence[str]
    List of instance group URLs which have been assigned to this node pool.
    operation str
    id String
    The provider-assigned unique ID for this managed resource.
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    operation String
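    These output properties can be read off the resource like any other Pulumi output. For example, assuming a node pool named autoscaledPool as in the earlier sketch, a TypeScript program might export the instance group URLs like this:

    // Export the instance group URLs so they can be consumed by other stacks
    // or inspected with `pulumi stack output`.
    export const nodePoolInstanceGroups = autoscaledPool.instanceGroupUrls;
    export const nodePoolManagedInstanceGroups = autoscaledPool.managedInstanceGroupUrls;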

    Look up Existing NodePool Resource

    Get an existing NodePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: NodePoolState, opts?: CustomResourceOptions): NodePool
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            autoscaling: Optional[NodePoolAutoscalingArgs] = None,
            cluster: Optional[str] = None,
            initial_node_count: Optional[int] = None,
            instance_group_urls: Optional[Sequence[str]] = None,
            location: Optional[str] = None,
            managed_instance_group_urls: Optional[Sequence[str]] = None,
            management: Optional[NodePoolManagementArgs] = None,
            max_pods_per_node: Optional[int] = None,
            name: Optional[str] = None,
            name_prefix: Optional[str] = None,
            network_config: Optional[NodePoolNetworkConfigArgs] = None,
            node_config: Optional[NodePoolNodeConfigArgs] = None,
            node_count: Optional[int] = None,
            node_locations: Optional[Sequence[str]] = None,
            operation: Optional[str] = None,
            placement_policy: Optional[NodePoolPlacementPolicyArgs] = None,
            project: Optional[str] = None,
            queued_provisioning: Optional[NodePoolQueuedProvisioningArgs] = None,
            upgrade_settings: Optional[NodePoolUpgradeSettingsArgs] = None,
            version: Optional[str] = None) -> NodePool
    func GetNodePool(ctx *Context, name string, id IDInput, state *NodePoolState, opts ...ResourceOption) (*NodePool, error)
    public static NodePool Get(string name, Input<string> id, NodePoolState? state, CustomResourceOptions? opts = null)
    public static NodePool get(String name, Output<String> id, NodePoolState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
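    As a minimal sketch of the lookup form above, the TypeScript snippet below attaches to a node pool that already exists in GKE without creating it. The ID string is a hypothetical placeholder; use the value reported by pulumi import or the id output of the resource in your own stack rather than this example format.

    import * as gcp from "@pulumi/gcp";

    // Look up an existing node pool by its provider ID (placeholder value shown).
    const existingPool = gcp.container.NodePool.get(
        "existing-pool",
        "my-project/us-central1/my-gke-cluster/my-node-pool",
    );

    // Read state from the looked-up resource, e.g. its current node version.
    export const existingPoolVersion = existingPool.version;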
    The following state arguments are supported:
    Autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    Cluster string
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    InstanceGroupUrls List<string>
    The resource URLs of the managed instance groups associated with this node pool.
    Location string
    The location (region or zone) of the cluster.


    ManagedInstanceGroupUrls List<string>
    List of instance group URLs which have been assigned to this node pool.
    Management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations List<string>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    Operation string
    PlacementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    Autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    Cluster string
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    InitialNodeCount int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    InstanceGroupUrls []string
    The resource URLs of the managed instance groups associated with this node pool.
    Location string
    The location (region or zone) of the cluster.


    ManagedInstanceGroupUrls []string
    List of instance group URLs which have been assigned to this node pool.
    Management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    MaxPodsPerNode int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    Name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    NamePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    NetworkConfig NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    NodeConfig NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    NodeCount int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    NodeLocations []string

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    Operation string
    PlacementPolicy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    Project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    QueuedProvisioning NodePoolQueuedProvisioningArgs
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    UpgradeSettings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    Version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster String
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount Integer
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    location String
    The location (region or zone) of the cluster.


    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode Integer
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Integer
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation String
    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling NodePoolAutoscaling
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster string
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    instanceGroupUrls string[]
    The resource URLs of the managed instance groups associated with this node pool.
    location string
    The location (region or zone) of the cluster.


    managedInstanceGroupUrls string[]
    List of instance group URLs which have been assigned to this node pool.
    management NodePoolManagement
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    maxPodsPerNode number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name string
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix string
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig NodePoolNetworkConfig
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig NodePoolNodeConfig
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations string[]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation string
    placementPolicy NodePoolPlacementPolicy
    Specifies a custom placement policy for the nodes.
    project string
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning NodePoolQueuedProvisioning
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings NodePoolUpgradeSettings
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version string
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling NodePoolAutoscalingArgs
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster str
    The cluster to create the node pool for. The cluster must already exist in the provided location. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initial_node_count int
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field.
    instance_group_urls Sequence[str]
    The resource URLs of the managed instance groups associated with this node pool.
    location str
    The location (region or zone) of the cluster.


    managed_instance_group_urls Sequence[str]
    List of instance group URLs which have been assigned to this node pool.
    management NodePoolManagementArgs
    Node management configuration, wherein auto-repair and auto-upgrade are configured. Structure is documented below.
    max_pods_per_node int
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name str
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    name_prefix str
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    network_config NodePoolNetworkConfigArgs
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    node_config NodePoolNodeConfigArgs
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    node_count int
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    node_locations Sequence[str]

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation str
    placement_policy NodePoolPlacementPolicyArgs
    Specifies a custom placement policy for the nodes.
    project str
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queued_provisioning NodePoolQueuedProvisioningArgs
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgrade_settings NodePoolUpgradeSettingsArgs
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version str
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
    autoscaling Property Map
    Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. Structure is documented below.
    cluster String
    The cluster to create the node pool for. Cluster must be present in location provided for clusters. May be specified in the format projects/{{project}}/locations/{{location}}/clusters/{{cluster}} or as just the name of the cluster.


    initialNodeCount Number
    The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually may change this value in your existing cluster, which will trigger destruction and recreation on the next provider run (to rectify the discrepancy). If you don't need this value, don't set it. If you do need it, you can use the ignoreChanges resource option to ignore subsequent changes to this field, as shown in the example after these property listings.
    instanceGroupUrls List<String>
    The resource URLs of the managed instance groups associated with this node pool.
    location String
    The location (region or zone) of the cluster.


    managedInstanceGroupUrls List<String>
    List of instance group URLs which have been assigned to this node pool.
    management Property Map
    Node management configuration, wherein auto-repair and auto-upgrade is configured. Structure is documented below.
    maxPodsPerNode Number
    The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled. See the official documentation for more information.
    name String
    The name of the node pool. If left blank, the provider will auto-generate a unique name.
    namePrefix String
    Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
    networkConfig Property Map
    The network configuration of the pool, such as configuration for adding Pod IP address ranges to the node pool or enabling private nodes. Structure is documented below.
    nodeConfig Property Map
    Parameters used in creating the node pool. See gcp.container.Cluster for schema.
    nodeCount Number
    The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
    nodeLocations List<String>

    The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.

    Note: node_locations will not revert to the cluster's default set of zones upon being unset. You must manually reconcile the list of zones with your cluster.

    operation String
    placementPolicy Property Map
    Specifies a custom placement policy for the nodes.
    project String
    The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.
    queuedProvisioning Property Map
    Specifies node pool-level settings of queued provisioning. Structure is documented below.
    upgradeSettings Property Map
    Specify node upgrade settings to change how GKE upgrades nodes. The maximum number of nodes upgraded simultaneously is limited to 20. Structure is documented below.
    version String
    The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as the provider will see spurious diffs when fuzzy versions are used. See the gcp.container.getEngineVersions data source's version_prefix field to approximate fuzzy versions in a provider-compatible way.
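
    As a sketch of how the version and initialNodeCount guidance above can be combined, the TypeScript example below pins the node version through the gcp.container.getEngineVersions data source and uses the ignoreChanges resource option to tolerate manual resizes; the cluster name, location, and version prefix are placeholders.

    import * as gcp from "@pulumi/gcp";

    // Resolve a concrete node version from a fuzzy prefix so the provider
    // does not see spurious diffs. The prefix is an assumed example value.
    const versions = gcp.container.getEngineVersions({
        location: "us-central1",
        versionPrefix: "1.29.",
    });

    const pinnedPool = new gcp.container.NodePool("pinned-version-pool", {
        cluster: "my-gke-cluster", // assumed existing cluster
        location: "us-central1",
        initialNodeCount: 1,
        version: versions.then(v => v.latestNodeVersion),
    }, {
        // Manual resizes can change initialNodeCount on the service side;
        // ignoring it avoids destroy/recreate on the next provider run.
        ignoreChanges: ["initialNodeCount"],
    });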

    Supporting Types

    NodePoolAutoscaling, NodePoolAutoscalingArgs

    LocationPolicy string
    Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
    • "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    MaxNodeCount int
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    MinNodeCount int
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    TotalMaxNodeCount int
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    TotalMinNodeCount int
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    LocationPolicy string
    Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
    • "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    MaxNodeCount int
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    MinNodeCount int
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    TotalMaxNodeCount int
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    TotalMinNodeCount int
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    locationPolicy String
    Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
    • "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    maxNodeCount Integer
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    minNodeCount Integer
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    totalMaxNodeCount Integer
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    totalMinNodeCount Integer
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    locationPolicy string
    Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
    • "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    maxNodeCount number
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    minNodeCount number
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    totalMaxNodeCount number
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    totalMinNodeCount number
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    location_policy str
    Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
    • "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    max_node_count int
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    min_node_count int
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    total_max_node_count int
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    total_min_node_count int
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    locationPolicy String
    Location policy specifies the algorithm used when scaling-up the node pool. Location policy is supported only in 1.24.1+ clusters.

    • "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones.
    • "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs.
    maxNodeCount Number
    Maximum number of nodes per zone in the NodePool. Must be >= min_node_count. Cannot be used with total limits.
    minNodeCount Number
    Minimum number of nodes per zone in the NodePool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
    totalMaxNodeCount Number
    Total maximum number of nodes in the NodePool. Must be >= total_min_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
    totalMinNodeCount Number
    Total minimum number of nodes in the NodePool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits. Total size limits are supported only in 1.24.1+ clusters.
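
    A minimal TypeScript sketch of the autoscaling block using total limits; either the per-zone limits or the total limits must be set, not both. The cluster name and limit values are placeholders.

    import * as gcp from "@pulumi/gcp";

    const autoscaledPool = new gcp.container.NodePool("autoscaled-pool", {
        cluster: "my-gke-cluster", // assumed existing cluster
        location: "us-central1",
        autoscaling: {
            // Total limits apply across all zones of the pool; use
            // minNodeCount/maxNodeCount instead for per-zone limits.
            totalMinNodeCount: 1,
            totalMaxNodeCount: 10,
            locationPolicy: "ANY", // prefer unused reservations, reduce Spot preemption
        },
    });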

    NodePoolManagement, NodePoolManagementArgs

    AutoRepair bool
    Whether the nodes will be automatically repaired. Enabled by default.
    AutoUpgrade bool
    Whether the nodes will be automatically upgraded. Enabled by default.
    AutoRepair bool
    Whether the nodes will be automatically repaired. Enabled by default.
    AutoUpgrade bool
    Whether the nodes will be automatically upgraded. Enabled by default.
    autoRepair Boolean
    Whether the nodes will be automatically repaired. Enabled by default.
    autoUpgrade Boolean
    Whether the nodes will be automatically upgraded. Enabled by default.
    autoRepair boolean
    Whether the nodes will be automatically repaired. Enabled by default.
    autoUpgrade boolean
    Whether the nodes will be automatically upgraded. Enabled by default.
    auto_repair bool
    Whether the nodes will be automatically repaired. Enabled by default.
    auto_upgrade bool
    Whether the nodes will be automatically upgraded. Enabled by default.
    autoRepair Boolean
    Whether the nodes will be automatically repaired. Enabled by default.
    autoUpgrade Boolean
    Whether the nodes will be automatically upgraded. Enabled by default.
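
    A brief TypeScript sketch of the management block; both flags default to enabled and are shown only to make the settings explicit. The cluster name is a placeholder.

    import * as gcp from "@pulumi/gcp";

    const managedPool = new gcp.container.NodePool("managed-pool", {
        cluster: "my-gke-cluster", // assumed existing cluster
        location: "us-central1",
        nodeCount: 1,
        management: {
            autoRepair: true,   // let GKE repair unhealthy nodes
            autoUpgrade: true,  // let GKE keep node versions current
        },
    });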

    NodePoolNetworkConfig, NodePoolNetworkConfigArgs

    AdditionalNodeNetworkConfigs List<NodePoolNetworkConfigAdditionalNodeNetworkConfig>
    The additional node networks for this node pool, specified as a list. Each node network corresponds to an additional interface. Structure is documented below.
    AdditionalPodNetworkConfigs List<NodePoolNetworkConfigAdditionalPodNetworkConfig>
    The additional pod networks for this node pool, specified as a list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    CreatePodRange bool
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    EnablePrivateNodes bool
    Whether nodes have internal IP addresses only.
    NetworkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration. Structure is documented below.
    PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
    PodIpv4CidrBlock string
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    PodRange string
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    AdditionalNodeNetworkConfigs []NodePoolNetworkConfigAdditionalNodeNetworkConfig
    The additional node networks for this node pool, specified as a list. Each node network corresponds to an additional interface. Structure is documented below.
    AdditionalPodNetworkConfigs []NodePoolNetworkConfigAdditionalPodNetworkConfig
    The additional pod networks for this node pool, specified as a list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    CreatePodRange bool
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    EnablePrivateNodes bool
    Whether nodes have internal IP addresses only.
    NetworkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration. Structure is documented below.
    PodCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
    PodIpv4CidrBlock string
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    PodRange string
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additionalNodeNetworkConfigs List<NodePoolNetworkConfigAdditionalNodeNetworkConfig>
    The additional node networks for this node pool, specified as a list. Each node network corresponds to an additional interface. Structure is documented below.
    additionalPodNetworkConfigs List<NodePoolNetworkConfigAdditionalPodNetworkConfig>
    The additional pod networks for this node pool, specified as a list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    createPodRange Boolean
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enablePrivateNodes Boolean
    Whether nodes have internal IP addresses only.
    networkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration. Structure is documented below.
    podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
    podIpv4CidrBlock String
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    podRange String
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additionalNodeNetworkConfigs NodePoolNetworkConfigAdditionalNodeNetworkConfig[]
    The additional node networks for this node pool, specified as a list. Each node network corresponds to an additional interface. Structure is documented below.
    additionalPodNetworkConfigs NodePoolNetworkConfigAdditionalPodNetworkConfig[]
    The additional pod networks for this node pool, specified as a list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    createPodRange boolean
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enablePrivateNodes boolean
    Whether nodes have internal IP addresses only.
    networkPerformanceConfig NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration. Structure is documented below.
    podCidrOverprovisionConfig NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
    podIpv4CidrBlock string
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    podRange string
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additional_node_network_configs Sequence[NodePoolNetworkConfigAdditionalNodeNetworkConfig]
    The additional node networks for this node pool, specified as a list. Each node network corresponds to an additional interface. Structure is documented below.
    additional_pod_network_configs Sequence[NodePoolNetworkConfigAdditionalPodNetworkConfig]
    The additional pod networks for this node pool, specified as a list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    create_pod_range bool
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enable_private_nodes bool
    Whether nodes have internal IP addresses only.
    network_performance_config NodePoolNetworkConfigNetworkPerformanceConfig
    Network bandwidth tier configuration. Structure is documented below.
    pod_cidr_overprovision_config NodePoolNetworkConfigPodCidrOverprovisionConfig
    Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
    pod_ipv4_cidr_block str
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    pod_range str
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
    additionalNodeNetworkConfigs List<Property Map>
    The additional node networks for this node pool, specified as a list. Each node network corresponds to an additional interface. Structure is documented below.
    additionalPodNetworkConfigs List<Property Map>
    The additional pod networks for this node pool, specified as a list. Each pod network corresponds to an additional alias IP range for the node. Structure is documented below.
    createPodRange Boolean
    Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.
    enablePrivateNodes Boolean
    Whether nodes have internal IP addresses only.
    networkPerformanceConfig Property Map
    Network bandwidth tier configuration. Structure is documented below.
    podCidrOverprovisionConfig Property Map
    Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
    podIpv4CidrBlock String
    The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
    podRange String
    The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
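
    A TypeScript sketch of a network configuration that creates a dedicated secondary range for pod IPs and keeps nodes private; the range name and CIDR are placeholder values, and the cluster is assumed to be VPC-native.

    import * as gcp from "@pulumi/gcp";

    const privatePool = new gcp.container.NodePool("private-pool", {
        cluster: "my-gke-cluster", // assumed existing VPC-native cluster
        location: "us-central1",
        nodeCount: 1,
        networkConfig: {
            createPodRange: true,              // provision a new secondary range
            podRange: "pods-private-pool",     // name for the new range (placeholder)
            podIpv4CidrBlock: "10.96.0.0/20",  // explicit CIDR for the new range
            enablePrivateNodes: true,          // nodes get internal IPs only
        },
    });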

    NodePoolNetworkConfigAdditionalNodeNetworkConfig, NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs

    Network string
    Name of the VPC where the additional interface belongs.
    Subnetwork string
    Name of the subnetwork where the additional interface belongs.
    Network string
    Name of the VPC where the additional interface belongs.
    Subnetwork string
    Name of the subnetwork where the additional interface belongs.
    network String
    Name of the VPC where the additional interface belongs.
    subnetwork String
    Name of the subnetwork where the additional interface belongs.
    network string
    Name of the VPC where the additional interface belongs.
    subnetwork string
    Name of the subnetwork where the additional interface belongs.
    network str
    Name of the VPC where the additional interface belongs.
    subnetwork str
    Name of the subnetwork where the additional interface belongs.
    network String
    Name of the VPC where the additional interface belongs.
    subnetwork String
    Name of the subnetwork where the additional interface belongs.

    NodePoolNetworkConfigAdditionalPodNetworkConfig, NodePoolNetworkConfigAdditionalPodNetworkConfigArgs

    MaxPodsPerNode int
    The maximum number of pods per node which use this pod network.
    SecondaryPodRange string
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    Subnetwork string
    Name of the subnetwork where the additional pod network belongs.
    MaxPodsPerNode int
    The maximum number of pods per node which use this pod network.
    SecondaryPodRange string
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    Subnetwork string
    Name of the subnetwork where the additional pod network belongs.
    maxPodsPerNode Integer
    The maximum number of pods per node which use this pod network.
    secondaryPodRange String
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork String
    Name of the subnetwork where the additional pod network belongs.
    maxPodsPerNode number
    The maximum number of pods per node which use this pod network.
    secondaryPodRange string
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork string
    Name of the subnetwork where the additional pod network belongs.
    max_pods_per_node int
    The maximum number of pods per node which use this pod network.
    secondary_pod_range str
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork str
    Name of the subnetwork where the additional pod network belongs.
    maxPodsPerNode Number
    The maximum number of pods per node which use this pod network.
    secondaryPodRange String
    The name of the secondary range on the subnet which provides IP addresses for this pod range.
    subnetwork String
    Name of the subnetwork where the additional pod network belongs.
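
    A TypeScript sketch that attaches an additional node network and an additional pod network to the pool; the VPC, subnetwork, and secondary range names are placeholders, and the cluster is assumed to have multi-networking enabled.

    import * as gcp from "@pulumi/gcp";

    const multiNetPool = new gcp.container.NodePool("multi-net-pool", {
        cluster: "my-gke-cluster", // assumed existing cluster
        location: "us-central1",
        nodeCount: 1,
        networkConfig: {
            // Extra network interface on each node (placeholder names).
            additionalNodeNetworkConfigs: [{
                network: "secondary-vpc",
                subnetwork: "secondary-subnet",
            }],
            // Extra alias IP range used by pods on that interface.
            additionalPodNetworkConfigs: [{
                subnetwork: "secondary-subnet",
                secondaryPodRange: "extra-pod-range",
                maxPodsPerNode: 32,
            }],
        },
    });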

    NodePoolNetworkConfigNetworkPerformanceConfig, NodePoolNetworkConfigNetworkPerformanceConfigArgs

    TotalEgressBandwidthTier string
    Specifies the total network bandwidth tier for the NodePool.
    TotalEgressBandwidthTier string
    Specifies the total network bandwidth tier for the NodePool.
    totalEgressBandwidthTier String
    Specifies the total network bandwidth tier for the NodePool.
    totalEgressBandwidthTier string
    Specifies the total network bandwidth tier for the NodePool.
    total_egress_bandwidth_tier str
    Specifies the total network bandwidth tier for the NodePool.
    totalEgressBandwidthTier String
    Specifies the total network bandwidth tier for the NodePool.

    NodePoolNetworkConfigPodCidrOverprovisionConfig, NodePoolNetworkConfigPodCidrOverprovisionConfigArgs

    Disabled bool
    Whether pod cidr overprovision is disabled.
    Disabled bool
    Whether pod cidr overprovision is disabled.
    disabled Boolean
    Whether pod cidr overprovision is disabled.
    disabled boolean
    Whether pod cidr overprovision is disabled.
    disabled bool
    Whether pod cidr overprovision is disabled.
    disabled Boolean
    Whether pod cidr overprovision is disabled.
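
    A TypeScript sketch combining the two blocks above. TIER_1 egress bandwidth has machine-type and gVNIC prerequisites that are not shown here, so treat this as illustrative only; the cluster name is a placeholder.

    import * as gcp from "@pulumi/gcp";

    const tieredPool = new gcp.container.NodePool("tiered-pool", {
        cluster: "my-gke-cluster", // assumed existing cluster
        location: "us-central1",
        nodeCount: 1,
        networkConfig: {
            networkPerformanceConfig: {
                totalEgressBandwidthTier: "TIER_1",
            },
            podCidrOverprovisionConfig: {
                disabled: true, // opt out of the default pod CIDR overprovisioning
            },
        },
    });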

    NodePoolNodeConfig, NodePoolNodeConfigArgs

    AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    BootDiskKmsKey string
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    ConfidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
    ContainerdConfig NodePoolNodeConfigContainerdConfig
    Parameters for containerd configuration.
    DiskSizeGb int
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    DiskType string
    Type of the disk attached to each node, such as pd-standard, pd-balanced or pd-ssd.
    EffectiveTaints List<NodePoolNodeConfigEffectiveTaint>
    List of kubernetes taints applied to each node.
    EnableConfidentialStorage bool
    If enabled, boot disks are configured with confidential mode.
    EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    FastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    GcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    GuestAccelerators List<NodePoolNodeConfigGuestAccelerator>
    List of the type and count of accelerator cards attached to the instance.
    Gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    ImageType string
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    KubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    Labels Dictionary<string, string>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    LocalSsdCount int
    The number of local SSD disks to be attached to the node.
    LoggingVariant string
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    MachineType string
    The name of a Google Compute Engine machine type.
    Metadata Dictionary<string, string>
    The metadata key/value pairs assigned to instances in the cluster.
    MinCpuPlatform string
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    NodeGroup string
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    OauthScopes List<string>
    The set of Google API scopes to be made available on all of the node VMs.
    Preemptible bool
    Whether the nodes are created as preemptible VM instances.
    ReservationAffinity NodePoolNodeConfigReservationAffinity

    The configuration of the desired reservation which instances could take capacity from. Structure is documented below.

    ResourceLabels Dictionary<string, string>
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    ResourceManagerTags Dictionary<string, string>
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
    SandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    SecondaryBootDisks List<NodePoolNodeConfigSecondaryBootDisk>
    Secondary boot disks for preloading data or container images.
    ServiceAccount string
    The Google Cloud Platform Service Account to be used by the node VMs.
    ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    SoleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    Spot bool
    Whether the nodes are created as spot VM instances.
    StoragePools List<string>
    The list of Storage Pools where boot disks are provisioned.
    Tags List<string>
    The list of instance tags applied to all nodes.
    Taints List<NodePoolNodeConfigTaint>
    List of Kubernetes taints to be applied to each node.
    WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    AdvancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    BootDiskKmsKey string
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    ConfidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
    ContainerdConfig NodePoolNodeConfigContainerdConfig
    Parameters for containerd configuration.
    DiskSizeGb int
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    DiskType string
    Type of the disk attached to each node, such as pd-standard, pd-balanced or pd-ssd.
    EffectiveTaints []NodePoolNodeConfigEffectiveTaint
    List of kubernetes taints applied to each node.
    EnableConfidentialStorage bool
    If enabled, boot disks are configured with confidential mode.
    EphemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    EphemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    FastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    GcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    GuestAccelerators []NodePoolNodeConfigGuestAccelerator
    List of the type and count of accelerator cards attached to the instance.
    Gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    HostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    ImageType string
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    KubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    Labels map[string]string
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    LinuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    LocalNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    LocalSsdCount int
    The number of local SSD disks to be attached to the node.
    LoggingVariant string
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    MachineType string
    The name of a Google Compute Engine machine type.
    Metadata map[string]string
    The metadata key/value pairs assigned to instances in the cluster.
    MinCpuPlatform string
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    NodeGroup string
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    OauthScopes []string
    The set of Google API scopes to be made available on all of the node VMs.
    Preemptible bool
    Whether the nodes are created as preemptible VM instances.
    ReservationAffinity NodePoolNodeConfigReservationAffinity

    The configuration of the desired reservation which instances could take capacity from. Structure is documented below.

    ResourceLabels map[string]string
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    ResourceManagerTags map[string]string
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
    SandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    SecondaryBootDisks []NodePoolNodeConfigSecondaryBootDisk
    Secondary boot disks for preloading data or container images.
    ServiceAccount string
    The Google Cloud Platform Service Account to be used by the node VMs.
    ShieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    SoleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    Spot bool
    Whether the nodes are created as spot VM instances.
    StoragePools []string
    The list of Storage Pools where boot disks are provisioned.
    Tags []string
    The list of instance tags applied to all nodes.
    Taints []NodePoolNodeConfigTaint
    List of Kubernetes taints to be applied to each node.
    WorkloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    bootDiskKmsKey String
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
    containerdConfig NodePoolNodeConfigContainerdConfig
    Parameters for containerd configuration.
    diskSizeGb Integer
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    diskType String
    Type of the disk attached to each node, such as pd-standard, pd-balanced or pd-ssd.
    effectiveTaints List<NodePoolNodeConfigEffectiveTaint>
    List of kubernetes taints applied to each node.
    enableConfidentialStorage Boolean
    If enabled, boot disks are configured with confidential mode.
    ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    gcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    guestAccelerators List<NodePoolNodeConfigGuestAccelerator>
    List of the type and count of accelerator cards attached to the instance.
    gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    imageType String
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    labels Map<String,String>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    localSsdCount Integer
    The number of local SSD disks to be attached to the node.
    loggingVariant String
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machineType String
    The name of a Google Compute Engine machine type.
    metadata Map<String,String>
    The metadata key/value pairs assigned to instances in the cluster.
    minCpuPlatform String
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    nodeGroup String
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauthScopes List<String>
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible Boolean
    Whether the nodes are created as preemptible VM instances.
    reservationAffinity NodePoolNodeConfigReservationAffinity

    The configuration of the desired reservation which instances could take capacity from. Structure is documented below.

    resourceLabels Map<String,String>
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resourceManagerTags Map<String,String>
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
    sandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    secondaryBootDisks List<NodePoolNodeConfigSecondaryBootDisk>
    Secondary boot disks for preloading data or container images.
    serviceAccount String
    The Google Cloud Platform Service Account to be used by the node VMs.
    shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    soleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    spot Boolean
    Whether the nodes are created as spot VM instances.
    storagePools List<String>
    The list of Storage Pools where boot disks are provisioned.
    tags List<String>
    The list of instance tags applied to all nodes.
    taints List<NodePoolNodeConfigTaint>
    List of Kubernetes taints to be applied to each node.
    workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advancedMachineFeatures NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    bootDiskKmsKey string
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidentialNodes NodePoolNodeConfigConfidentialNodes
    Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
    containerdConfig NodePoolNodeConfigContainerdConfig
    Parameters for containerd configuration.
    diskSizeGb number
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    diskType string
    Type of the disk attached to each node, such as pd-standard, pd-balanced or pd-ssd.
    effectiveTaints NodePoolNodeConfigEffectiveTaint[]
    List of kubernetes taints applied to each node.
    enableConfidentialStorage boolean
    If enabled, boot disks are configured with confidential mode.
    ephemeralStorageConfig NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeralStorageLocalSsdConfig NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fastSocket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    gcfsConfig NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    guestAccelerators NodePoolNodeConfigGuestAccelerator[]
    List of the type and count of accelerator cards attached to the instance.
    gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    hostMaintenancePolicy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    imageType string
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubeletConfig NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    labels {[key: string]: string}
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linuxNodeConfig NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    localNvmeSsdBlockConfig NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    localSsdCount number
    The number of local SSD disks to be attached to the node.
    loggingVariant string
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machineType string
    The name of a Google Compute Engine machine type.
    metadata {[key: string]: string}
    The metadata key/value pairs assigned to instances in the cluster.
    minCpuPlatform string
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    nodeGroup string
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauthScopes string[]
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible boolean
    Whether the nodes are created as preemptible VM instances.
    reservationAffinity NodePoolNodeConfigReservationAffinity

    The configuration of the desired reservation which instances could take capacity from. Structure is documented below.

    resourceLabels {[key: string]: string}
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resourceManagerTags {[key: string]: string}
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
    sandboxConfig NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    secondaryBootDisks NodePoolNodeConfigSecondaryBootDisk[]
    Secondary boot disks for preloading data or container images.
    serviceAccount string
    The Google Cloud Platform Service Account to be used by the node VMs.
    shieldedInstanceConfig NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    soleTenantConfig NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    spot boolean
    Whether the nodes are created as spot VM instances.
    storagePools string[]
    The list of Storage Pools where boot disks are provisioned.
    tags string[]
    The list of instance tags applied to all nodes.
    taints NodePoolNodeConfigTaint[]
    List of Kubernetes taints to be applied to each node.
    workloadMetadataConfig NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advanced_machine_features NodePoolNodeConfigAdvancedMachineFeatures
    Specifies options for controlling advanced machine features.
    boot_disk_kms_key str
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidential_nodes NodePoolNodeConfigConfidentialNodes
    Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
    containerd_config NodePoolNodeConfigContainerdConfig
    Parameters for containerd configuration.
    disk_size_gb int
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    disk_type str
    Type of the disk attached to each node, such as pd-standard, pd-balanced or pd-ssd.
    effective_taints Sequence[NodePoolNodeConfigEffectiveTaint]
    List of kubernetes taints applied to each node.
    enable_confidential_storage bool
    If enabled, boot disks are configured with confidential mode.
    ephemeral_storage_config NodePoolNodeConfigEphemeralStorageConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeral_storage_local_ssd_config NodePoolNodeConfigEphemeralStorageLocalSsdConfig
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fast_socket NodePoolNodeConfigFastSocket
    Enable or disable NCCL Fast Socket in the node pool.
    gcfs_config NodePoolNodeConfigGcfsConfig
    GCFS configuration for this node.
    guest_accelerators Sequence[NodePoolNodeConfigGuestAccelerator]
    List of the type and count of accelerator cards attached to the instance.
    gvnic NodePoolNodeConfigGvnic
    Enable or disable gvnic in the node pool.
    host_maintenance_policy NodePoolNodeConfigHostMaintenancePolicy
    The maintenance policy for the hosts on which the GKE VMs run.
    image_type str
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubelet_config NodePoolNodeConfigKubeletConfig
    Node kubelet configs.
    labels Mapping[str, str]
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linux_node_config NodePoolNodeConfigLinuxNodeConfig
    Parameters that can be configured on Linux nodes.
    local_nvme_ssd_block_config NodePoolNodeConfigLocalNvmeSsdBlockConfig
    Parameters for raw-block local NVMe SSDs.
    local_ssd_count int
    The number of local SSD disks to be attached to the node.
    logging_variant str
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machine_type str
    The name of a Google Compute Engine machine type.
    metadata Mapping[str, str]
    The metadata key/value pairs assigned to instances in the cluster.
    min_cpu_platform str
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    node_group str
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauth_scopes Sequence[str]
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible bool
    Whether the nodes are created as preemptible VM instances.
    reservation_affinity NodePoolNodeConfigReservationAffinity

    The configuration of the desired reservation which instances could take capacity from. Structure is documented below.

    resource_labels Mapping[str, str]
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resource_manager_tags Mapping[str, str]
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
    sandbox_config NodePoolNodeConfigSandboxConfig
    Sandbox configuration for this node.
    secondary_boot_disks Sequence[NodePoolNodeConfigSecondaryBootDisk]
    Secondary boot disks for preloading data or container images.
    service_account str
    The Google Cloud Platform Service Account to be used by the node VMs.
    shielded_instance_config NodePoolNodeConfigShieldedInstanceConfig
    Shielded Instance options.
    sole_tenant_config NodePoolNodeConfigSoleTenantConfig
    Node affinity options for sole tenant node pools.
    spot bool
    Whether the nodes are created as spot VM instances.
    storage_pools Sequence[str]
    The list of Storage Pools where boot disks are provisioned.
    tags Sequence[str]
    The list of instance tags applied to all nodes.
    taints Sequence[NodePoolNodeConfigTaint]
    List of Kubernetes taints to be applied to each node.
    workload_metadata_config NodePoolNodeConfigWorkloadMetadataConfig
    The workload metadata configuration for this node.
    advancedMachineFeatures Property Map
    Specifies options for controlling advanced machine features.
    bootDiskKmsKey String
    The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
    confidentialNodes Property Map
    Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.
    containerdConfig Property Map
    Parameters for containerd configuration.
    diskSizeGb Number
    Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
    diskType String
    Type of the disk attached to each node, such as pd-standard, pd-balanced or pd-ssd.
    effectiveTaints List<Property Map>
    List of kubernetes taints applied to each node.
    enableConfidentialStorage Boolean
    If enabled, boot disks are configured with confidential mode.
    ephemeralStorageConfig Property Map
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    ephemeralStorageLocalSsdConfig Property Map
    Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
    fastSocket Property Map
    Enable or disable NCCL Fast Socket in the node pool.
    gcfsConfig Property Map
    GCFS configuration for this node.
    guestAccelerators List<Property Map>
    List of the type and count of accelerator cards attached to the instance.
    gvnic Property Map
    Enable or disable gvnic in the node pool.
    hostMaintenancePolicy Property Map
    The maintenance policy for the hosts on which the GKE VMs run.
    imageType String
    The image type to use for this node. Note that for a given image type, the latest version of it will be used.
    kubeletConfig Property Map
    Node kubelet configs.
    labels Map<String>
    The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
    linuxNodeConfig Property Map
    Parameters that can be configured on Linux nodes.
    localNvmeSsdBlockConfig Property Map
    Parameters for raw-block local NVMe SSDs.
    localSsdCount Number
    The number of local SSD disks to be attached to the node.
    loggingVariant String
    Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
    machineType String
    The name of a Google Compute Engine machine type.
    metadata Map<String>
    The metadata key/value pairs assigned to instances in the cluster.
    minCpuPlatform String
    Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
    nodeGroup String
    Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
    oauthScopes List<String>
    The set of Google API scopes to be made available on all of the node VMs.
    preemptible Boolean
    Whether the nodes are created as preemptible VM instances.
    reservationAffinity Property Map
    The configuration of the desired reservation which instances could take capacity from. Structure is documented below.
    resourceLabels Map<String>
    The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
    resourceManagerTags Map<String>
    A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values must be in the format tagValues/{tag_value_id}. The field is ignored (both PUT & PATCH) when empty.
    sandboxConfig Property Map
    Sandbox configuration for this node.
    secondaryBootDisks List<Property Map>
    Secondary boot disks for preloading data or container images.
    serviceAccount String
    The Google Cloud Platform Service Account to be used by the node VMs.
    shieldedInstanceConfig Property Map
    Shielded Instance options.
    soleTenantConfig Property Map
    Node affinity options for sole tenant node pools.
    spot Boolean
    Whether the nodes are created as spot VM instances.
    storagePools List<String>
    The list of Storage Pools where boot disks are provisioned.
    tags List<String>
    The list of instance tags applied to all nodes.
    taints List<Property Map>
    List of Kubernetes taints to be applied to each node.
    workloadMetadataConfig Property Map
    The workload metadata configuration for this node.
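
    As a quick illustration of how several of the node config fields above fit together, the following TypeScript sketch creates a spot node pool with node labels, GCE resource labels, and a taint. The cluster name, location, and all label/taint values are placeholders, not values taken from this page.

    import * as gcp from "@pulumi/gcp";
    
    // Hypothetical spot node pool; "my-existing-cluster" and "us-central1" are placeholders.
    const spotPool = new gcp.container.NodePool("spot-pool", {
        cluster: "my-existing-cluster",
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "e2-standard-4",
            spot: true,                        // create nodes as spot VM instances
            labels: { workload: "batch" },     // Kubernetes node labels
            resourceLabels: { team: "data" },  // GCE resource labels on the underlying VMs
            taints: [{                         // keep non-tolerating pods off these nodes
                key: "workload",
                value: "batch",
                effect: "NO_SCHEDULE",
            }],
            oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
        },
    });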

    NodePoolNodeConfigAdvancedMachineFeatures, NodePoolNodeConfigAdvancedMachineFeaturesArgs

    ThreadsPerCore int
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    EnableNestedVirtualization bool
    Whether the node should have nested virtualization enabled.
    ThreadsPerCore int
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    EnableNestedVirtualization bool
    Whether the node should have nested virtualization enabled.
    threadsPerCore Integer
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    enableNestedVirtualization Boolean
    Whether the node should have nested virtualization enabled.
    threadsPerCore number
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    enableNestedVirtualization boolean
    Whether the node should have nested virtualization enabled.
    threads_per_core int
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    enable_nested_virtualization bool
    Whether the node should have nested virtualization enabled.
    threadsPerCore Number
    The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
    enableNestedVirtualization Boolean
    Whether the node should have nested virtualization enabled.
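
    For example, setting threadsPerCore to 1 disables simultaneous multithreading on the pool's nodes. The sketch below assumes a machine family that supports this setting; the cluster name and location are placeholders.

    import * as gcp from "@pulumi/gcp";
    
    const smtOffPool = new gcp.container.NodePool("smt-off-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "n2-standard-4",
            advancedMachineFeatures: {
                threadsPerCore: 1,  // disable simultaneous multithreading (SMT)
            },
        },
    });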

    NodePoolNodeConfigConfidentialNodes, NodePoolNodeConfigConfidentialNodesArgs

    Enabled bool
    Whether Confidential Nodes feature is enabled for all nodes in this pool.
    Enabled bool
    Whether Confidential Nodes feature is enabled for all nodes in this pool.
    enabled Boolean
    Whether Confidential Nodes feature is enabled for all nodes in this pool.
    enabled boolean
    Whether Confidential Nodes feature is enabled for all nodes in this pool.
    enabled bool
    Whether Confidential Nodes feature is enabled for all nodes in this pool.
    enabled Boolean
    Whether Confidential Nodes feature is enabled for all nodes in this pool.

    NodePoolNodeConfigContainerdConfig, NodePoolNodeConfigContainerdConfigArgs

    PrivateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
    Parameters for private container registries configuration.
    PrivateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
    Parameters for private container registries configuration.
    privateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
    Parameters for private container registries configuration.
    privateRegistryAccessConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig
    Parameters for private container registries configuration.
    privateRegistryAccessConfig Property Map
    Parameters for private container registries configuration.

    NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigArgs

    Enabled bool
    Whether or not private registries are configured.
    CertificateAuthorityDomainConfigs List<NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig>
    Parameters for configuring CA certificate and domains.
    Enabled bool
    Whether or not private registries are configured.
    CertificateAuthorityDomainConfigs []NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig
    Parameters for configuring CA certificate and domains.
    enabled Boolean
    Whether or not private registries are configured.
    certificateAuthorityDomainConfigs List<NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig>
    Parameters for configuring CA certificate and domains.
    enabled boolean
    Whether or not private registries are configured.
    certificateAuthorityDomainConfigs NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig[]
    Parameters for configuring CA certificate and domains.
    enabled bool
    Whether or not private registries are configured.
    certificate_authority_domain_configs Sequence[NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig]
    Parameters for configuring CA certificate and domains.
    enabled Boolean
    Whether or not private registries are configured.
    certificateAuthorityDomainConfigs List<Property Map>
    Parameters for configuring CA certificate and domains.

    NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigArgs

    Fqdns List<string>
    List of fully qualified domain names. IPv4 addresses and port specifications are supported.
    GcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
    Parameters for configuring a certificate hosted in GCP SecretManager.
    Fqdns []string
    List of fully qualified domain names. IPv4 addresses and port specifications are supported.
    GcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
    Parameters for configuring a certificate hosted in GCP SecretManager.
    fqdns List<String>
    List of fully qualified domain names. IPv4 addresses and port specifications are supported.
    gcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
    Parameters for configuring a certificate hosted in GCP SecretManager.
    fqdns string[]
    List of fully qualified domain names. IPv4 addresses and port specifications are supported.
    gcpSecretManagerCertificateConfig NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
    Parameters for configuring a certificate hosted in GCP SecretManager.
    fqdns Sequence[str]
    List of fully qualified domain names. IPv4 addresses and port specifications are supported.
    gcp_secret_manager_certificate_config NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig
    Parameters for configuring a certificate hosted in GCP SecretManager.
    fqdns List<String>
    List of fully qualified domain names. IPv4 addresses and port specifications are supported.
    gcpSecretManagerCertificateConfig Property Map
    Parameters for configuring a certificate hosted in GCP SecretManager.

    NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfig, NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigCertificateAuthorityDomainConfigGcpSecretManagerCertificateConfigArgs

    SecretUri string
    URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
    SecretUri string
    URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
    secretUri String
    URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
    secretUri string
    URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
    secret_uri str
    URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
    secretUri String
    URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.
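
    To make the nesting of the private registry settings concrete, the sketch below trusts a CA certificate stored in Secret Manager for a private registry domain. The registry FQDN, secret URI, cluster name, and location are placeholders.

    import * as gcp from "@pulumi/gcp";
    
    const registryPool = new gcp.container.NodePool("private-registry-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "e2-medium",
            containerdConfig: {
                privateRegistryAccessConfig: {
                    enabled: true,
                    certificateAuthorityDomainConfigs: [{
                        fqdns: ["registry.example.com"],  // placeholder registry domain
                        gcpSecretManagerCertificateConfig: {
                            // placeholder; format: projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST
                            secretUri: "projects/1234567890/secrets/registry-ca/versions/latest",
                        },
                    }],
                },
            },
        },
    });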

    NodePoolNodeConfigEffectiveTaint, NodePoolNodeConfigEffectiveTaintArgs

    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.
    effect string
    Effect for taint.
    key string
    Key for taint.
    value string
    Value for taint.
    effect str
    Effect for taint.
    key str
    Key for taint.
    value str
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.

    NodePoolNodeConfigEphemeralStorageConfig, NodePoolNodeConfigEphemeralStorageConfigArgs

    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Integer
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    local_ssd_count int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.

    NodePoolNodeConfigEphemeralStorageLocalSsdConfig, NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs

    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    LocalSsdCount int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Integer
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    local_ssd_count int
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
    localSsdCount Number
    Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.

    NodePoolNodeConfigFastSocket, NodePoolNodeConfigFastSocketArgs

    Enabled bool
    Whether or not NCCL Fast Socket is enabled
    Enabled bool
    Whether or not NCCL Fast Socket is enabled
    enabled Boolean
    Whether or not NCCL Fast Socket is enabled
    enabled boolean
    Whether or not NCCL Fast Socket is enabled
    enabled bool
    Whether or not NCCL Fast Socket is enabled
    enabled Boolean
    Whether or not NCCL Fast Socket is enabled

    NodePoolNodeConfigGcfsConfig, NodePoolNodeConfigGcfsConfigArgs

    Enabled bool
    Whether or not GCFS is enabled
    Enabled bool
    Whether or not GCFS is enabled
    enabled Boolean
    Whether or not GCFS is enabled
    enabled boolean
    Whether or not GCFS is enabled
    enabled bool
    Whether or not GCFS is enabled
    enabled Boolean
    Whether or not GCFS is enabled

    NodePoolNodeConfigGuestAccelerator, NodePoolNodeConfigGuestAcceleratorArgs

    Count int
    The number of the accelerator cards exposed to an instance.
    Type string
    The accelerator type resource name.
    GpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    GpuPartitionSize string
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    Count int
    The number of the accelerator cards exposed to an instance.
    Type string
    The accelerator type resource name.
    GpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    GpuPartitionSize string
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    GpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count Integer
    The number of the accelerator cards exposed to an instance.
    type String
    The accelerator type resource name.
    gpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    gpuPartitionSize String
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count number
    The number of the accelerator cards exposed to an instance.
    type string
    The accelerator type resource name.
    gpuDriverInstallationConfig NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    gpuPartitionSize string
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpuSharingConfig NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count int
    The number of the accelerator cards exposed to an instance.
    type str
    The accelerator type resource name.
    gpu_driver_installation_config NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig
    Configuration for auto installation of GPU driver.
    gpu_partition_size str
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpu_sharing_config NodePoolNodeConfigGuestAcceleratorGpuSharingConfig
    Configuration for GPU sharing.
    count Number
    The number of the accelerator cards exposed to an instance.
    type String
    The accelerator type resource name.
    gpuDriverInstallationConfig Property Map
    Configuration for auto installation of GPU driver.
    gpuPartitionSize String
    Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
    gpuSharingConfig Property Map
    Configuration for GPU sharing.

    NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfig, NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs

    GpuDriverVersion string
    Mode for how the GPU driver is installed.
    GpuDriverVersion string
    Mode for how the GPU driver is installed.
    gpuDriverVersion String
    Mode for how the GPU driver is installed.
    gpuDriverVersion string
    Mode for how the GPU driver is installed.
    gpu_driver_version str
    Mode for how the GPU driver is installed.
    gpuDriverVersion String
    Mode for how the GPU driver is installed.

    NodePoolNodeConfigGuestAcceleratorGpuSharingConfig, NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs

    GpuSharingStrategy string
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    MaxSharedClientsPerGpu int
    The maximum number of containers that can share a GPU.
    GpuSharingStrategy string
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    MaxSharedClientsPerGpu int
    The maximum number of containers that can share a GPU.
    gpuSharingStrategy String
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    maxSharedClientsPerGpu Integer
    The maximum number of containers that can share a GPU.
    gpuSharingStrategy string
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    maxSharedClientsPerGpu number
    The maximum number of containers that can share a GPU.
    gpu_sharing_strategy str
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    max_shared_clients_per_gpu int
    The maximum number of containers that can share a GPU.
    gpuSharingStrategy String
    The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
    maxSharedClientsPerGpu Number
    The maximum number of containers that can share a GPU.
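
    A GPU-backed pool combines the accelerator fields above. In this sketch the accelerator type, sharing settings, cluster name, and location are placeholders and must match what the chosen zone actually offers.

    import * as gcp from "@pulumi/gcp";
    
    const gpuPool = new gcp.container.NodePool("gpu-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "n1-standard-8",
            guestAccelerators: [{
                type: "nvidia-tesla-t4",  // accelerator type resource name (placeholder)
                count: 1,
                gpuDriverInstallationConfig: {
                    gpuDriverVersion: "LATEST",  // let GKE install the GPU driver
                },
                gpuSharingConfig: {
                    gpuSharingStrategy: "TIME_SHARING",
                    maxSharedClientsPerGpu: 2,  // up to two containers share each GPU
                },
            }],
        },
    });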

    NodePoolNodeConfigGvnic, NodePoolNodeConfigGvnicArgs

    Enabled bool
    Whether or not gvnic is enabled
    Enabled bool
    Whether or not gvnic is enabled
    enabled Boolean
    Whether or not gvnic is enabled
    enabled boolean
    Whether or not gvnic is enabled
    enabled bool
    Whether or not gvnic is enabled
    enabled Boolean
    Whether or not gvnic is enabled

    NodePoolNodeConfigHostMaintenancePolicy, NodePoolNodeConfigHostMaintenancePolicyArgs

    NodePoolNodeConfigKubeletConfig, NodePoolNodeConfigKubeletConfigArgs

    CpuCfsQuota bool
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    CpuCfsQuotaPeriod string
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    CpuManagerPolicy string
    Control the CPU management policy on the node.
    InsecureKubeletReadonlyPortEnabled string
    Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
    PodPidsLimit int
    Controls the maximum number of processes allowed to run in a pod.
    CpuCfsQuota bool
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    CpuCfsQuotaPeriod string
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    CpuManagerPolicy string
    Control the CPU management policy on the node.
    InsecureKubeletReadonlyPortEnabled string
    Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
    PodPidsLimit int
    Controls the maximum number of processes allowed to run in a pod.
    cpuCfsQuota Boolean
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpuCfsQuotaPeriod String
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    cpuManagerPolicy String
    Control the CPU management policy on the node.
    insecureKubeletReadonlyPortEnabled String
    Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
    podPidsLimit Integer
    Controls the maximum number of processes allowed to run in a pod.
    cpuCfsQuota boolean
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpuCfsQuotaPeriod string
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    cpuManagerPolicy string
    Control the CPU management policy on the node.
    insecureKubeletReadonlyPortEnabled string
    Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
    podPidsLimit number
    Controls the maximum number of processes allowed to run in a pod.
    cpu_cfs_quota bool
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpu_cfs_quota_period str
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    cpu_manager_policy str
    Control the CPU management policy on the node.
    insecure_kubelet_readonly_port_enabled str
    Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
    pod_pids_limit int
    Controls the maximum number of processes allowed to run in a pod.
    cpuCfsQuota Boolean
    Enable CPU CFS quota enforcement for containers that specify CPU limits.
    cpuCfsQuotaPeriod String
    Set the CPU CFS quota period value 'cpu.cfs_period_us'.
    cpuManagerPolicy String
    Control the CPU management policy on the node.
    insecureKubeletReadonlyPortEnabled String
    Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to FALSE. Possible values: TRUE, FALSE.
    podPidsLimit Number
    Controls the maximum number of processes allowed to run in a pod.
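
    As an illustration, the kubelet settings above can be tuned per node pool. The values below are illustrative only; the cluster name and location are placeholders.

    import * as gcp from "@pulumi/gcp";
    
    const tunedPool = new gcp.container.NodePool("tuned-kubelet-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "e2-standard-4",
            kubeletConfig: {
                cpuManagerPolicy: "static",  // pin CPUs for Guaranteed pods
                cpuCfsQuota: true,
                cpuCfsQuotaPeriod: "100ms",
                podPidsLimit: 4096,
                insecureKubeletReadonlyPortEnabled: "FALSE",
            },
        },
    });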

    NodePoolNodeConfigLinuxNodeConfig, NodePoolNodeConfigLinuxNodeConfigArgs

    CgroupMode string
    cgroupMode specifies the cgroup mode to be used on the node.
    HugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
    Amounts for 2M and 1G hugepages.
    Sysctls Dictionary<string, string>
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    CgroupMode string
    cgroupMode specifies the cgroup mode to be used on the node.
    HugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
    Amounts for 2M and 1G hugepages.
    Sysctls map[string]string
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroupMode String
    cgroupMode specifies the cgroup mode to be used on the node.
    hugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
    Amounts for 2M and 1G hugepages.
    sysctls Map<String,String>
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroupMode string
    cgroupMode specifies the cgroup mode to be used on the node.
    hugepagesConfig NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
    Amounts for 2M and 1G hugepages.
    sysctls {[key: string]: string}
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroup_mode str
    cgroupMode specifies the cgroup mode to be used on the node.
    hugepages_config NodePoolNodeConfigLinuxNodeConfigHugepagesConfig
    Amounts for 2M and 1G hugepages.
    sysctls Mapping[str, str]
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
    cgroupMode String
    cgroupMode specifies the cgroup mode to be used on the node.
    hugepagesConfig Property Map
    Amounts for 2M and 1G hugepages.
    sysctls Map<String>
    The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.

    NodePoolNodeConfigLinuxNodeConfigHugepagesConfig, NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs

    HugepageSize1g int
    Amount of 1G hugepages.
    HugepageSize2m int
    Amount of 2M hugepages.
    HugepageSize1g int
    Amount of 1G hugepages.
    HugepageSize2m int
    Amount of 2M hugepages.
    hugepageSize1g Integer
    Amount of 1G hugepages.
    hugepageSize2m Integer
    Amount of 2M hugepages.
    hugepageSize1g number
    Amount of 1G hugepages.
    hugepageSize2m number
    Amount of 2M hugepages.
    hugepage_size1g int
    Amount of 1G hugepages.
    hugepage_size2m int
    Amount of 2M hugepages.
    hugepageSize1g Number
    Amount of 1G hugepages.
    hugepageSize2m Number
    Amount of 2M hugepages.
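
    The Linux node settings compose as in the sketch below. The sysctl value and hugepage count are illustrative, and the cluster name and location are placeholders.

    import * as gcp from "@pulumi/gcp";
    
    const linuxTunedPool = new gcp.container.NodePool("linux-tuned-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "n2-standard-8",
            linuxNodeConfig: {
                cgroupMode: "CGROUP_MODE_V2",
                sysctls: {
                    "net.core.somaxconn": "4096",  // illustrative kernel parameter
                },
                hugepagesConfig: {
                    hugepageSize2m: 1024,  // 1024 x 2M hugepages
                },
            },
        },
    });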

    NodePoolNodeConfigLocalNvmeSsdBlockConfig, NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs

    LocalSsdCount int
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    LocalSsdCount int
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    localSsdCount Integer
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    localSsdCount number
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    local_ssd_count int
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
    localSsdCount Number
    Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.

    NodePoolNodeConfigReservationAffinity, NodePoolNodeConfigReservationAffinityArgs

    ConsumeReservationType string
    The type of reservation consumption. Accepted values are:

    • "UNSPECIFIED": Default value. This should not be used.
    • "NO_RESERVATION": Do not consume from any reserved capacity.
    • "ANY_RESERVATION": Consume any reservation available.
    • "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
    Key string
    The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
    Values List<string>
    The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
    ConsumeReservationType string
    The type of reservation consumption. Accepted values are:

    • "UNSPECIFIED": Default value. This should not be used.
    • "NO_RESERVATION": Do not consume from any reserved capacity.
    • "ANY_RESERVATION": Consume any reservation available.
    • "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
    Key string
    The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
    Values []string
    The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
    consumeReservationType String
    The type of reservation consumption. Accepted values are:

    • "UNSPECIFIED": Default value. This should not be used.
    • "NO_RESERVATION": Do not consume from any reserved capacity.
    • "ANY_RESERVATION": Consume any reservation available.
    • "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
    key String
    The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
    values List<String>
    The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
    consumeReservationType string
    The type of reservation consumption. Accepted values are:

    • "UNSPECIFIED": Default value. This should not be used.
    • "NO_RESERVATION": Do not consume from any reserved capacity.
    • "ANY_RESERVATION": Consume any reservation available.
    • "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
    key string
    The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
    values string[]
    The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
    consume_reservation_type str
    The type of reservation consumption. Accepted values are:

    • "UNSPECIFIED": Default value. This should not be used.
    • "NO_RESERVATION": Do not consume from any reserved capacity.
    • "ANY_RESERVATION": Consume any reservation available.
    • "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
    key str
    The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
    values Sequence[str]
    The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
    consumeReservationType String
    The type of reservation consumption. Accepted values are:

    • "UNSPECIFIED": Default value. This should not be used.
    • "NO_RESERVATION": Do not consume from any reserved capacity.
    • "ANY_RESERVATION": Consume any reservation available.
    • "SPECIFIC_RESERVATION": Must consume from a specific reservation. Must specify key value fields for specifying the reservations.
    key String
    The label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify "compute.googleapis.com/reservation-name" as the key and specify the name of your reservation as its value.
    values List<String>
    The list of label values of reservation resources. For example: the name of the specific reservation when using a key of "compute.googleapis.com/reservation-name"
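
    Targeting a specific Compute Engine reservation looks like the following sketch; the reservation name, cluster name, and location are placeholders.

    import * as gcp from "@pulumi/gcp";
    
    const reservedPool = new gcp.container.NodePool("reserved-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 1,
        nodeConfig: {
            machineType: "n2-standard-16",
            reservationAffinity: {
                consumeReservationType: "SPECIFIC_RESERVATION",
                key: "compute.googleapis.com/reservation-name",
                values: ["my-reservation"],  // placeholder reservation name
            },
        },
    });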

    NodePoolNodeConfigSandboxConfig, NodePoolNodeConfigSandboxConfigArgs

    SandboxType string
    Type of the sandbox to use for the node (e.g. 'gvisor')
    SandboxType string
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandboxType String
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandboxType string
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandbox_type str
    Type of the sandbox to use for the node (e.g. 'gvisor')
    sandboxType String
    Type of the sandbox to use for the node (e.g. 'gvisor')

    NodePoolNodeConfigSecondaryBootDisk, NodePoolNodeConfigSecondaryBootDiskArgs

    DiskImage string
    Disk image to create the secondary boot disk from
    Mode string
    Mode for how the secondary boot disk is used.
    DiskImage string
    Disk image to create the secondary boot disk from
    Mode string
    Mode for how the secondary boot disk is used.
    diskImage String
    Disk image to create the secondary boot disk from
    mode String
    Mode for how the secondary boot disk is used.
    diskImage string
    Disk image to create the secondary boot disk from
    mode string
    Mode for how the secondary boot disk is used.
    disk_image str
    Disk image to create the secondary boot disk from
    mode str
    Mode for how the secondary boot disk is used.
    diskImage String
    Disk image to create the secondary boot disk from
    mode String
    Mode for how the secondary boot disk is used.

    NodePoolNodeConfigShieldedInstanceConfig, NodePoolNodeConfigShieldedInstanceConfigArgs

    EnableIntegrityMonitoring bool
    Defines whether the instance has integrity monitoring enabled.
    EnableSecureBoot bool
    Defines whether the instance has Secure Boot enabled.
    EnableIntegrityMonitoring bool
    Defines whether the instance has integrity monitoring enabled.
    EnableSecureBoot bool
    Defines whether the instance has Secure Boot enabled.
    enableIntegrityMonitoring Boolean
    Defines whether the instance has integrity monitoring enabled.
    enableSecureBoot Boolean
    Defines whether the instance has Secure Boot enabled.
    enableIntegrityMonitoring boolean
    Defines whether the instance has integrity monitoring enabled.
    enableSecureBoot boolean
    Defines whether the instance has Secure Boot enabled.
    enable_integrity_monitoring bool
    Defines whether the instance has integrity monitoring enabled.
    enable_secure_boot bool
    Defines whether the instance has Secure Boot enabled.
    enableIntegrityMonitoring Boolean
    Defines whether the instance has integrity monitoring enabled.
    enableSecureBoot Boolean
    Defines whether the instance has Secure Boot enabled.

    NodePoolNodeConfigSoleTenantConfig, NodePoolNodeConfigSoleTenantConfigArgs

    NodePoolNodeConfigSoleTenantConfigNodeAffinity, NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs

    Key string
    .
    Operator string
    .
    Values List<string>
    .
    Key string
    .
    Operator string
    .
    Values []string
    .
    key String
    .
    operator String
    .
    values List<String>
    .
    key string
    .
    operator string
    .
    values string[]
    .
    key str
    .
    operator str
    .
    values Sequence[str]
    .
    key String
    .
    operator String
    .
    values List<String>
    .

    NodePoolNodeConfigTaint, NodePoolNodeConfigTaintArgs

    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    Effect string
    Effect for taint.
    Key string
    Key for taint.
    Value string
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.
    effect string
    Effect for taint.
    key string
    Key for taint.
    value string
    Value for taint.
    effect str
    Effect for taint.
    key str
    Key for taint.
    value str
    Value for taint.
    effect String
    Effect for taint.
    key String
    Key for taint.
    value String
    Value for taint.

    NodePoolNodeConfigWorkloadMetadataConfig, NodePoolNodeConfigWorkloadMetadataConfigArgs

    Mode string
    Mode is the configuration for how to expose metadata to workloads running on the node.
    Mode string
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode String
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode string
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode str
    Mode is the configuration for how to expose metadata to workloads running on the node.
    mode String
    Mode is the configuration for how to expose metadata to workloads running on the node.

    NodePoolPlacementPolicy, NodePoolPlacementPolicyArgs

    Type string
    The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
    PolicyName string
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If it is not found, an InvalidArgument error is returned.
    TpuTopology string
    The TPU placement topology for pod slice node pool.
    Type string
    The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
    PolicyName string
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If it is not found, an InvalidArgument error is returned.
    TpuTopology string
    The TPU placement topology for pod slice node pool.
    type String
    The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
    policyName String
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If it is not found, an InvalidArgument error is returned.
    tpuTopology String
    The TPU placement topology for pod slice node pool.
    type string
    The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
    policyName string
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If it is not found, an InvalidArgument error is returned.
    tpuTopology string
    The TPU placement topology for pod slice node pool.
    type str
    The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
    policy_name str
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If it is not found, an InvalidArgument error is returned.
    tpu_topology str
    The TPU placement topology for pod slice node pool.
    type String
    The type of the policy. Supports a single value: COMPACT. Specifying the COMPACT placement policy type places the node pool's nodes in closer physical proximity in order to reduce network latency between nodes.
    policyName String
    If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If it is not found, an InvalidArgument error is returned.
    tpuTopology String
    The TPU placement topology for pod slice node pool.
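
    A compact placement policy is set at the node pool level rather than inside the node config, as in this sketch; the machine type, cluster name, and location are placeholders and must support compact placement.

    import * as gcp from "@pulumi/gcp";
    
    const compactPool = new gcp.container.NodePool("compact-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 2,
        nodeConfig: {
            machineType: "c2-standard-8",  // placeholder; must be a family that supports compact placement
        },
        placementPolicy: {
            type: "COMPACT",  // place nodes physically close together to reduce latency
        },
    });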

    NodePoolQueuedProvisioning, NodePoolQueuedProvisioningArgs

    Enabled bool
    Makes nodes obtainable through the ProvisioningRequest API exclusively.
    Enabled bool
    Makes nodes obtainable through the ProvisioningRequest API exclusively.
    enabled Boolean
    Makes nodes obtainable through the ProvisioningRequest API exclusively.
    enabled boolean
    Makes nodes obtainable through the ProvisioningRequest API exclusively.
    enabled bool
    Makes nodes obtainable through the ProvisioningRequest API exclusively.
    enabled Boolean
    Makes nodes obtainable through the ProvisioningRequest API exclusively.

    NodePoolUpgradeSettings, NodePoolUpgradeSettingsArgs

    BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    MaxSurge int
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    MaxUnavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    Strategy string
    The upgrade strategy to be used for upgrading the nodes.
    BlueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    MaxSurge int
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    MaxUnavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    Strategy string
    The upgrade strategy to be used for upgrading the nodes.
    blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    maxSurge Integer
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    maxUnavailable Integer

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy String
    The upgrade strategy to be used for upgrading the nodes.
    blueGreenSettings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    maxSurge number
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    maxUnavailable number

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy string
    The upgrade strategy to be used for upgrading the nodes.
    blue_green_settings NodePoolUpgradeSettingsBlueGreenSettings
    The settings to adjust blue-green upgrades. Structure is documented below.
    max_surge int
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    max_unavailable int

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy str
    The upgrade strategy to be used for upgrading the nodes.
    blueGreenSettings Property Map
    The settings to adjust blue-green upgrades. Structure is documented below.
    maxSurge Number
    The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.
    maxUnavailable Number

    The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater.

    max_surge and max_unavailable must not be negative and at least one of them must be greater than zero.

    strategy String
    The upgrade strategy to be used for upgrading the nodes.
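
    As a simple illustration of surge-based upgrades, the sketch below allows one extra node and no unavailable nodes during an upgrade; the cluster name and location are placeholders.

    import * as gcp from "@pulumi/gcp";
    
    const surgePool = new gcp.container.NodePool("surge-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 3,
        upgradeSettings: {
            strategy: "SURGE",
            maxSurge: 1,        // one extra node may be added during the upgrade
            maxUnavailable: 0,  // no existing node is removed before its replacement is ready
        },
    });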

    NodePoolUpgradeSettingsBlueGreenSettings, NodePoolUpgradeSettingsBlueGreenSettingsArgs

    StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    NodePoolSoakDuration string
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    StandardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    NodePoolSoakDuration string
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    nodePoolSoakDuration String
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standardRolloutPolicy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    nodePoolSoakDuration string
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standard_rollout_policy NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy
    Specifies the standard policy settings for blue-green upgrades.
    node_pool_soak_duration str
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.
    standardRolloutPolicy Property Map
    Specifies the standard policy settings for blue-green upgrades.
    nodePoolSoakDuration String
    Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up.

    NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicy, NodePoolUpgradeSettingsBlueGreenSettingsStandardRolloutPolicyArgs

    BatchNodeCount int
    Number of blue nodes to drain in a batch.
    BatchPercentage double
    Percentage of the blue pool nodes to drain in a batch.
    BatchSoakDuration string
    Soak time after each batch gets drained.
    BatchNodeCount int
    Number of blue nodes to drain in a batch.
    BatchPercentage float64
    Percentage of the blue pool nodes to drain in a batch.
    BatchSoakDuration string
    Soak time after each batch gets drained.
    batchNodeCount Integer
    Number of blue nodes to drain in a batch.
    batchPercentage Double
    Percentage of the blue pool nodes to drain in a batch.
    batchSoakDuration String
    Soak time after each batch gets drained.
    batchNodeCount number
    Number of blue nodes to drain in a batch.
    batchPercentage number
    Percentage of the blue pool nodes to drain in a batch.
    batchSoakDuration string
    Soak time after each batch gets drained.
    batch_node_count int
    Number of blue nodes to drain in a batch.
    batch_percentage float
    Percentage of the blue pool nodes to drain in a batch.
    batch_soak_duration str
    Soak time after each batch gets drained.
    batchNodeCount Number
    Number of blue nodes to drain in a batch.
    batchPercentage Number
    Percentage of the blue pool nodes to drain in a batch.
    batchSoakDuration String
    Soak time after each batch gets drained.
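
    For blue-green upgrades, the rollout policy nests under blueGreenSettings as in this sketch; the durations, batch percentage, cluster name, and location are illustrative placeholders.

    import * as gcp from "@pulumi/gcp";
    
    const blueGreenPool = new gcp.container.NodePool("blue-green-pool", {
        cluster: "my-existing-cluster",  // placeholder
        location: "us-central1",
        nodeCount: 3,
        upgradeSettings: {
            strategy: "BLUE_GREEN",
            blueGreenSettings: {
                standardRolloutPolicy: {
                    batchPercentage: 0.25,      // drain 25% of the blue pool per batch
                    batchSoakDuration: "300s",  // soak for 5 minutes after each batch
                },
                nodePoolSoakDuration: "600s",   // keep the drained blue pool for 10 minutes before cleanup
            },
        },
    });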

    Import

    Node pools can be imported using the project, location, cluster, and name. If the project is omitted, the project value in the provider configuration will be used. Examples:

    • {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}

    • {{location}}/{{cluster_id}}/{{pool_id}}

    When using the pulumi import command, node pools can be imported using one of the formats above. For example:

    $ pulumi import gcp:container/nodePool:NodePool default {{project_id}}/{{location}}/{{cluster_id}}/{{pool_id}}
    
    $ pulumi import gcp:container/nodePool:NodePool default {{location}}/{{cluster_id}}/{{pool_id}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.