
gcp.dataproc.Batch

Google Cloud Classic v8.9.3 published on Monday, Nov 18, 2024 by Pulumi

    Dataproc Serverless Batches lets you run Spark workloads without requiring you to provision and manage your own Dataproc cluster.

    To get more information about Batch, see the Dataproc Serverless Batches documentation and the `projects.locations.batches` REST API reference.

    Example Usage

    Dataproc Batch Spark

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const exampleBatchSpark = new gcp.dataproc.Batch("example_batch_spark", {
        batchId: "tf-test-batch_75125",
        location: "us-central1",
        labels: {
            batch_test: "terraform",
        },
        runtimeConfig: {
            properties: {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environmentConfig: {
            executionConfig: {
                subnetworkUri: "default",
                ttl: "3600s",
                networkTags: ["tag1"],
            },
        },
        sparkBatch: {
            mainClass: "org.apache.spark.examples.SparkPi",
            args: ["10"],
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    example_batch_spark = gcp.dataproc.Batch("example_batch_spark",
        batch_id="tf-test-batch_75125",
        location="us-central1",
        labels={
            "batch_test": "terraform",
        },
        runtime_config={
            "properties": {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environment_config={
            "execution_config": {
                "subnetwork_uri": "default",
                "ttl": "3600s",
                "network_tags": ["tag1"],
            },
        },
        spark_batch={
            "main_class": "org.apache.spark.examples.SparkPi",
            "args": ["10"],
            "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataproc.NewBatch(ctx, "example_batch_spark", &dataproc.BatchArgs{
    			BatchId:  pulumi.String("tf-test-batch_75125"),
    			Location: pulumi.String("us-central1"),
    			Labels: pulumi.StringMap{
    				"batch_test": pulumi.String("terraform"),
    			},
    			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
    				Properties: pulumi.StringMap{
    					"spark.dynamicAllocation.enabled": pulumi.String("false"),
    					"spark.executor.instances":        pulumi.String("2"),
    				},
    			},
    			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
    				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
    					SubnetworkUri: pulumi.String("default"),
    					Ttl:           pulumi.String("3600s"),
    					NetworkTags: pulumi.StringArray{
    						pulumi.String("tag1"),
    					},
    				},
    			},
    			SparkBatch: &dataproc.BatchSparkBatchArgs{
    				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
    				Args: pulumi.StringArray{
    					pulumi.String("10"),
    				},
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var exampleBatchSpark = new Gcp.Dataproc.Batch("example_batch_spark", new()
        {
            BatchId = "tf-test-batch_75125",
            Location = "us-central1",
            Labels = 
            {
                { "batch_test", "terraform" },
            },
            RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
            {
                Properties = 
                {
                    { "spark.dynamicAllocation.enabled", "false" },
                    { "spark.executor.instances", "2" },
                },
            },
            EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
            {
                ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
                {
                    SubnetworkUri = "default",
                    Ttl = "3600s",
                    NetworkTags = new[]
                    {
                        "tag1",
                    },
                },
            },
            SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
            {
                MainClass = "org.apache.spark.examples.SparkPi",
                Args = new[]
                {
                    "10",
                },
                JarFileUris = new[]
                {
                    "file:///usr/lib/spark/examples/jars/spark-examples.jar",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataproc.Batch;
    import com.pulumi.gcp.dataproc.BatchArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var exampleBatchSpark = new Batch("exampleBatchSpark", BatchArgs.builder()
                .batchId("tf-test-batch_75125")
                .location("us-central1")
                .labels(Map.of("batch_test", "terraform"))
                .runtimeConfig(BatchRuntimeConfigArgs.builder()
                    .properties(Map.ofEntries(
                        Map.entry("spark.dynamicAllocation.enabled", "false"),
                        Map.entry("spark.executor.instances", "2")
                    ))
                    .build())
                .environmentConfig(BatchEnvironmentConfigArgs.builder()
                    .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                        .subnetworkUri("default")
                        .ttl("3600s")
                        .networkTags("tag1")
                        .build())
                    .build())
                .sparkBatch(BatchSparkBatchArgs.builder()
                    .mainClass("org.apache.spark.examples.SparkPi")
                    .args("10")
                    .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                    .build())
                .build());
    
        }
    }
    
    resources:
      exampleBatchSpark:
        type: gcp:dataproc:Batch
        name: example_batch_spark
        properties:
          batchId: tf-test-batch_75125
          location: us-central1
          labels:
            batch_test: terraform
          runtimeConfig:
            properties:
              spark.dynamicAllocation.enabled: 'false'
              spark.executor.instances: '2'
          environmentConfig:
            executionConfig:
              subnetworkUri: default
              ttl: 3600s
              networkTags:
                - tag1
          sparkBatch:
            mainClass: org.apache.spark.examples.SparkPi
            args:
              - '10'
            jarFileUris:
              - file:///usr/lib/spark/examples/jars/spark-examples.jar
    
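    The examples above create the batch; once deployed, server-assigned attributes can be read back as resource outputs. A minimal TypeScript sketch, assuming the `uuid` and `state` output properties surfaced by this resource:

    // Export server-assigned attributes for `pulumi stack output`.
    export const batchUuid = exampleBatchSpark.uuid;   // unique ID assigned by Dataproc
    export const batchState = exampleBatchSpark.state; // e.g. PENDING, RUNNING, SUCCEEDED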

    Dataproc Batch Spark Full

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const project = gcp.organizations.getProject({});
    const gcsAccount = gcp.storage.getProjectServiceAccount({});
    const bucket = new gcp.storage.Bucket("bucket", {
        uniformBucketLevelAccess: true,
        name: "dataproc-bucket",
        location: "US",
        forceDestroy: true,
    });
    const keyRing = new gcp.kms.KeyRing("key_ring", {
        name: "example-keyring",
        location: "us-central1",
    });
    const cryptoKey = new gcp.kms.CryptoKey("crypto_key", {
        name: "example-key",
        keyRing: keyRing.id,
        purpose: "ENCRYPT_DECRYPT",
    });
    const cryptoKeyMember1 = new gcp.kms.CryptoKeyIAMMember("crypto_key_member_1", {
        cryptoKeyId: cryptoKey.id,
        role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        member: project.then(project => `serviceAccount:service-${project.number}@dataproc-accounts.iam.gserviceaccount.com`),
    });
    const ms = new gcp.dataproc.MetastoreService("ms", {
        serviceId: "dataproc-batch",
        location: "us-central1",
        port: 9080,
        tier: "DEVELOPER",
        maintenanceWindow: {
            hourOfDay: 2,
            dayOfWeek: "SUNDAY",
        },
        hiveMetastoreConfig: {
            version: "3.1.2",
        },
    });
    const basic = new gcp.dataproc.Cluster("basic", {
        name: "dataproc-batch",
        region: "us-central1",
        clusterConfig: {
            softwareConfig: {
                overrideProperties: {
                    "dataproc:dataproc.allow.zero.workers": "true",
                    "spark:spark.history.fs.logDirectory": pulumi.interpolate`gs://${bucket.name}/*/spark-job-history`,
                },
            },
            endpointConfig: {
                enableHttpPortAccess: true,
            },
            masterConfig: {
                numInstances: 1,
                machineType: "e2-standard-2",
                diskConfig: {
                    bootDiskSizeGb: 35,
                },
            },
            metastoreConfig: {
                dataprocMetastoreService: ms.name,
            },
        },
    });
    const exampleBatchSpark = new gcp.dataproc.Batch("example_batch_spark", {
        batchId: "dataproc-batch",
        location: "us-central1",
        labels: {
            batch_test: "terraform",
        },
        runtimeConfig: {
            properties: {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
            version: "2.2",
        },
        environmentConfig: {
            executionConfig: {
                ttl: "3600s",
                networkTags: ["tag1"],
                kmsKey: cryptoKey.id,
                networkUri: "default",
                serviceAccount: project.then(project => `${project.number}-compute@developer.gserviceaccount.com`),
                stagingBucket: bucket.name,
            },
            peripheralsConfig: {
                metastoreService: ms.name,
                sparkHistoryServerConfig: {
                    dataprocCluster: basic.id,
                },
            },
        },
        sparkBatch: {
            mainClass: "org.apache.spark.examples.SparkPi",
            args: ["10"],
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
    }, {
        dependsOn: [cryptoKeyMember1],
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    project = gcp.organizations.get_project()
    gcs_account = gcp.storage.get_project_service_account()
    bucket = gcp.storage.Bucket("bucket",
        uniform_bucket_level_access=True,
        name="dataproc-bucket",
        location="US",
        force_destroy=True)
    key_ring = gcp.kms.KeyRing("key_ring",
        name="example-keyring",
        location="us-central1")
    crypto_key = gcp.kms.CryptoKey("crypto_key",
        name="example-key",
        key_ring=key_ring.id,
        purpose="ENCRYPT_DECRYPT")
    crypto_key_member1 = gcp.kms.CryptoKeyIAMMember("crypto_key_member_1",
        crypto_key_id=crypto_key.id,
        role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
        member=f"serviceAccount:service-{project.number}@dataproc-accounts.iam.gserviceaccount.com")
    ms = gcp.dataproc.MetastoreService("ms",
        service_id="dataproc-batch",
        location="us-central1",
        port=9080,
        tier="DEVELOPER",
        maintenance_window={
            "hour_of_day": 2,
            "day_of_week": "SUNDAY",
        },
        hive_metastore_config={
            "version": "3.1.2",
        })
    basic = gcp.dataproc.Cluster("basic",
        name="dataproc-batch",
        region="us-central1",
        cluster_config={
            "software_config": {
                "override_properties": {
                    "dataproc:dataproc.allow.zero.workers": "true",
                    "spark:spark.history.fs.logDirectory": bucket.name.apply(lambda name: f"gs://{name}/*/spark-job-history"),
                },
            },
            "endpoint_config": {
                "enable_http_port_access": True,
            },
            "master_config": {
                "num_instances": 1,
                "machine_type": "e2-standard-2",
                "disk_config": {
                    "boot_disk_size_gb": 35,
                },
            },
            "metastore_config": {
                "dataproc_metastore_service": ms.name,
            },
        })
    example_batch_spark = gcp.dataproc.Batch("example_batch_spark",
        batch_id="dataproc-batch",
        location="us-central1",
        labels={
            "batch_test": "terraform",
        },
        runtime_config={
            "properties": {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
            "version": "2.2",
        },
        environment_config={
            "execution_config": {
                "ttl": "3600s",
                "network_tags": ["tag1"],
                "kms_key": crypto_key.id,
                "network_uri": "default",
                "service_account": f"{project.number}-compute@developer.gserviceaccount.com",
                "staging_bucket": bucket.name,
            },
            "peripherals_config": {
                "metastore_service": ms.name,
                "spark_history_server_config": {
                    "dataproc_cluster": basic.id,
                },
            },
        },
        spark_batch={
            "main_class": "org.apache.spark.examples.SparkPi",
            "args": ["10"],
            "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
        opts=pulumi.ResourceOptions(depends_on=[crypto_key_member1]))
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = storage.GetProjectServiceAccount(ctx, &storage.GetProjectServiceAccountArgs{}, nil)
    		if err != nil {
    			return err
    		}
    		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
    			UniformBucketLevelAccess: pulumi.Bool(true),
    			Name:                     pulumi.String("dataproc-bucket"),
    			Location:                 pulumi.String("US"),
    			ForceDestroy:             pulumi.Bool(true),
    		})
    		if err != nil {
    			return err
    		}
    		keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{
    			Name:     pulumi.String("example-keyring"),
    			Location: pulumi.String("us-central1"),
    		})
    		if err != nil {
    			return err
    		}
    		cryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{
    			Name:    pulumi.String("example-key"),
    			KeyRing: keyRing.ID(),
    			Purpose: pulumi.String("ENCRYPT_DECRYPT"),
    		})
    		if err != nil {
    			return err
    		}
    		cryptoKeyMember1, err := kms.NewCryptoKeyIAMMember(ctx, "crypto_key_member_1", &kms.CryptoKeyIAMMemberArgs{
    			CryptoKeyId: cryptoKey.ID(),
    			Role:        pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
    			Member:      pulumi.Sprintf("serviceAccount:service-%v@dataproc-accounts.iam.gserviceaccount.com", project.Number),
    		})
    		if err != nil {
    			return err
    		}
    		ms, err := dataproc.NewMetastoreService(ctx, "ms", &dataproc.MetastoreServiceArgs{
    			ServiceId: pulumi.String("dataproc-batch"),
    			Location:  pulumi.String("us-central1"),
    			Port:      pulumi.Int(9080),
    			Tier:      pulumi.String("DEVELOPER"),
    			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
    				HourOfDay: pulumi.Int(2),
    				DayOfWeek: pulumi.String("SUNDAY"),
    			},
    			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
    				Version: pulumi.String("3.1.2"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		basic, err := dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
    			Name:   pulumi.String("dataproc-batch"),
    			Region: pulumi.String("us-central1"),
    			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
    				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
    					OverrideProperties: pulumi.StringMap{
    						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
    						"spark:spark.history.fs.logDirectory": bucket.Name.ApplyT(func(name string) (string, error) {
    							return fmt.Sprintf("gs://%v/*/spark-job-history", name), nil
    						}).(pulumi.StringOutput),
    					},
    				},
    				EndpointConfig: &dataproc.ClusterClusterConfigEndpointConfigArgs{
    					EnableHttpPortAccess: pulumi.Bool(true),
    				},
    				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
    					NumInstances: pulumi.Int(1),
    					MachineType:  pulumi.String("e2-standard-2"),
    					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
    						BootDiskSizeGb: pulumi.Int(35),
    					},
    				},
    				MetastoreConfig: &dataproc.ClusterClusterConfigMetastoreConfigArgs{
    					DataprocMetastoreService: ms.Name,
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = dataproc.NewBatch(ctx, "example_batch_spark", &dataproc.BatchArgs{
    			BatchId:  pulumi.String("dataproc-batch"),
    			Location: pulumi.String("us-central1"),
    			Labels: pulumi.StringMap{
    				"batch_test": pulumi.String("terraform"),
    			},
    			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
    				Properties: pulumi.StringMap{
    					"spark.dynamicAllocation.enabled": pulumi.String("false"),
    					"spark.executor.instances":        pulumi.String("2"),
    				},
    				Version: pulumi.String("2.2"),
    			},
    			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
    				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
    					Ttl: pulumi.String("3600s"),
    					NetworkTags: pulumi.StringArray{
    						pulumi.String("tag1"),
    					},
    					KmsKey:         cryptoKey.ID(),
    					NetworkUri:     pulumi.String("default"),
    					ServiceAccount: pulumi.Sprintf("%v-compute@developer.gserviceaccount.com", project.Number),
    					StagingBucket:  bucket.Name,
    				},
    				PeripheralsConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigArgs{
    					MetastoreService: ms.Name,
    					SparkHistoryServerConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs{
    						DataprocCluster: basic.ID(),
    					},
    				},
    			},
    			SparkBatch: &dataproc.BatchSparkBatchArgs{
    				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
    				Args: pulumi.StringArray{
    					pulumi.String("10"),
    				},
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
    				},
    			},
    		}, pulumi.DependsOn([]pulumi.Resource{
    			cryptoKeyMember1,
    		}))
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var project = Gcp.Organizations.GetProject.Invoke();
    
        var gcsAccount = Gcp.Storage.GetProjectServiceAccount.Invoke();
    
        var bucket = new Gcp.Storage.Bucket("bucket", new()
        {
            UniformBucketLevelAccess = true,
            Name = "dataproc-bucket",
            Location = "US",
            ForceDestroy = true,
        });
    
        var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
        {
            Name = "example-keyring",
            Location = "us-central1",
        });
    
        var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
        {
            Name = "example-key",
            KeyRing = keyRing.Id,
            Purpose = "ENCRYPT_DECRYPT",
        });
    
        var cryptoKeyMember1 = new Gcp.Kms.CryptoKeyIAMMember("crypto_key_member_1", new()
        {
            CryptoKeyId = cryptoKey.Id,
            Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
            Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@dataproc-accounts.iam.gserviceaccount.com",
        });
    
        var ms = new Gcp.Dataproc.MetastoreService("ms", new()
        {
            ServiceId = "dataproc-batch",
            Location = "us-central1",
            Port = 9080,
            Tier = "DEVELOPER",
            MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
            {
                HourOfDay = 2,
                DayOfWeek = "SUNDAY",
            },
            HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
            {
                Version = "3.1.2",
            },
        });
    
        var basic = new Gcp.Dataproc.Cluster("basic", new()
        {
            Name = "dataproc-batch",
            Region = "us-central1",
            ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
            {
                SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
                {
                    OverrideProperties = 
                    {
                        { "dataproc:dataproc.allow.zero.workers", "true" },
                        { "spark:spark.history.fs.logDirectory", bucket.Name.Apply(name => $"gs://{name}/*/spark-job-history") },
                    },
                },
                EndpointConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigEndpointConfigArgs
                {
                    EnableHttpPortAccess = true,
                },
                MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
                {
                    NumInstances = 1,
                    MachineType = "e2-standard-2",
                    DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
                    {
                        BootDiskSizeGb = 35,
                    },
                },
                MetastoreConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMetastoreConfigArgs
                {
                    DataprocMetastoreService = ms.Name,
                },
            },
        });
    
        var exampleBatchSpark = new Gcp.Dataproc.Batch("example_batch_spark", new()
        {
            BatchId = "dataproc-batch",
            Location = "us-central1",
            Labels = 
            {
                { "batch_test", "terraform" },
            },
            RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
            {
                Properties = 
                {
                    { "spark.dynamicAllocation.enabled", "false" },
                    { "spark.executor.instances", "2" },
                },
                Version = "2.2",
            },
            EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
            {
                ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
                {
                    Ttl = "3600s",
                    NetworkTags = new[]
                    {
                        "tag1",
                    },
                    KmsKey = cryptoKey.Id,
                    NetworkUri = "default",
                    ServiceAccount = $"{project.Apply(getProjectResult => getProjectResult.Number)}-compute@developer.gserviceaccount.com",
                    StagingBucket = bucket.Name,
                },
                PeripheralsConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigArgs
                {
                    MetastoreService = ms.Name,
                    SparkHistoryServerConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs
                    {
                        DataprocCluster = basic.Id,
                    },
                },
            },
            SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
            {
                MainClass = "org.apache.spark.examples.SparkPi",
                Args = new[]
                {
                    "10",
                },
                JarFileUris = new[]
                {
                    "file:///usr/lib/spark/examples/jars/spark-examples.jar",
                },
            },
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                cryptoKeyMember1,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.organizations.OrganizationsFunctions;
    import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
    import com.pulumi.gcp.storage.StorageFunctions;
    import com.pulumi.gcp.storage.inputs.GetProjectServiceAccountArgs;
    import com.pulumi.gcp.storage.Bucket;
    import com.pulumi.gcp.storage.BucketArgs;
    import com.pulumi.gcp.kms.KeyRing;
    import com.pulumi.gcp.kms.KeyRingArgs;
    import com.pulumi.gcp.kms.CryptoKey;
    import com.pulumi.gcp.kms.CryptoKeyArgs;
    import com.pulumi.gcp.kms.CryptoKeyIAMMember;
    import com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;
    import com.pulumi.gcp.dataproc.MetastoreService;
    import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
    import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
    import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
    import com.pulumi.gcp.dataproc.Cluster;
    import com.pulumi.gcp.dataproc.ClusterArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigSoftwareConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigEndpointConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigDiskConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMetastoreConfigArgs;
    import com.pulumi.gcp.dataproc.Batch;
    import com.pulumi.gcp.dataproc.BatchArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigPeripheralsConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var project = OrganizationsFunctions.getProject();
    
            final var gcsAccount = StorageFunctions.getProjectServiceAccount();
    
            var bucket = new Bucket("bucket", BucketArgs.builder()
                .uniformBucketLevelAccess(true)
                .name("dataproc-bucket")
                .location("US")
                .forceDestroy(true)
                .build());
    
            var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()
                .name("example-keyring")
                .location("us-central1")
                .build());
    
            var cryptoKey = new CryptoKey("cryptoKey", CryptoKeyArgs.builder()
                .name("example-key")
                .keyRing(keyRing.id())
                .purpose("ENCRYPT_DECRYPT")
                .build());
    
            var cryptoKeyMember1 = new CryptoKeyIAMMember("cryptoKeyMember1", CryptoKeyIAMMemberArgs.builder()
                .cryptoKeyId(cryptoKey.id())
                .role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
            .member(project.applyValue(getProjectResult -> String.format("serviceAccount:service-%s@dataproc-accounts.iam.gserviceaccount.com", getProjectResult.number())))
                .build());
    
            var ms = new MetastoreService("ms", MetastoreServiceArgs.builder()
                .serviceId("dataproc-batch")
                .location("us-central1")
                .port(9080)
                .tier("DEVELOPER")
                .maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
                    .hourOfDay(2)
                    .dayOfWeek("SUNDAY")
                    .build())
                .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
                    .version("3.1.2")
                    .build())
                .build());
    
            var basic = new Cluster("basic", ClusterArgs.builder()
                .name("dataproc-batch")
                .region("us-central1")
                .clusterConfig(ClusterClusterConfigArgs.builder()
                    .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
                        .overrideProperties(Map.ofEntries(
                            Map.entry("dataproc:dataproc.allow.zero.workers", "true"),
                            Map.entry("spark:spark.history.fs.logDirectory", bucket.name().applyValue(name -> String.format("gs://%s/*/spark-job-history", name)))
                        ))
                        .build())
                    .endpointConfig(ClusterClusterConfigEndpointConfigArgs.builder()
                        .enableHttpPortAccess(true)
                        .build())
                    .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
                        .numInstances(1)
                        .machineType("e2-standard-2")
                        .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
                            .bootDiskSizeGb(35)
                            .build())
                        .build())
                    .metastoreConfig(ClusterClusterConfigMetastoreConfigArgs.builder()
                        .dataprocMetastoreService(ms.name())
                        .build())
                    .build())
                .build());
    
            var exampleBatchSpark = new Batch("exampleBatchSpark", BatchArgs.builder()
                .batchId("dataproc-batch")
                .location("us-central1")
                .labels(Map.of("batch_test", "terraform"))
                .runtimeConfig(BatchRuntimeConfigArgs.builder()
                    .properties(Map.ofEntries(
                        Map.entry("spark.dynamicAllocation.enabled", "false"),
                        Map.entry("spark.executor.instances", "2")
                    ))
                    .version("2.2")
                    .build())
                .environmentConfig(BatchEnvironmentConfigArgs.builder()
                    .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                        .ttl("3600s")
                        .networkTags("tag1")
                        .kmsKey(cryptoKey.id())
                        .networkUri("default")
                    .serviceAccount(project.applyValue(getProjectResult -> String.format("%s-compute@developer.gserviceaccount.com", getProjectResult.number())))
                        .stagingBucket(bucket.name())
                        .build())
                    .peripheralsConfig(BatchEnvironmentConfigPeripheralsConfigArgs.builder()
                        .metastoreService(ms.name())
                        .sparkHistoryServerConfig(BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs.builder()
                            .dataprocCluster(basic.id())
                            .build())
                        .build())
                    .build())
                .sparkBatch(BatchSparkBatchArgs.builder()
                    .mainClass("org.apache.spark.examples.SparkPi")
                    .args("10")
                    .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                    .build())
                .build(), CustomResourceOptions.builder()
                    .dependsOn(cryptoKeyMember1)
                    .build());
    
        }
    }
    
    resources:
      exampleBatchSpark:
        type: gcp:dataproc:Batch
        name: example_batch_spark
        properties:
          batchId: dataproc-batch
          location: us-central1
          labels:
            batch_test: terraform
          runtimeConfig:
            properties:
              spark.dynamicAllocation.enabled: 'false'
              spark.executor.instances: '2'
            version: '2.2'
          environmentConfig:
            executionConfig:
              ttl: 3600s
              networkTags:
                - tag1
              kmsKey: ${cryptoKey.id}
              networkUri: default
              serviceAccount: ${project.number}-compute@developer.gserviceaccount.com
              stagingBucket: ${bucket.name}
            peripheralsConfig:
              metastoreService: ${ms.name}
              sparkHistoryServerConfig:
                dataprocCluster: ${basic.id}
          sparkBatch:
            mainClass: org.apache.spark.examples.SparkPi
            args:
              - '10'
            jarFileUris:
              - file:///usr/lib/spark/examples/jars/spark-examples.jar
        options:
          dependsOn:
            - ${cryptoKeyMember1}
      bucket:
        type: gcp:storage:Bucket
        properties:
          uniformBucketLevelAccess: true
          name: dataproc-bucket
          location: US
          forceDestroy: true
      cryptoKey:
        type: gcp:kms:CryptoKey
        name: crypto_key
        properties:
          name: example-key
          keyRing: ${keyRing.id}
          purpose: ENCRYPT_DECRYPT
      keyRing:
        type: gcp:kms:KeyRing
        name: key_ring
        properties:
          name: example-keyring
          location: us-central1
      cryptoKeyMember1:
        type: gcp:kms:CryptoKeyIAMMember
        name: crypto_key_member_1
        properties:
          cryptoKeyId: ${cryptoKey.id}
          role: roles/cloudkms.cryptoKeyEncrypterDecrypter
          member: serviceAccount:service-${project.number}@dataproc-accounts.iam.gserviceaccount.com
      basic:
        type: gcp:dataproc:Cluster
        properties:
          name: dataproc-batch
          region: us-central1
          clusterConfig:
            softwareConfig:
              overrideProperties:
                dataproc:dataproc.allow.zero.workers: 'true'
                spark:spark.history.fs.logDirectory: gs://${bucket.name}/*/spark-job-history
            endpointConfig:
              enableHttpPortAccess: true
            masterConfig:
              numInstances: 1
              machineType: e2-standard-2
              diskConfig:
                bootDiskSizeGb: 35
            metastoreConfig:
              dataprocMetastoreService: ${ms.name}
      ms:
        type: gcp:dataproc:MetastoreService
        properties:
          serviceId: dataproc-batch
          location: us-central1
          port: 9080
          tier: DEVELOPER
          maintenanceWindow:
            hourOfDay: 2
            dayOfWeek: SUNDAY
          hiveMetastoreConfig:
            version: 3.1.2
    variables:
      project:
        fn::invoke:
          Function: gcp:organizations:getProject
          Arguments: {}
      gcsAccount:
        fn::invoke:
          Function: gcp:storage:getProjectServiceAccount
          Arguments: {}
    
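    An existing batch can also be brought under management with `pulumi import` rather than recreated. A sketch of the command, assuming the standard resource ID format for this resource type:

    $ pulumi import gcp:dataproc/batch:Batch example_batch_spark projects/{{project}}/locations/{{location}}/batches/{{batch_id}}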

    Dataproc Batch Sparksql

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const exampleBatchSparsql = new gcp.dataproc.Batch("example_batch_sparsql", {
        batchId: "tf-test-batch_88722",
        location: "us-central1",
        runtimeConfig: {
            properties: {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environmentConfig: {
            executionConfig: {
                subnetworkUri: "default",
            },
        },
        sparkSqlBatch: {
            queryFileUri: "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
            queryVariables: {
                name: "value",
            },
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    example_batch_sparsql = gcp.dataproc.Batch("example_batch_sparsql",
        batch_id="tf-test-batch_88722",
        location="us-central1",
        runtime_config={
            "properties": {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environment_config={
            "execution_config": {
                "subnetwork_uri": "default",
            },
        },
        spark_sql_batch={
            "query_file_uri": "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
            "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
            "query_variables": {
                "name": "value",
            },
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataproc.NewBatch(ctx, "example_batch_sparsql", &dataproc.BatchArgs{
    			BatchId:  pulumi.String("tf-test-batch_88722"),
    			Location: pulumi.String("us-central1"),
    			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
    				Properties: pulumi.StringMap{
    					"spark.dynamicAllocation.enabled": pulumi.String("false"),
    					"spark.executor.instances":        pulumi.String("2"),
    				},
    			},
    			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
    				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
    					SubnetworkUri: pulumi.String("default"),
    				},
    			},
    			SparkSqlBatch: &dataproc.BatchSparkSqlBatchArgs{
    				QueryFileUri: pulumi.String("gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql"),
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
    				},
    				QueryVariables: pulumi.StringMap{
    					"name": pulumi.String("value"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var exampleBatchSparsql = new Gcp.Dataproc.Batch("example_batch_sparsql", new()
        {
            BatchId = "tf-test-batch_88722",
            Location = "us-central1",
            RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
            {
                Properties = 
                {
                    { "spark.dynamicAllocation.enabled", "false" },
                    { "spark.executor.instances", "2" },
                },
            },
            EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
            {
                ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
                {
                    SubnetworkUri = "default",
                },
            },
            SparkSqlBatch = new Gcp.Dataproc.Inputs.BatchSparkSqlBatchArgs
            {
                QueryFileUri = "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
                JarFileUris = new[]
                {
                    "file:///usr/lib/spark/examples/jars/spark-examples.jar",
                },
                QueryVariables = 
                {
                    { "name", "value" },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataproc.Batch;
    import com.pulumi.gcp.dataproc.BatchArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchSparkSqlBatchArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var exampleBatchSparsql = new Batch("exampleBatchSparsql", BatchArgs.builder()
                .batchId("tf-test-batch_88722")
                .location("us-central1")
                .runtimeConfig(BatchRuntimeConfigArgs.builder()
                    .properties(Map.ofEntries(
                        Map.entry("spark.dynamicAllocation.enabled", "false"),
                        Map.entry("spark.executor.instances", "2")
                    ))
                    .build())
                .environmentConfig(BatchEnvironmentConfigArgs.builder()
                    .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                        .subnetworkUri("default")
                        .build())
                    .build())
                .sparkSqlBatch(BatchSparkSqlBatchArgs.builder()
                    .queryFileUri("gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql")
                    .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                    .queryVariables(Map.of("name", "value"))
                    .build())
                .build());
    
        }
    }
    
    resources:
      exampleBatchSparsql:
        type: gcp:dataproc:Batch
        name: example_batch_sparsql
        properties:
          batchId: tf-test-batch_88722
          location: us-central1
          runtimeConfig:
            properties:
              spark.dynamicAllocation.enabled: 'false'
              spark.executor.instances: '2'
          environmentConfig:
            executionConfig:
              subnetworkUri: default
          sparkSqlBatch:
            queryFileUri: gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql
            jarFileUris:
              - file:///usr/lib/spark/examples/jars/spark-examples.jar
            queryVariables:
              name: value
    
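    In the example above, `queryVariables` maps variable names to values for the referenced SQL script (equivalent to running `SET name="value";` before the query). To supply such values per stack instead of hard-coding them, one option is a Pulumi config lookup; a TypeScript sketch, where the `sqlName` config key and batch ID are hypothetical:

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";

    const config = new pulumi.Config();
    const exampleBatchSparsqlFromConfig = new gcp.dataproc.Batch("example_batch_sparsql_from_config", {
        batchId: "tf-test-batch-config",
        location: "us-central1",
        sparkSqlBatch: {
            queryFileUri: "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
            // Value comes from `pulumi config set sqlName <value>`:
            queryVariables: {
                name: config.require("sqlName"),
            },
        },
    });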

    Dataproc Batch Pyspark

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const exampleBatchPyspark = new gcp.dataproc.Batch("example_batch_pyspark", {
        batchId: "tf-test-batch_39249",
        location: "us-central1",
        runtimeConfig: {
            properties: {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environmentConfig: {
            executionConfig: {
                subnetworkUri: "default",
            },
        },
        pysparkBatch: {
            mainPythonFileUri: "https://storage.googleapis.com/terraform-batches/test_util.py",
            args: ["10"],
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
            pythonFileUris: ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"],
            archiveUris: [
                "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
                "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
                "https://storage.googleapis.com/terraform-batches/animals.txt",
            ],
            fileUris: ["https://storage.googleapis.com/terraform-batches/people.txt"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    example_batch_pyspark = gcp.dataproc.Batch("example_batch_pyspark",
        batch_id="tf-test-batch_39249",
        location="us-central1",
        runtime_config={
            "properties": {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environment_config={
            "execution_config": {
                "subnetwork_uri": "default",
            },
        },
        pyspark_batch={
            "main_python_file_uri": "https://storage.googleapis.com/terraform-batches/test_util.py",
            "args": ["10"],
            "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
            "python_file_uris": ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"],
            "archive_uris": [
                "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
                "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
                "https://storage.googleapis.com/terraform-batches/animals.txt",
            ],
            "file_uris": ["https://storage.googleapis.com/terraform-batches/people.txt"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataproc.NewBatch(ctx, "example_batch_pyspark", &dataproc.BatchArgs{
    			BatchId:  pulumi.String("tf-test-batch_39249"),
    			Location: pulumi.String("us-central1"),
    			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
    				Properties: pulumi.StringMap{
    					"spark.dynamicAllocation.enabled": pulumi.String("false"),
    					"spark.executor.instances":        pulumi.String("2"),
    				},
    			},
    			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
    				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
    					SubnetworkUri: pulumi.String("default"),
    				},
    			},
    			PysparkBatch: &dataproc.BatchPysparkBatchArgs{
    				MainPythonFileUri: pulumi.String("https://storage.googleapis.com/terraform-batches/test_util.py"),
    				Args: pulumi.StringArray{
    					pulumi.String("10"),
    				},
    				JarFileUris: pulumi.StringArray{
    					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
    				},
    				PythonFileUris: pulumi.StringArray{
    					pulumi.String("gs://dataproc-examples/pyspark/hello-world/hello-world.py"),
    				},
    				ArchiveUris: pulumi.StringArray{
    					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked"),
    					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt.jar"),
    					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt"),
    				},
    				FileUris: pulumi.StringArray{
    					pulumi.String("https://storage.googleapis.com/terraform-batches/people.txt"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var exampleBatchPyspark = new Gcp.Dataproc.Batch("example_batch_pyspark", new()
        {
            BatchId = "tf-test-batch_39249",
            Location = "us-central1",
            RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
            {
                Properties = 
                {
                    { "spark.dynamicAllocation.enabled", "false" },
                    { "spark.executor.instances", "2" },
                },
            },
            EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
            {
                ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
                {
                    SubnetworkUri = "default",
                },
            },
            PysparkBatch = new Gcp.Dataproc.Inputs.BatchPysparkBatchArgs
            {
                MainPythonFileUri = "https://storage.googleapis.com/terraform-batches/test_util.py",
                Args = new[]
                {
                    "10",
                },
                JarFileUris = new[]
                {
                    "file:///usr/lib/spark/examples/jars/spark-examples.jar",
                },
                PythonFileUris = new[]
                {
                    "gs://dataproc-examples/pyspark/hello-world/hello-world.py",
                },
                ArchiveUris = new[]
                {
                    "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
                    "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
                    "https://storage.googleapis.com/terraform-batches/animals.txt",
                },
                FileUris = new[]
                {
                    "https://storage.googleapis.com/terraform-batches/people.txt",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataproc.Batch;
    import com.pulumi.gcp.dataproc.BatchArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchPysparkBatchArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var exampleBatchPyspark = new Batch("exampleBatchPyspark", BatchArgs.builder()
                .batchId("tf-test-batch_39249")
                .location("us-central1")
                .runtimeConfig(BatchRuntimeConfigArgs.builder()
                    .properties(Map.ofEntries(
                        Map.entry("spark.dynamicAllocation.enabled", "false"),
                        Map.entry("spark.executor.instances", "2")
                    ))
                    .build())
                .environmentConfig(BatchEnvironmentConfigArgs.builder()
                    .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                        .subnetworkUri("default")
                        .build())
                    .build())
                .pysparkBatch(BatchPysparkBatchArgs.builder()
                    .mainPythonFileUri("https://storage.googleapis.com/terraform-batches/test_util.py")
                    .args("10")
                    .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
                    .pythonFileUris("gs://dataproc-examples/pyspark/hello-world/hello-world.py")
                .archiveUris(
                        "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
                        "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
                        "https://storage.googleapis.com/terraform-batches/animals.txt")
                    .fileUris("https://storage.googleapis.com/terraform-batches/people.txt")
                    .build())
                .build());
    
        }
    }
    
    resources:
      exampleBatchPyspark:
        type: gcp:dataproc:Batch
        name: example_batch_pyspark
        properties:
          batchId: tf-test-batch_39249
          location: us-central1
          runtimeConfig:
            properties:
              spark.dynamicAllocation.enabled: 'false'
              spark.executor.instances: '2'
          environmentConfig:
            executionConfig:
              subnetworkUri: default
          pysparkBatch:
            mainPythonFileUri: https://storage.googleapis.com/terraform-batches/test_util.py
            args:
              - '10'
            jarFileUris:
              - file:///usr/lib/spark/examples/jars/spark-examples.jar
            pythonFileUris:
              - gs://dataproc-examples/pyspark/hello-world/hello-world.py
            archiveUris:
              - https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked
              - https://storage.googleapis.com/terraform-batches/animals.txt.jar
              - https://storage.googleapis.com/terraform-batches/animals.txt
            fileUris:
              - https://storage.googleapis.com/terraform-batches/people.txt
    
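    Note the `#unpacked` suffix on the first archive URI above: per Spark's archive-alias convention, the fragment names the directory the archive is extracted into, so the job can read paths such as `unpacked/animals.txt` from its working directory (an assumption based on standard Spark `--archives` handling).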

    Dataproc Batch Sparkr

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const exampleBatchSparkr = new gcp.dataproc.Batch("example_batch_sparkr", {
        batchId: "tf-test-batch_74391",
        location: "us-central1",
        labels: {
            batch_test: "terraform",
        },
        runtimeConfig: {
            properties: {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environmentConfig: {
            executionConfig: {
                subnetworkUri: "default",
                ttl: "3600s",
                networkTags: ["tag1"],
            },
        },
        sparkRBatch: {
            mainRFileUri: "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
            args: ["https://storage.googleapis.com/terraform-batches/flights.csv"],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    example_batch_sparkr = gcp.dataproc.Batch("example_batch_sparkr",
        batch_id="tf-test-batch_74391",
        location="us-central1",
        labels={
            "batch_test": "terraform",
        },
        runtime_config={
            "properties": {
                "spark.dynamicAllocation.enabled": "false",
                "spark.executor.instances": "2",
            },
        },
        environment_config={
            "execution_config": {
                "subnetwork_uri": "default",
                "ttl": "3600s",
                "network_tags": ["tag1"],
            },
        },
        spark_r_batch={
            "main_r_file_uri": "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
            "args": ["https://storage.googleapis.com/terraform-batches/flights.csv"],
        })
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := dataproc.NewBatch(ctx, "example_batch_sparkr", &dataproc.BatchArgs{
    			BatchId:  pulumi.String("tf-test-batch_74391"),
    			Location: pulumi.String("us-central1"),
    			Labels: pulumi.StringMap{
    				"batch_test": pulumi.String("terraform"),
    			},
    			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
    				Properties: pulumi.StringMap{
    					"spark.dynamicAllocation.enabled": pulumi.String("false"),
    					"spark.executor.instances":        pulumi.String("2"),
    				},
    			},
    			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
    				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
    					SubnetworkUri: pulumi.String("default"),
    					Ttl:           pulumi.String("3600s"),
    					NetworkTags: pulumi.StringArray{
    						pulumi.String("tag1"),
    					},
    				},
    			},
    			SparkRBatch: &dataproc.BatchSparkRBatchArgs{
    				MainRFileUri: pulumi.String("https://storage.googleapis.com/terraform-batches/spark-r-flights.r"),
    				Args: pulumi.StringArray{
    					pulumi.String("https://storage.googleapis.com/terraform-batches/flights.csv"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var exampleBatchSparkr = new Gcp.Dataproc.Batch("example_batch_sparkr", new()
        {
            BatchId = "tf-test-batch_74391",
            Location = "us-central1",
            Labels = 
            {
                { "batch_test", "terraform" },
            },
            RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
            {
                Properties = 
                {
                    { "spark.dynamicAllocation.enabled", "false" },
                    { "spark.executor.instances", "2" },
                },
            },
            EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
            {
                ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
                {
                    SubnetworkUri = "default",
                    Ttl = "3600s",
                    NetworkTags = new[]
                    {
                        "tag1",
                    },
                },
            },
            SparkRBatch = new Gcp.Dataproc.Inputs.BatchSparkRBatchArgs
            {
                MainRFileUri = "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
                Args = new[]
                {
                    "https://storage.googleapis.com/terraform-batches/flights.csv",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.dataproc.Batch;
    import com.pulumi.gcp.dataproc.BatchArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
    import com.pulumi.gcp.dataproc.inputs.BatchSparkRBatchArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var exampleBatchSparkr = new Batch("exampleBatchSparkr", BatchArgs.builder()
                .batchId("tf-test-batch_74391")
                .location("us-central1")
                .labels(Map.of("batch_test", "terraform"))
                .runtimeConfig(BatchRuntimeConfigArgs.builder()
                    .properties(Map.ofEntries(
                        Map.entry("spark.dynamicAllocation.enabled", "false"),
                        Map.entry("spark.executor.instances", "2")
                    ))
                    .build())
                .environmentConfig(BatchEnvironmentConfigArgs.builder()
                    .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                        .subnetworkUri("default")
                        .ttl("3600s")
                        .networkTags("tag1")
                        .build())
                    .build())
                .sparkRBatch(BatchSparkRBatchArgs.builder()
                    .mainRFileUri("https://storage.googleapis.com/terraform-batches/spark-r-flights.r")
                    .args("https://storage.googleapis.com/terraform-batches/flights.csv")
                    .build())
                .build());
    
        }
    }
    
    resources:
      exampleBatchSparkr:
        type: gcp:dataproc:Batch
        name: example_batch_sparkr
        properties:
          batchId: tf-test-batch_74391
          location: us-central1
          labels:
            batch_test: terraform
          runtimeConfig:
            properties:
              spark.dynamicAllocation.enabled: 'false'
              spark.executor.instances: '2'
          environmentConfig:
            executionConfig:
              subnetworkUri: default
              ttl: 3600s
              networkTags:
                - tag1
          sparkRBatch:
            mainRFileUri: https://storage.googleapis.com/terraform-batches/spark-r-flights.r
            args:
              - https://storage.googleapis.com/terraform-batches/flights.csv
    

    Create Batch Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Batch(name: string, args?: BatchArgs, opts?: CustomResourceOptions);
    @overload
    def Batch(resource_name: str,
              args: Optional[BatchArgs] = None,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Batch(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              batch_id: Optional[str] = None,
              environment_config: Optional[BatchEnvironmentConfigArgs] = None,
              labels: Optional[Mapping[str, str]] = None,
              location: Optional[str] = None,
              project: Optional[str] = None,
              pyspark_batch: Optional[BatchPysparkBatchArgs] = None,
              runtime_config: Optional[BatchRuntimeConfigArgs] = None,
              spark_batch: Optional[BatchSparkBatchArgs] = None,
              spark_r_batch: Optional[BatchSparkRBatchArgs] = None,
              spark_sql_batch: Optional[BatchSparkSqlBatchArgs] = None)
    func NewBatch(ctx *Context, name string, args *BatchArgs, opts ...ResourceOption) (*Batch, error)
    public Batch(string name, BatchArgs? args = null, CustomResourceOptions? opts = null)
    public Batch(String name, BatchArgs args)
    public Batch(String name, BatchArgs args, CustomResourceOptions options)
    
    type: gcp:dataproc:Batch
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args BatchArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args BatchArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args BatchArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args BatchArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args BatchArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var batchResource = new Gcp.Dataproc.Batch("batchResource", new()
    {
        BatchId = "string",
        EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
        {
            ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
            {
                KmsKey = "string",
                NetworkTags = new[]
                {
                    "string",
                },
                NetworkUri = "string",
                ServiceAccount = "string",
                StagingBucket = "string",
                SubnetworkUri = "string",
                Ttl = "string",
            },
            PeripheralsConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigArgs
            {
                MetastoreService = "string",
                SparkHistoryServerConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs
                {
                    DataprocCluster = "string",
                },
            },
        },
        Labels = 
        {
            { "string", "string" },
        },
        Location = "string",
        Project = "string",
        PysparkBatch = new Gcp.Dataproc.Inputs.BatchPysparkBatchArgs
        {
            ArchiveUris = new[]
            {
                "string",
            },
            Args = new[]
            {
                "string",
            },
            FileUris = new[]
            {
                "string",
            },
            JarFileUris = new[]
            {
                "string",
            },
            MainPythonFileUri = "string",
            PythonFileUris = new[]
            {
                "string",
            },
        },
        RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
        {
            ContainerImage = "string",
            EffectiveProperties = 
            {
                { "string", "string" },
            },
            Properties = 
            {
                { "string", "string" },
            },
            Version = "string",
        },
        SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
        {
            ArchiveUris = new[]
            {
                "string",
            },
            Args = new[]
            {
                "string",
            },
            FileUris = new[]
            {
                "string",
            },
            JarFileUris = new[]
            {
                "string",
            },
            MainClass = "string",
            MainJarFileUri = "string",
        },
        SparkRBatch = new Gcp.Dataproc.Inputs.BatchSparkRBatchArgs
        {
            ArchiveUris = new[]
            {
                "string",
            },
            Args = new[]
            {
                "string",
            },
            FileUris = new[]
            {
                "string",
            },
            MainRFileUri = "string",
        },
        SparkSqlBatch = new Gcp.Dataproc.Inputs.BatchSparkSqlBatchArgs
        {
            JarFileUris = new[]
            {
                "string",
            },
            QueryFileUri = "string",
            QueryVariables = 
            {
                { "string", "string" },
            },
        },
    });
    
    example, err := dataproc.NewBatch(ctx, "batchResource", &dataproc.BatchArgs{
    	BatchId: pulumi.String("string"),
    	EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
    		ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
    			KmsKey: pulumi.String("string"),
    			NetworkTags: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			NetworkUri:     pulumi.String("string"),
    			ServiceAccount: pulumi.String("string"),
    			StagingBucket:  pulumi.String("string"),
    			SubnetworkUri:  pulumi.String("string"),
    			Ttl:            pulumi.String("string"),
    		},
    		PeripheralsConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigArgs{
    			MetastoreService: pulumi.String("string"),
    			SparkHistoryServerConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs{
    				DataprocCluster: pulumi.String("string"),
    			},
    		},
    	},
    	Labels: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Location: pulumi.String("string"),
    	Project:  pulumi.String("string"),
    	PysparkBatch: &dataproc.BatchPysparkBatchArgs{
    		ArchiveUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Args: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		FileUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		JarFileUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		MainPythonFileUri: pulumi.String("string"),
    		PythonFileUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    	},
    	RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
    		ContainerImage: pulumi.String("string"),
    		EffectiveProperties: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		Properties: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    		Version: pulumi.String("string"),
    	},
    	SparkBatch: &dataproc.BatchSparkBatchArgs{
    		ArchiveUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Args: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		FileUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		JarFileUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		MainClass:      pulumi.String("string"),
    		MainJarFileUri: pulumi.String("string"),
    	},
    	SparkRBatch: &dataproc.BatchSparkRBatchArgs{
    		ArchiveUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Args: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		FileUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		MainRFileUri: pulumi.String("string"),
    	},
    	SparkSqlBatch: &dataproc.BatchSparkSqlBatchArgs{
    		JarFileUris: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		QueryFileUri: pulumi.String("string"),
    		QueryVariables: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    	},
    })
    
    var batchResource = new Batch("batchResource", BatchArgs.builder()
        .batchId("string")
        .environmentConfig(BatchEnvironmentConfigArgs.builder()
            .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
                .kmsKey("string")
                .networkTags("string")
                .networkUri("string")
                .serviceAccount("string")
                .stagingBucket("string")
                .subnetworkUri("string")
                .ttl("string")
                .build())
            .peripheralsConfig(BatchEnvironmentConfigPeripheralsConfigArgs.builder()
                .metastoreService("string")
                .sparkHistoryServerConfig(BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs.builder()
                    .dataprocCluster("string")
                    .build())
                .build())
            .build())
        .labels(Map.of("string", "string"))
        .location("string")
        .project("string")
        .pysparkBatch(BatchPysparkBatchArgs.builder()
            .archiveUris("string")
            .args("string")
            .fileUris("string")
            .jarFileUris("string")
            .mainPythonFileUri("string")
            .pythonFileUris("string")
            .build())
        .runtimeConfig(BatchRuntimeConfigArgs.builder()
            .containerImage("string")
            .effectiveProperties(Map.of("string", "string"))
            .properties(Map.of("string", "string"))
            .version("string")
            .build())
        .sparkBatch(BatchSparkBatchArgs.builder()
            .archiveUris("string")
            .args("string")
            .fileUris("string")
            .jarFileUris("string")
            .mainClass("string")
            .mainJarFileUri("string")
            .build())
        .sparkRBatch(BatchSparkRBatchArgs.builder()
            .archiveUris("string")
            .args("string")
            .fileUris("string")
            .mainRFileUri("string")
            .build())
        .sparkSqlBatch(BatchSparkSqlBatchArgs.builder()
            .jarFileUris("string")
            .queryFileUri("string")
            .queryVariables(Map.of("string", "string"))
            .build())
        .build());
    
    batch_resource = gcp.dataproc.Batch("batchResource",
        batch_id="string",
        environment_config={
            "execution_config": {
                "kms_key": "string",
                "network_tags": ["string"],
                "network_uri": "string",
                "service_account": "string",
                "staging_bucket": "string",
                "subnetwork_uri": "string",
                "ttl": "string",
            },
            "peripherals_config": {
                "metastore_service": "string",
                "spark_history_server_config": {
                    "dataproc_cluster": "string",
                },
            },
        },
        labels={
            "string": "string",
        },
        location="string",
        project="string",
        pyspark_batch={
            "archive_uris": ["string"],
            "args": ["string"],
            "file_uris": ["string"],
            "jar_file_uris": ["string"],
            "main_python_file_uri": "string",
            "python_file_uris": ["string"],
        },
        runtime_config={
            "container_image": "string",
            "effective_properties": {
                "string": "string",
            },
            "properties": {
                "string": "string",
            },
            "version": "string",
        },
        spark_batch={
            "archive_uris": ["string"],
            "args": ["string"],
            "file_uris": ["string"],
            "jar_file_uris": ["string"],
            "main_class": "string",
            "main_jar_file_uri": "string",
        },
        spark_r_batch={
            "archive_uris": ["string"],
            "args": ["string"],
            "file_uris": ["string"],
            "main_r_file_uri": "string",
        },
        spark_sql_batch={
            "jar_file_uris": ["string"],
            "query_file_uri": "string",
            "query_variables": {
                "string": "string",
            },
        })
    
    const batchResource = new gcp.dataproc.Batch("batchResource", {
        batchId: "string",
        environmentConfig: {
            executionConfig: {
                kmsKey: "string",
                networkTags: ["string"],
                networkUri: "string",
                serviceAccount: "string",
                stagingBucket: "string",
                subnetworkUri: "string",
                ttl: "string",
            },
            peripheralsConfig: {
                metastoreService: "string",
                sparkHistoryServerConfig: {
                    dataprocCluster: "string",
                },
            },
        },
        labels: {
            string: "string",
        },
        location: "string",
        project: "string",
        pysparkBatch: {
            archiveUris: ["string"],
            args: ["string"],
            fileUris: ["string"],
            jarFileUris: ["string"],
            mainPythonFileUri: "string",
            pythonFileUris: ["string"],
        },
        runtimeConfig: {
            containerImage: "string",
            effectiveProperties: {
                string: "string",
            },
            properties: {
                string: "string",
            },
            version: "string",
        },
        sparkBatch: {
            archiveUris: ["string"],
            args: ["string"],
            fileUris: ["string"],
            jarFileUris: ["string"],
            mainClass: "string",
            mainJarFileUri: "string",
        },
        sparkRBatch: {
            archiveUris: ["string"],
            args: ["string"],
            fileUris: ["string"],
            mainRFileUri: "string",
        },
        sparkSqlBatch: {
            jarFileUris: ["string"],
            queryFileUri: "string",
            queryVariables: {
                string: "string",
            },
        },
    });
    
    type: gcp:dataproc:Batch
    properties:
        batchId: string
        environmentConfig:
            executionConfig:
                kmsKey: string
                networkTags:
                    - string
                networkUri: string
                serviceAccount: string
                stagingBucket: string
                subnetworkUri: string
                ttl: string
            peripheralsConfig:
                metastoreService: string
                sparkHistoryServerConfig:
                    dataprocCluster: string
        labels:
            string: string
        location: string
        project: string
        pysparkBatch:
            archiveUris:
                - string
            args:
                - string
            fileUris:
                - string
            jarFileUris:
                - string
            mainPythonFileUri: string
            pythonFileUris:
                - string
        runtimeConfig:
            containerImage: string
            effectiveProperties:
                string: string
            properties:
                string: string
            version: string
        sparkBatch:
            archiveUris:
                - string
            args:
                - string
            fileUris:
                - string
            jarFileUris:
                - string
            mainClass: string
            mainJarFileUri: string
        sparkRBatch:
            archiveUris:
                - string
            args:
                - string
            fileUris:
                - string
            mainRFileUri: string
        sparkSqlBatch:
            jarFileUris:
                - string
            queryFileUri: string
            queryVariables:
                string: string
    

    Batch Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
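
    For instance, the following minimal sketch passes the same runtime_config first as a typed argument class and then as a dictionary literal; the resource names here are placeholders, and both forms produce identical configurations:

    import pulumi_gcp as gcp

    # Style 1: typed argument classes (names match this page's Python overload).
    typed_style = gcp.dataproc.Batch("typed-style",
        location="us-central1",
        runtime_config=gcp.dataproc.BatchRuntimeConfigArgs(
            properties={"spark.executor.instances": "2"},
        ))

    # Style 2: dictionary literals with the same snake_case keys.
    dict_style = gcp.dataproc.Batch("dict-style",
        location="us-central1",
        runtime_config={
            "properties": {"spark.executor.instances": "2"},
        })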

    The Batch resource accepts the following input properties:

    BatchId string
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    EnvironmentConfig BatchEnvironmentConfig
    Environment configuration for the batch execution. Structure is documented below.
    Labels Dictionary<string, string>

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Location string
    The location in which the batch will be created.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PysparkBatch BatchPysparkBatch
    PySpark batch config. Structure is documented below.
    RuntimeConfig BatchRuntimeConfig
    Runtime configuration for the batch execution. Structure is documented below.
    SparkBatch BatchSparkBatch
    Spark batch config. Structure is documented below.
    SparkRBatch BatchSparkRBatch
    SparkR batch config. Structure is documented below.
    SparkSqlBatch BatchSparkSqlBatch
    Spark SQL batch config. Structure is documented below.
    BatchId string
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    EnvironmentConfig BatchEnvironmentConfigArgs
    Environment configuration for the batch execution. Structure is documented below.
    Labels map[string]string

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Location string
    The location in which the batch will be created.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PysparkBatch BatchPysparkBatchArgs
    PySpark batch config. Structure is documented below.
    RuntimeConfig BatchRuntimeConfigArgs
    Runtime configuration for the batch execution. Structure is documented below.
    SparkBatch BatchSparkBatchArgs
    Spark batch config. Structure is documented below.
    SparkRBatch BatchSparkRBatchArgs
    SparkR batch config. Structure is documented below.
    SparkSqlBatch BatchSparkSqlBatchArgs
    Spark SQL batch config. Structure is documented below.
    batchId String
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    environmentConfig BatchEnvironmentConfig
    Environment configuration for the batch execution. Structure is documented below.
    labels Map<String,String>

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location String
    The location in which the batch will be created.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pysparkBatch BatchPysparkBatch
    PySpark batch config. Structure is documented below.
    runtimeConfig BatchRuntimeConfig
    Runtime configuration for the batch execution. Structure is documented below.
    sparkBatch BatchSparkBatch
    Spark batch config. Structure is documented below.
    sparkRBatch BatchSparkRBatch
    SparkR batch config. Structure is documented below.
    sparkSqlBatch BatchSparkSqlBatch
    Spark SQL batch config. Structure is documented below.
    batchId string
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    environmentConfig BatchEnvironmentConfig
    Environment configuration for the batch execution. Structure is documented below.
    labels {[key: string]: string}

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location string
    The location in which the batch will be created.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pysparkBatch BatchPysparkBatch
    PySpark batch config. Structure is documented below.
    runtimeConfig BatchRuntimeConfig
    Runtime configuration for the batch execution. Structure is documented below.
    sparkBatch BatchSparkBatch
    Spark batch config. Structure is documented below.
    sparkRBatch BatchSparkRBatch
    SparkR batch config. Structure is documented below.
    sparkSqlBatch BatchSparkSqlBatch
    Spark SQL batch config. Structure is documented below.
    batch_id str
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    environment_config BatchEnvironmentConfigArgs
    Environment configuration for the batch execution. Structure is documented below.
    labels Mapping[str, str]

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location str
    The location in which the batch will be created.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pyspark_batch BatchPysparkBatchArgs
    PySpark batch config. Structure is documented below.
    runtime_config BatchRuntimeConfigArgs
    Runtime configuration for the batch execution. Structure is documented below.
    spark_batch BatchSparkBatchArgs
    Spark batch config. Structure is documented below.
    spark_r_batch BatchSparkRBatchArgs
    SparkR batch config. Structure is documented below.
    spark_sql_batch BatchSparkSqlBatchArgs
    Spark SQL batch config. Structure is documented below.
    batchId String
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    environmentConfig Property Map
    Environment configuration for the batch execution. Structure is documented below.
    labels Map<String>

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location String
    The location in which the batch will be created.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pysparkBatch Property Map
    PySpark batch config. Structure is documented below.
    runtimeConfig Property Map
    Runtime configuration for the batch execution. Structure is documented below.
    sparkBatch Property Map
    Spark batch config. Structure is documented below.
    sparkRBatch Property Map
    SparkR batch config. Structure is documented below.
    sparkSqlBatch Property Map
    Spark SQL batch config. Structure is documented below.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Batch resource produces the following output properties:
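
    For example, a minimal Python sketch (the batch configuration below is illustrative) can export the service-generated outputs alongside effective_labels:

    import pulumi
    import pulumi_gcp as gcp

    batch = gcp.dataproc.Batch("output-example",
        location="us-central1",
        spark_batch={
            "main_class": "org.apache.spark.examples.SparkPi",
            "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        })

    # Output properties resolve once the batch has been created.
    pulumi.export("batchUuid", batch.uuid)
    pulumi.export("batchState", batch.state)
    pulumi.export("batchEffectiveLabels", batch.effective_labels)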

    CreateTime string
    The time when the batch was created.
    Creator string
    The email address of the user who created the batch.
    EffectiveLabels Dictionary<string, string>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The resource name of the batch.
    Operation string
    The resource name of the operation associated with this batch.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    RuntimeInfos List<BatchRuntimeInfo>
    Runtime information about batch execution. Structure is documented below.
    State string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    StateHistories List<BatchStateHistory>
    Historical state information for the batch. Structure is documented below.
    StateMessage string
    (Output) Details about the state at this point in history.
    StateTime string
    The time when the batch entered the current state.
    Uuid string
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    CreateTime string
    The time when the batch was created.
    Creator string
    The email address of the user who created the batch.
    EffectiveLabels map[string]string
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The resource name of the batch.
    Operation string
    The resource name of the operation associated with this batch.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    RuntimeInfos []BatchRuntimeInfo
    Runtime information about batch execution. Structure is documented below.
    State string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    StateHistories []BatchStateHistory
    Historical state information for the batch. Structure is documented below.
    StateMessage string
    (Output) Details about the state at this point in history.
    StateTime string
    The time when the batch entered the current state.
    Uuid string
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    createTime String
    The time when the batch was created.
    creator String
    The email address of the user who created the batch.
    effectiveLabels Map<String,String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The resource name of the batch.
    operation String
    The resource name of the operation associated with this batch.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    runtimeInfos List<BatchRuntimeInfo>
    Runtime information about batch execution. Structure is documented below.
    state String
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateHistories List<BatchStateHistory>
    Historical state information for the batch. Structure is documented below.
    stateMessage String
    (Output) Details about the state at this point in history.
    stateTime String
    The time when the batch entered the current state.
    uuid String
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    createTime string
    The time when the batch was created.
    creator string
    The email address of the user who created the batch.
    effectiveLabels {[key: string]: string}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id string
    The provider-assigned unique ID for this managed resource.
    name string
    The resource name of the batch.
    operation string
    The resource name of the operation associated with this batch.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    runtimeInfos BatchRuntimeInfo[]
    Runtime information about batch execution. Structure is documented below.
    state string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateHistories BatchStateHistory[]
    Historical state information for the batch. Structure is documented below.
    stateMessage string
    (Output) Details about the state at this point in history.
    stateTime string
    The time when the batch entered the current state.
    uuid string
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    create_time str
    The time when the batch was created.
    creator str
    The email address of the user who created the batch.
    effective_labels Mapping[str, str]
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id str
    The provider-assigned unique ID for this managed resource.
    name str
    The resource name of the batch.
    operation str
    The resource name of the operation associated with this batch.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    runtime_infos Sequence[BatchRuntimeInfo]
    Runtime information about batch execution. Structure is documented below.
    state str
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    state_histories Sequence[BatchStateHistory]
    Historical state information for the batch. Structure is documented below.
    state_message str
    (Output) Details about the state at this point in history.
    state_time str
    The time when the batch entered the current state.
    uuid str
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    createTime String
    The time when the batch was created.
    creator String
    The email address of the user who created the batch.
    effectiveLabels Map<String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The resource name of the batch.
    operation String
    The resource name of the operation associated with this batch.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    runtimeInfos List<Property Map>
    Runtime information about batch execution. Structure is documented below.
    state String
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateHistories List<Property Map>
    Historical state information for the batch. Structure is documented below.
    stateMessage String
    (Output) Details about the state at this point in history.
    stateTime String
    The time when the batch entered the current state.
    uuid String
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.

    Look up Existing Batch Resource

    Get an existing Batch resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: BatchState, opts?: CustomResourceOptions): Batch
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            batch_id: Optional[str] = None,
            create_time: Optional[str] = None,
            creator: Optional[str] = None,
            effective_labels: Optional[Mapping[str, str]] = None,
            environment_config: Optional[BatchEnvironmentConfigArgs] = None,
            labels: Optional[Mapping[str, str]] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            operation: Optional[str] = None,
            project: Optional[str] = None,
            pulumi_labels: Optional[Mapping[str, str]] = None,
            pyspark_batch: Optional[BatchPysparkBatchArgs] = None,
            runtime_config: Optional[BatchRuntimeConfigArgs] = None,
            runtime_infos: Optional[Sequence[BatchRuntimeInfoArgs]] = None,
            spark_batch: Optional[BatchSparkBatchArgs] = None,
            spark_r_batch: Optional[BatchSparkRBatchArgs] = None,
            spark_sql_batch: Optional[BatchSparkSqlBatchArgs] = None,
            state: Optional[str] = None,
            state_histories: Optional[Sequence[BatchStateHistoryArgs]] = None,
            state_message: Optional[str] = None,
            state_time: Optional[str] = None,
            uuid: Optional[str] = None) -> Batch
    func GetBatch(ctx *Context, name string, id IDInput, state *BatchState, opts ...ResourceOption) (*Batch, error)
    public static Batch Get(string name, Input<string> id, BatchState? state, CustomResourceOptions? opts = null)
    public static Batch get(String name, Output<String> id, BatchState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
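
    For example, a minimal Python sketch (the project, location, and batch ID below are placeholders; Batch IDs typically take the form projects/{project}/locations/{location}/batches/{batch_id}):

    import pulumi_gcp as gcp

    # Look up a batch that already exists outside of this program's state.
    existing = gcp.dataproc.Batch.get("existing-batch",
        "projects/my-project/locations/us-central1/batches/tf-test-batch_75125")
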
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    BatchId string
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    CreateTime string
    The time when the batch was created.
    Creator string
    The email address of the user who created the batch.
    EffectiveLabels Dictionary<string, string>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    EnvironmentConfig BatchEnvironmentConfig
    Environment configuration for the batch execution. Structure is documented below.
    Labels Dictionary<string, string>

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Location string
    The location in which the batch will be created.
    Name string
    The resource name of the batch.
    Operation string
    The resource name of the operation associated with this batch.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels Dictionary<string, string>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    PysparkBatch BatchPysparkBatch
    PySpark batch config. Structure is documented below.
    RuntimeConfig BatchRuntimeConfig
    Runtime configuration for the batch execution. Structure is documented below.
    RuntimeInfos List<BatchRuntimeInfo>
    Runtime information about batch execution. Structure is documented below.
    SparkBatch BatchSparkBatch
    Spark batch config. Structure is documented below.
    SparkRBatch BatchSparkRBatch
    SparkR batch config. Structure is documented below.
    SparkSqlBatch BatchSparkSqlBatch
    Spark SQL batch config. Structure is documented below.
    State string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    StateHistories List<BatchStateHistory>
    Historical state information for the batch. Structure is documented below.
    StateMessage string
    (Output) Details about the state at this point in history.
    StateTime string
    The time when the batch entered the current state.
    Uuid string
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    BatchId string
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    CreateTime string
    The time when the batch was created.
    Creator string
    The email address of the user who created the batch.
    EffectiveLabels map[string]string
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    EnvironmentConfig BatchEnvironmentConfigArgs
    Environment configuration for the batch execution. Structure is documented below.
    Labels map[string]string

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    Location string
    The location in which the batch will be created.
    Name string
    The resource name of the batch.
    Operation string
    The resource name of the operation associated with this batch.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    PulumiLabels map[string]string
    The combination of labels configured directly on the resource and default labels configured on the provider.
    PysparkBatch BatchPysparkBatchArgs
    PySpark batch config. Structure is documented below.
    RuntimeConfig BatchRuntimeConfigArgs
    Runtime configuration for the batch execution. Structure is documented below.
    RuntimeInfos []BatchRuntimeInfoArgs
    Runtime information about batch execution. Structure is documented below.
    SparkBatch BatchSparkBatchArgs
    Spark batch config. Structure is documented below.
    SparkRBatch BatchSparkRBatchArgs
    SparkR batch config. Structure is documented below.
    SparkSqlBatch BatchSparkSqlBatchArgs
    Spark SQL batch config. Structure is documented below.
    State string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    StateHistories []BatchStateHistoryArgs
    Historical state information for the batch. Structure is documented below.
    StateMessage string
    (Output) Details about the state at this point in history.
    StateTime string
    The time when the batch entered the current state.
    Uuid string
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    batchId String
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    createTime String
    The time when the batch was created.
    creator String
    The email address of the user who created the batch.
    effectiveLabels Map<String,String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    environmentConfig BatchEnvironmentConfig
    Environment configuration for the batch execution. Structure is documented below.
    labels Map<String,String>

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location String
    The location in which the batch will be created.
    name String
    The resource name of the batch.
    operation String
    The resource name of the operation associated with this batch.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String,String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    pysparkBatch BatchPysparkBatch
    PySpark batch config. Structure is documented below.
    runtimeConfig BatchRuntimeConfig
    Runtime configuration for the batch execution. Structure is documented below.
    runtimeInfos List<BatchRuntimeInfo>
    Runtime information about batch execution. Structure is documented below.
    sparkBatch BatchSparkBatch
    Spark batch config. Structure is documented below.
    sparkRBatch BatchSparkRBatch
    SparkR batch config. Structure is documented below.
    sparkSqlBatch BatchSparkSqlBatch
    Spark SQL batch config. Structure is documented below.
    state String
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateHistories List<BatchStateHistory>
    Historical state information for the batch. Structure is documented below.
    stateMessage String
    (Output) Details about the state at this point in history.
    stateTime String
    The time when the batch entered the current state.
    uuid String
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    batchId string
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    createTime string
    The time when the batch was created.
    creator string
    The email address of the user who created the batch.
    effectiveLabels {[key: string]: string}
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    environmentConfig BatchEnvironmentConfig
    Environment configuration for the batch execution. Structure is documented below.
    labels {[key: string]: string}

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location string
    The location in which the batch will be created.
    name string
    The resource name of the batch.
    operation string
    The resource name of the operation associated with this batch.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels {[key: string]: string}
    The combination of labels configured directly on the resource and default labels configured on the provider.
    pysparkBatch BatchPysparkBatch
    PySpark batch config. Structure is documented below.
    runtimeConfig BatchRuntimeConfig
    Runtime configuration for the batch execution. Structure is documented below.
    runtimeInfos BatchRuntimeInfo[]
    Runtime information about batch execution. Structure is documented below.
    sparkBatch BatchSparkBatch
    Spark batch config. Structure is documented below.
    sparkRBatch BatchSparkRBatch
    SparkR batch config. Structure is documented below.
    sparkSqlBatch BatchSparkSqlBatch
    Spark SQL batch config. Structure is documented below.
    state string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateHistories BatchStateHistory[]
    Historical state information for the batch. Structure is documented below.
    stateMessage string
    (Output) Details about the state at this point in history.
    stateTime string
    The time when the batch entered the current state.
    uuid string
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    batch_id str
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    create_time str
    The time when the batch was created.
    creator str
    The email address of the user who created the batch.
    effective_labels Mapping[str, str]
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    environment_config BatchEnvironmentConfigArgs
    Environment configuration for the batch execution. Structure is documented below.
    labels Mapping[str, str]

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location str
    The location in which the batch will be created.
    name str
    The resource name of the batch.
    operation str
    The resource name of the operation associated with this batch.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumi_labels Mapping[str, str]
    The combination of labels configured directly on the resource and default labels configured on the provider.
    pyspark_batch BatchPysparkBatchArgs
    PySpark batch config. Structure is documented below.
    runtime_config BatchRuntimeConfigArgs
    Runtime configuration for the batch execution. Structure is documented below.
    runtime_infos Sequence[BatchRuntimeInfoArgs]
    Runtime information about batch execution. Structure is documented below.
    spark_batch BatchSparkBatchArgs
    Spark batch config. Structure is documented below.
    spark_r_batch BatchSparkRBatchArgs
    SparkR batch config. Structure is documented below.
    spark_sql_batch BatchSparkSqlBatchArgs
    Spark SQL batch config. Structure is documented below.
    state str
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    state_histories Sequence[BatchStateHistoryArgs]
    Historical state information for the batch. Structure is documented below.
    state_message str
    (Output) Details about the state at this point in history.
    state_time str
    The time when the batch entered the current state.
    uuid str
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
    batchId String
    The ID to use for the batch, which will become the final component of the batch's resource name. This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
    createTime String
    The time when the batch was created.
    creator String
    The email address of the user who created the batch.
    effectiveLabels Map<String>
    All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
    environmentConfig Property Map
    Environment configuration for the batch execution. Structure is documented below.
    labels Map<String>

    The labels to associate with this batch.

    Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field effective_labels for all of the labels present on the resource.

    location String
    The location in which the batch will be created.
    name String
    The resource name of the batch.
    operation String
    The resource name of the operation associated with this batch.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    pulumiLabels Map<String>
    The combination of labels configured directly on the resource and default labels configured on the provider.
    pysparkBatch Property Map
    PySpark batch config. Structure is documented below.
    runtimeConfig Property Map
    Runtime configuration for the batch execution. Structure is documented below.
    runtimeInfos List<Property Map>
    Runtime information about batch execution. Structure is documented below.
    sparkBatch Property Map
    Spark batch config. Structure is documented below.
    sparkRBatch Property Map
    SparkR batch config. Structure is documented below.
    sparkSqlBatch Property Map
    Spark SQL batch config. Structure is documented below.
    state String
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateHistories List<Property Map>
    Historical state information for the batch. Structure is documented below.
    stateMessage String
    (Output) Details about the state at this point in history.
    stateTime String
    The time when the batch entered its current state.
    uuid String
    A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.

    Supporting Types

    BatchEnvironmentConfig, BatchEnvironmentConfigArgs

    ExecutionConfig BatchEnvironmentConfigExecutionConfig
    Execution configuration for a workload. Structure is documented below.
    PeripheralsConfig BatchEnvironmentConfigPeripheralsConfig
    Peripherals configuration that the workload has access to. Structure is documented below.
    ExecutionConfig BatchEnvironmentConfigExecutionConfig
    Execution configuration for a workload. Structure is documented below.
    PeripheralsConfig BatchEnvironmentConfigPeripheralsConfig
    Peripherals configuration that the workload has access to. Structure is documented below.
    executionConfig BatchEnvironmentConfigExecutionConfig
    Execution configuration for a workload. Structure is documented below.
    peripheralsConfig BatchEnvironmentConfigPeripheralsConfig
    Peripherals configuration that the workload has access to. Structure is documented below.
    executionConfig BatchEnvironmentConfigExecutionConfig
    Execution configuration for a workload. Structure is documented below.
    peripheralsConfig BatchEnvironmentConfigPeripheralsConfig
    Peripherals configuration that the workload has access to. Structure is documented below.
    execution_config BatchEnvironmentConfigExecutionConfig
    Execution configuration for a workload. Structure is documented below.
    peripherals_config BatchEnvironmentConfigPeripheralsConfig
    Peripherals configuration that the workload has access to. Structure is documented below.
    executionConfig Property Map
    Execution configuration for a workload. Structure is documented below.
    peripheralsConfig Property Map
    Peripherals configuration that the workload has access to. Structure is documented below.
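
    A minimal sketch (TypeScript) of how the two nested blocks combine on a single batch; the metastore service name below is a placeholder, not a confirmed resource:

    import * as gcp from "@pulumi/gcp";

    // Sketch: an environmentConfig carrying both nested blocks. The metastore
    // service name is a placeholder.
    const batchWithEnv = new gcp.dataproc.Batch("batch-with-env", {
        location: "us-central1",
        sparkBatch: {
            mainClass: "org.apache.spark.examples.SparkPi",
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
        environmentConfig: {
            executionConfig: {
                subnetworkUri: "default",
                ttl: "3600s",
            },
            peripheralsConfig: {
                metastoreService: "projects/my-project/locations/us-central1/services/my-metastore",
            },
        },
    });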

    BatchEnvironmentConfigExecutionConfig, BatchEnvironmentConfigExecutionConfigArgs

    KmsKey string
    The Cloud KMS key to use for encryption.
    NetworkTags List<string>
    Tags used for network traffic control.
    NetworkUri string
    Network configuration for workload execution.
    ServiceAccount string
    Service account used to execute the workload.
    StagingBucket string
    A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
    SubnetworkUri string
    Subnetwork configuration for workload execution.
    Ttl string
    The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
    KmsKey string
    The Cloud KMS key to use for encryption.
    NetworkTags []string
    Tags used for network traffic control.
    NetworkUri string
    Network configuration for workload execution.
    ServiceAccount string
    Service account used to execute the workload.
    StagingBucket string
    A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
    SubnetworkUri string
    Subnetwork configuration for workload execution.
    Ttl string
    The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
    kmsKey String
    The Cloud KMS key to use for encryption.
    networkTags List<String>
    Tags used for network traffic control.
    networkUri String
    Network configuration for workload execution.
    serviceAccount String
    Service account used to execute the workload.
    stagingBucket String
    A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
    subnetworkUri String
    Subnetwork configuration for workload execution.
    ttl String
    The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
    kmsKey string
    The Cloud KMS key to use for encryption.
    networkTags string[]
    Tags used for network traffic control.
    networkUri string
    Network configuration for workload execution.
    serviceAccount string
    Service account used to execute the workload.
    stagingBucket string
    A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
    subnetworkUri string
    Subnetwork configuration for workload execution.
    ttl string
    The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
    kms_key str
    The Cloud KMS key to use for encryption.
    network_tags Sequence[str]
    Tags used for network traffic control.
    network_uri str
    Network configuration for workload execution.
    service_account str
    Service account used to execute the workload.
    staging_bucket str
    A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
    subnetwork_uri str
    Subnetwork configuration for workload execution.
    ttl str
    The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
    kmsKey String
    The Cloud KMS key to use for encryption.
    networkTags List<String>
    Tags used for network traffic control.
    networkUri String
    Network configuration for workload execution.
    serviceAccount String
    Service account used to execute the workload.
    stagingBucket String
    A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
    subnetworkUri String
    Subnetwork configuration for workload execution.
    ttl String
    The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or when ttl has been exceeded, whichever occurs first.
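
    The sketch below (TypeScript) exercises the security- and lifetime-related fields described above; the service account, KMS key, and bucket names are placeholders:

    import * as gcp from "@pulumi/gcp";

    // Sketch: an executionConfig with encryption, identity, staging, and TTL
    // settings. All resource names are placeholders.
    const securedBatch = new gcp.dataproc.Batch("secured-batch", {
        location: "us-central1",
        sparkBatch: {
            mainClass: "org.apache.spark.examples.SparkPi",
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
        environmentConfig: {
            executionConfig: {
                serviceAccount: "batch-runner@my-project.iam.gserviceaccount.com",
                kmsKey: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
                stagingBucket: "my-staging-bucket", // a bucket name, not a gs:// URI
                networkTags: ["dataproc-batch"],
                ttl: "14400s", // terminate unconditionally after 4 hours
            },
        },
    });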

    BatchEnvironmentConfigPeripheralsConfig, BatchEnvironmentConfigPeripheralsConfigArgs

    MetastoreService string
    Resource name of an existing Dataproc Metastore service.
    SparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
    The Spark History Server configuration for the workload. Structure is documented below.
    MetastoreService string
    Resource name of an existing Dataproc Metastore service.
    SparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
    The Spark History Server configuration for the workload. Structure is documented below.
    metastoreService String
    Resource name of an existing Dataproc Metastore service.
    sparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
    The Spark History Server configuration for the workload. Structure is documented below.
    metastoreService string
    Resource name of an existing Dataproc Metastore service.
    sparkHistoryServerConfig BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
    The Spark History Server configuration for the workload. Structure is documented below.
    metastore_service str
    Resource name of an existing Dataproc Metastore service.
    spark_history_server_config BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig
    The Spark History Server configuration for the workload. Structure is documented below.
    metastoreService String
    Resource name of an existing Dataproc Metastore service.
    sparkHistoryServerConfig Property Map
    The Spark History Server configuration for the workload. Structure is documented below.
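
    A short sketch (TypeScript) wiring a batch to an existing metastore and to a Dataproc cluster acting as a Spark History Server (the nested block is documented next); both resource names are placeholders:

    import * as gcp from "@pulumi/gcp";

    // Sketch: peripheralsConfig pointing at placeholder metastore and
    // history-server cluster resources.
    const batchWithPeripherals = new gcp.dataproc.Batch("batch-with-peripherals", {
        location: "us-central1",
        sparkBatch: {
            mainClass: "org.apache.spark.examples.SparkPi",
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
        environmentConfig: {
            peripheralsConfig: {
                metastoreService: "projects/my-project/locations/us-central1/services/my-metastore",
                sparkHistoryServerConfig: {
                    dataprocCluster: "projects/my-project/regions/us-central1/clusters/my-history-cluster",
                },
            },
        },
    });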

    BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig, BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs

    DataprocCluster string
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
    DataprocCluster string
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
    dataprocCluster String
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
    dataprocCluster string
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
    dataproc_cluster str
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.
    dataprocCluster String
    Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.

    BatchPysparkBatch, BatchPysparkBatchArgs

    ArchiveUris List<string>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args List<string>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    FileUris List<string>
    HCFS URIs of files to be placed in the working directory of each executor.
    JarFileUris List<string>
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    MainPythonFileUri string
    The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
    PythonFileUris List<string>
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    ArchiveUris []string
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args []string
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    FileUris []string
    HCFS URIs of files to be placed in the working directory of each executor.
    JarFileUris []string
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    MainPythonFileUri string
    The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
    PythonFileUris []string
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    mainPythonFileUri String
    The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
    pythonFileUris List<String>
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    archiveUris string[]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args string[]
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris string[]
    HCFS URIs of files to be placed in the working directory of each executor.
    jarFileUris string[]
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    mainPythonFileUri string
    The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
    pythonFileUris string[]
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    archive_uris Sequence[str]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args Sequence[str]
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    file_uris Sequence[str]
    HCFS URIs of files to be placed in the working directory of each executor.
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    main_python_file_uri str
    The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
    python_file_uris Sequence[str]
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    mainPythonFileUri String
    The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
    pythonFileUris List<String>
    HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
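
    A minimal PySpark sketch (TypeScript); the gs:// URIs are placeholders for objects staged in your own bucket:

    import * as gcp from "@pulumi/gcp";

    // Sketch: a PySpark batch. mainPythonFileUri must point at a .py file;
    // all URIs below are placeholders.
    const pysparkExample = new gcp.dataproc.Batch("pyspark-example", {
        location: "us-central1",
        pysparkBatch: {
            mainPythonFileUri: "gs://my-bucket/jobs/word_count.py",
            pythonFileUris: ["gs://my-bucket/jobs/helpers.zip"],
            args: ["gs://my-bucket/input/", "gs://my-bucket/output/"],
        },
    });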

    BatchRuntimeConfig, BatchRuntimeConfigArgs

    ContainerImage string
    Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
    EffectiveProperties Dictionary<string, string>
    (Output) A mapping of property names to values, which are used to configure workload execution.
    Properties Dictionary<string, string>
    A mapping of property names to values, which are used to configure workload execution.
    Version string
    Version of the batch runtime.
    ContainerImage string
    Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
    EffectiveProperties map[string]string
    (Output) A mapping of property names to values, which are used to configure workload execution.
    Properties map[string]string
    A mapping of property names to values, which are used to configure workload execution.
    Version string
    Version of the batch runtime.
    containerImage String
    Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
    effectiveProperties Map<String,String>
    (Output) A mapping of property names to values, which are used to configure workload execution.
    properties Map<String,String>
    A mapping of property names to values, which are used to configure workload execution.
    version String
    Version of the batch runtime.
    containerImage string
    Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
    effectiveProperties {[key: string]: string}
    (Output) A mapping of property names to values, which are used to configure workload execution.
    properties {[key: string]: string}
    A mapping of property names to values, which are used to configure workload execution.
    version string
    Version of the batch runtime.
    container_image str
    Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
    effective_properties Mapping[str, str]
    (Output) A mapping of property names to values, which are used to configure workload execution.
    properties Mapping[str, str]
    A mapping of property names to values, which are used to configure workload execution.
    version str
    Version of the batch runtime.
    containerImage String
    Optional custom container image for the job runtime environment. If not specified, a default container image will be used.
    effectiveProperties Map<String>
    (Output) A mapping of property names to values, which are used to configure workload execution.
    properties Map<String>
    A mapping of property names to values, which are used to configure workload execution.
    version String
    Version of the batch runtime.
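
    A sketch (TypeScript) pinning the runtime version and tuning Spark properties; the version string is illustrative, not a recommendation:

    import * as gcp from "@pulumi/gcp";

    // Sketch: a runtimeConfig with a pinned runtime version and Spark properties.
    const tunedBatch = new gcp.dataproc.Batch("tuned-batch", {
        location: "us-central1",
        sparkBatch: {
            mainClass: "org.apache.spark.examples.SparkPi",
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
        runtimeConfig: {
            version: "2.2", // illustrative runtime version
            properties: {
                "spark.executor.instances": "4",
            },
        },
    });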

    BatchRuntimeInfo, BatchRuntimeInfoArgs

    ApproximateUsages List<BatchRuntimeInfoApproximateUsage>
    (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
    CurrentUsages List<BatchRuntimeInfoCurrentUsage>
    (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
    DiagnosticOutputUri string
    (Output) A URI pointing to the location of the diagnostics tarball.
    Endpoints Dictionary<string, string>
    (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
    OutputUri string
    (Output) A URI pointing to the location of the stdout and stderr of the workload.
    ApproximateUsages []BatchRuntimeInfoApproximateUsage
    (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
    CurrentUsages []BatchRuntimeInfoCurrentUsage
    (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
    DiagnosticOutputUri string
    (Output) A URI pointing to the location of the diagnostics tarball.
    Endpoints map[string]string
    (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
    OutputUri string
    (Output) A URI pointing to the location of the stdout and stderr of the workload.
    approximateUsages List<BatchRuntimeInfoApproximateUsage>
    (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
    currentUsages List<BatchRuntimeInfoCurrentUsage>
    (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
    diagnosticOutputUri String
    (Output) A URI pointing to the location of the diagnostics tarball.
    endpoints Map<String,String>
    (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
    outputUri String
    (Output) A URI pointing to the location of the stdout and stderr of the workload.
    approximateUsages BatchRuntimeInfoApproximateUsage[]
    (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
    currentUsages BatchRuntimeInfoCurrentUsage[]
    (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
    diagnosticOutputUri string
    (Output) A URI pointing to the location of the diagnostics tarball.
    endpoints {[key: string]: string}
    (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
    outputUri string
    (Output) A URI pointing to the location of the stdout and stderr of the workload.
    approximate_usages Sequence[BatchRuntimeInfoApproximateUsage]
    (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
    current_usages Sequence[BatchRuntimeInfoCurrentUsage]
    (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
    diagnostic_output_uri str
    (Output) A URI pointing to the location of the diagnostics tarball.
    endpoints Mapping[str, str]
    (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
    output_uri str
    (Output) A URI pointing to the location of the stdout and stderr of the workload.
    approximateUsages List<Property Map>
    (Output) Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing). Structure is documented below.
    currentUsages List<Property Map>
    (Output) Snapshot of current workload resource usage (see Dataproc Serverless pricing). Structure is documented below.
    diagnosticOutputUri String
    (Output) A URI pointing to the location of the diagnostics tarball.
    endpoints Map<String>
    (Output) Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
    outputUri String
    (Output) A URI pointing to the location of the stdout and stderr of the workload.
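
    These fields are output-only, so they are read from the resource rather than configured. A sketch (TypeScript) surfacing them as stack outputs:

    import * as gcp from "@pulumi/gcp";

    // Sketch: exporting runtime information once the service populates it.
    const infoBatch = new gcp.dataproc.Batch("info-batch", {
        location: "us-central1",
        sparkBatch: {
            mainClass: "org.apache.spark.examples.SparkPi",
            jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
        },
    });

    // Driver stdout/stderr location and remote-access endpoints.
    export const driverOutputUri = infoBatch.runtimeInfos.apply(i => i?.[0]?.outputUri);
    export const endpoints = infoBatch.runtimeInfos.apply(i => i?.[0]?.endpoints);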

    BatchRuntimeInfoApproximateUsage, BatchRuntimeInfoApproximateUsageArgs

    AcceleratorType string
    (Output) Accelerator type being used, if any.
    MilliAcceleratorSeconds string
    (Output) Accelerator usage in (milliAccelerator x seconds).
    MilliDcuSeconds string
    (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds).
    ShuffleStorageGbSeconds string
    (Output) Shuffle storage usage in (GB x seconds).
    AcceleratorType string
    (Output) Accelerator type being used, if any.
    MilliAcceleratorSeconds string
    (Output) Accelerator usage in (milliAccelerator x seconds).
    MilliDcuSeconds string
    (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds).
    ShuffleStorageGbSeconds string
    (Output) Shuffle storage usage in (GB x seconds).
    acceleratorType String
    (Output) Accelerator type being used, if any.
    milliAcceleratorSeconds String
    (Output) Accelerator usage in (milliAccelerator x seconds).
    milliDcuSeconds String
    (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds).
    shuffleStorageGbSeconds String
    (Output) Shuffle storage usage in (GB x seconds).
    acceleratorType string
    (Output) Accelerator type being used, if any.
    milliAcceleratorSeconds string
    (Output) Accelerator usage in (milliAccelerator x seconds).
    milliDcuSeconds string
    (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds).
    shuffleStorageGbSeconds string
    (Output) Shuffle storage usage in (GB x seconds).
    accelerator_type str
    (Output) Accelerator type being used, if any.
    milli_accelerator_seconds str
    (Output) Accelerator usage in (milliAccelerator x seconds).
    milli_dcu_seconds str
    (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds).
    shuffle_storage_gb_seconds str
    (Output) Shuffle storage usage in (GB x seconds).
    acceleratorType String
    (Output) Accelerator type being used, if any.
    milliAcceleratorSeconds String
    (Output) Accelerator usage in (milliAccelerator x seconds).
    milliDcuSeconds String
    (Output) DCU (Dataproc Compute Units) usage in (milliDCU x seconds).
    shuffleStorageGbSeconds String
    (Output) Shuffle storage usage in (GB x seconds).

    BatchRuntimeInfoCurrentUsage, BatchRuntimeInfoCurrentUsageArgs

    AcceleratorType string
    (Output) Accelerator type being used, if any.
    MilliAccelerator string
    (Output) Milli (one-thousandth) accelerator.
    MilliDcu string
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
    MilliDcuPremium string
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
    ShuffleStorageGb string
    (Output) Shuffle Storage in gigabytes (GB).
    ShuffleStorageGbPremium string
    (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
    SnapshotTime string
    (Output) The timestamp of the usage snapshot.
    AcceleratorType string
    (Output) Accelerator type being used, if any.
    MilliAccelerator string
    (Output) Milli (one-thousandth) accelerator.
    MilliDcu string
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
    MilliDcuPremium string
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
    ShuffleStorageGb string
    (Output) Shuffle Storage in gigabytes (GB).
    ShuffleStorageGbPremium string
    (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
    SnapshotTime string
    (Output) The timestamp of the usage snapshot.
    acceleratorType String
    (Output) Accelerator type being used, if any.
    milliAccelerator String
    (Output) Milli (one-thousandth) accelerator.
    milliDcu String
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
    milliDcuPremium String
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
    shuffleStorageGb String
    (Output) Shuffle Storage in gigabytes (GB).
    shuffleStorageGbPremium String
    (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
    snapshotTime String
    (Output) The timestamp of the usage snapshot.
    acceleratorType string
    (Output) Accelerator type being used, if any.
    milliAccelerator string
    (Output) Milli (one-thousandth) accelerator.
    milliDcu string
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
    milliDcuPremium string
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
    shuffleStorageGb string
    (Output) Shuffle Storage in gigabytes (GB).
    shuffleStorageGbPremium string
    (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
    snapshotTime string
    (Output) The timestamp of the usage snapshot.
    accelerator_type str
    (Output) Accelerator type being used, if any.
    milli_accelerator str
    (Output) Milli (one-thousandth) accelerator.
    milli_dcu str
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
    milli_dcu_premium str
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
    shuffle_storage_gb str
    (Output) Shuffle Storage in gigabytes (GB).
    shuffle_storage_gb_premium str
    (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
    snapshot_time str
    (Output) The timestamp of the usage snapshot.
    acceleratorType String
    (Output) Accelerator type being used, if any.
    milliAccelerator String
    (Output) Milli (one-thousandth) accelerator.
    milliDcu String
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs).
    milliDcuPremium String
    (Output) Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
    shuffleStorageGb String
    (Output) Shuffle Storage in gigabytes (GB).
    shuffleStorageGbPremium String
    (Output) Shuffle Storage in gigabytes (GB) charged at premium tier.
    snapshotTime String
    (Output) The timestamp of the usage snapshot.

    BatchSparkBatch, BatchSparkBatchArgs

    ArchiveUris List<string>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args List<string>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    FileUris List<string>
    HCFS URIs of files to be placed in the working directory of each executor.
    JarFileUris List<string>
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    MainClass string
    The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
    MainJarFileUri string
    The HCFS URI of the jar file that contains the main class.
    ArchiveUris []string
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args []string
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    FileUris []string
    HCFS URIs of files to be placed in the working directory of each executor.
    JarFileUris []string
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    MainClass string
    The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
    MainJarFileUri string
    The HCFS URI of the jar file that contains the main class.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    mainClass String
    The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
    mainJarFileUri String
    The HCFS URI of the jar file that contains the main class.
    archiveUris string[]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args string[]
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris string[]
    HCFS URIs of files to be placed in the working directory of each executor.
    jarFileUris string[]
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    mainClass string
    The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
    mainJarFileUri string
    The HCFS URI of the jar file that contains the main class.
    archive_uris Sequence[str]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args Sequence[str]
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    file_uris Sequence[str]
    HCFS URIs of files to be placed in the working directory of each executor.
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    main_class str
    The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
    main_jar_file_uri str
    The HCFS URI of the jar file that contains the main class.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor.
    jarFileUris List<String>
    HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
    mainClass String
    The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jarFileUris.
    mainJarFileUri String
    The HCFS URI of the jar file that contains the main class.
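
    Since mainClass and mainJarFileUri are alternative ways of naming the driver entry point, a variant using mainJarFileUri is sketched below (TypeScript); the jar URI is a placeholder:

    import * as gcp from "@pulumi/gcp";

    // Sketch: a Spark batch whose driver is named via mainJarFileUri instead
    // of mainClass. The jar URI is a placeholder.
    const jarBatch = new gcp.dataproc.Batch("jar-batch", {
        location: "us-central1",
        sparkBatch: {
            mainJarFileUri: "gs://my-bucket/jars/my-spark-job.jar",
            args: ["10"],
        },
    });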

    BatchSparkRBatch, BatchSparkRBatchArgs

    ArchiveUris List<string>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args List<string>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    FileUris List<string>
    HCFS URIs of files to be placed in the working directory of each executor.
    MainRFileUri string
    The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
    ArchiveUris []string
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    Args []string
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    FileUris []string
    HCFS URIs of files to be placed in the working directory of each executor.
    MainRFileUri string
    The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor.
    mainRFileUri String
    The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
    archiveUris string[]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args string[]
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris string[]
    HCFS URIs of files to be placed in the working directory of each executor.
    mainRFileUri string
    The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
    archive_uris Sequence[str]
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args Sequence[str]
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    file_uris Sequence[str]
    HCFS URIs of files to be placed in the working directory of each executor.
    main_r_file_uri str
    The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
    archiveUris List<String>
    HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
    args List<String>
    The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
    fileUris List<String>
    HCFS URIs of files to be placed in the working directory of each executor.
    mainRFileUri String
    The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.
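
    A minimal SparkR sketch (TypeScript); the URIs are placeholders:

    import * as gcp from "@pulumi/gcp";

    // Sketch: a SparkR batch driven by an R script (must be a .R or .r file).
    const sparkrExample = new gcp.dataproc.Batch("sparkr-example", {
        location: "us-central1",
        sparkRBatch: {
            mainRFileUri: "gs://my-bucket/jobs/analysis.R",
            fileUris: ["gs://my-bucket/data/input.csv"],
            args: ["input.csv"],
        },
    });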

    BatchSparkSqlBatch, BatchSparkSqlBatchArgs

    JarFileUris List<string>
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    QueryFileUri string
    The HCFS URI of the script that contains Spark SQL queries to execute.
    QueryVariables Dictionary<string, string>
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    JarFileUris []string
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    QueryFileUri string
    The HCFS URI of the script that contains Spark SQL queries to execute.
    QueryVariables map[string]string
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jarFileUris List<String>
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    queryFileUri String
    The HCFS URI of the script that contains Spark SQL queries to execute.
    queryVariables Map<String,String>
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jarFileUris string[]
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    queryFileUri string
    The HCFS URI of the script that contains Spark SQL queries to execute.
    queryVariables {[key: string]: string}
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jar_file_uris Sequence[str]
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    query_file_uri str
    The HCFS URI of the script that contains Spark SQL queries to execute.
    query_variables Mapping[str, str]
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
    jarFileUris List<String>
    HCFS URIs of jar files to be added to the Spark CLASSPATH.
    queryFileUri String
    The HCFS URI of the script that contains Spark SQL queries to execute.
    queryVariables Map<String>
    Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
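
    A minimal Spark SQL sketch (TypeScript); the script URI is a placeholder, and the query variable is substituted into the script as if set with SET report_date="...";:

    import * as gcp from "@pulumi/gcp";

    // Sketch: a Spark SQL batch whose script references the report_date variable.
    const sqlExample = new gcp.dataproc.Batch("sql-example", {
        location: "us-central1",
        sparkSqlBatch: {
            queryFileUri: "gs://my-bucket/queries/daily_report.sql",
            queryVariables: {
                report_date: "2024-11-18",
            },
        },
    });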

    BatchStateHistory, BatchStateHistoryArgs

    State string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    StateMessage string
    (Output) Details about the state at this point in history.
    StateStartTime string
    (Output) The time when the batch entered the historical state.
    State string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    StateMessage string
    (Output) Details about the state at this point in history.
    StateStartTime string
    (Output) The time when the batch entered the historical state.
    state String
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateMessage String
    (Output) Details about the state at this point in history.
    stateStartTime String
    (Output) The time when the batch entered the historical state.
    state string
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateMessage string
    (Output) Details about the state at this point in history.
    stateStartTime string
    (Output) The time when the batch entered the historical state.
    state str
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    state_message str
    (Output) Details about the state at this point in history.
    state_start_time str
    (Output) The time when the batch entered the historical state.
    state String
    (Output) The state of the batch at this point in history. For possible values, see the API documentation.
    stateMessage String
    (Output) Details about the state at this point in history.
    stateStartTime String
    (Output) The time when the batch entered the historical state.

    Import

    Batch can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/batches/{{batch_id}}

    • {{project}}/{{location}}/{{batch_id}}

    • {{location}}/{{batch_id}}

    When using the pulumi import command, Batch can be imported using one of the formats above. For example:

    $ pulumi import gcp:dataproc/batch:Batch default projects/{{project}}/locations/{{location}}/batches/{{batch_id}}
    
    $ pulumi import gcp:dataproc/batch:Batch default {{project}}/{{location}}/{{batch_id}}
    
    $ pulumi import gcp:dataproc/batch:Batch default {{location}}/{{batch_id}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.