1. Packages
  2. Databricks
  3. API Docs
  4. MwsWorkspaces
Databricks v1.56.0 published on Tuesday, Nov 12, 2024 by Pulumi

databricks.MwsWorkspaces

Explore with Pulumi AI

databricks logo
Databricks v1.56.0 published on Tuesday, Nov 12, 2024 by Pulumi

    Example Usage

    Creating a Databricks on AWS workspace

    *(Diagram: simplest multi-workspace setup — image reference lost during extraction.)*

    To get a workspace running, you have to configure a couple of things:

    • databricks.MwsCredentials - You can share a credentials (cross-account IAM role) configuration ID with multiple workspaces. It is not required to create a new one for each workspace.
    • databricks.MwsStorageConfigurations - You can share a root S3 bucket with multiple workspaces in a single account. You do not have to create new ones for each workspace. If you share a root S3 bucket for multiple workspaces in an account, data on the root S3 bucket is partitioned into separate directories by workspace.
    • databricks.MwsNetworks - (optional, but recommended) You can share one customer-managed VPC with multiple workspaces in a single account. You do not have to create a new VPC for each workspace. However, you cannot reuse subnets or security groups with other resources, including other workspaces or non-Databricks resources. If you plan to share one VPC with multiple workspaces, be sure to size your VPC and subnets accordingly. Because a Databricks databricks.MwsNetworks encapsulates this information, you cannot reuse it across workspaces.
    • databricks.MwsCustomerManagedKeys - You can share a customer-managed key across workspaces.
    // Example (TypeScript): provision a Databricks workspace on AWS by
    // registering account-level credentials, a root S3 bucket, and a
    // customer-managed VPC, then creating the workspace itself.
    // NOTE(review): `prefix`, `crossaccountArn`, `rootBucket`, `vpcId`,
    // `subnetsPrivate`, `securityGroup`, and `region` are placeholders the
    // reader must define — this snippet does not declare them.
    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const config = new pulumi.Config();
    // Account ID that can be found in the dropdown under the email address in the upper-right corner of https://accounts.cloud.databricks.com/
    const databricksAccountId = config.requireObject("databricksAccountId");
    // register cross-account ARN
    const _this = new databricks.MwsCredentials("this", {
        accountId: databricksAccountId,
        credentialsName: `${prefix}-creds`,
        roleArn: crossaccountArn,
    });
    // register root bucket
    const thisMwsStorageConfigurations = new databricks.MwsStorageConfigurations("this", {
        accountId: databricksAccountId,
        storageConfigurationName: `${prefix}-storage`,
        bucketName: rootBucket,
    });
    // register VPC
    const thisMwsNetworks = new databricks.MwsNetworks("this", {
        accountId: databricksAccountId,
        networkName: `${prefix}-network`,
        vpcId: vpcId,
        subnetIds: subnetsPrivate,
        securityGroupIds: [securityGroup],
    });
    // create workspace in given VPC with DBFS on root bucket
    const thisMwsWorkspaces = new databricks.MwsWorkspaces("this", {
        accountId: databricksAccountId,
        workspaceName: prefix,
        awsRegion: region,
        credentialsId: _this.credentialsId,
        storageConfigurationId: thisMwsStorageConfigurations.storageConfigurationId,
        networkId: thisMwsNetworks.networkId,
        // empty token block — token.tokenValue is read in the export below
        token: {},
    });
    // Export the workspace token value resolved from the created workspace.
    export const databricksToken = thisMwsWorkspaces.token.apply(token => token?.tokenValue);
    
    import pulumi
    import pulumi_databricks as databricks
    
    config = pulumi.Config()
    # Databricks account ID, shown in the dropdown under the email address in
    # the upper-right corner of https://accounts.cloud.databricks.com/
    databricks_account_id = config.require_object("databricksAccountId")
    
    # Register the cross-account IAM role ARN as account-level credentials.
    credentials = databricks.MwsCredentials(
        "this",
        account_id=databricks_account_id,
        credentials_name=f"{prefix}-creds",
        role_arn=crossaccount_arn,
    )
    
    # Register the root S3 bucket used for workspace (DBFS) storage.
    storage_configuration = databricks.MwsStorageConfigurations(
        "this",
        account_id=databricks_account_id,
        storage_configuration_name=f"{prefix}-storage",
        bucket_name=root_bucket,
    )
    
    # Register the customer-managed VPC.
    network = databricks.MwsNetworks(
        "this",
        account_id=databricks_account_id,
        network_name=f"{prefix}-network",
        vpc_id=vpc_id,
        subnet_ids=subnets_private,
        security_group_ids=[security_group],
    )
    
    # Create the workspace in the registered VPC, with DBFS on the root bucket.
    workspace = databricks.MwsWorkspaces(
        "this",
        account_id=databricks_account_id,
        workspace_name=prefix,
        aws_region=region,
        credentials_id=credentials.credentials_id,
        storage_configuration_id=storage_configuration.storage_configuration_id,
        network_id=network.network_id,
        token={},
    )
    
    # Expose the generated workspace token value as a stack output.
    pulumi.export("databricksToken", workspace.token.token_value)
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cfg := config.New(ctx, "")
    		// Account ID that can be found in the dropdown under the email address in the upper-right corner of https://accounts.cloud.databricks.com/
    		databricksAccountId := cfg.RequireObject("databricksAccountId")
    		// register cross-account ARN
    		this, err := databricks.NewMwsCredentials(ctx, "this", &databricks.MwsCredentialsArgs{
    			AccountId:       pulumi.Any(databricksAccountId),
    			CredentialsName: pulumi.Sprintf("%v-creds", prefix),
    			RoleArn:         pulumi.Any(crossaccountArn),
    		})
    		if err != nil {
    			return err
    		}
    		// register root bucket
    		thisMwsStorageConfigurations, err := databricks.NewMwsStorageConfigurations(ctx, "this", &databricks.MwsStorageConfigurationsArgs{
    			AccountId:                pulumi.Any(databricksAccountId),
    			StorageConfigurationName: pulumi.Sprintf("%v-storage", prefix),
    			BucketName:               pulumi.Any(rootBucket),
    		})
    		if err != nil {
    			return err
    		}
    		// register VPC
    		thisMwsNetworks, err := databricks.NewMwsNetworks(ctx, "this", &databricks.MwsNetworksArgs{
    			AccountId:   pulumi.Any(databricksAccountId),
    			NetworkName: pulumi.Sprintf("%v-network", prefix),
    			VpcId:       pulumi.Any(vpcId),
    			SubnetIds:   pulumi.Any(subnetsPrivate),
    			SecurityGroupIds: pulumi.StringArray{
    				securityGroup,
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// create workspace in given VPC with DBFS on root bucket
    		thisMwsWorkspaces, err := databricks.NewMwsWorkspaces(ctx, "this", &databricks.MwsWorkspacesArgs{
    			AccountId:              pulumi.Any(databricksAccountId),
    			WorkspaceName:          pulumi.Any(prefix),
    			AwsRegion:              pulumi.Any(region),
    			CredentialsId:          this.CredentialsId,
    			StorageConfigurationId: thisMwsStorageConfigurations.StorageConfigurationId,
    			NetworkId:              thisMwsNetworks.NetworkId,
    			Token:                  &databricks.MwsWorkspacesTokenArgs{},
    		})
    		if err != nil {
    			return err
    		}
    		ctx.Export("databricksToken", thisMwsWorkspaces.Token.ApplyT(func(token databricks.MwsWorkspacesToken) (*string, error) {
    			return &token.TokenValue, nil
    		}).(pulumi.StringPtrOutput))
    		return nil
    	})
    }
    
    // Example (C#): provision a Databricks workspace on AWS by registering
    // credentials, root-bucket storage, and a customer-managed VPC.
    // NOTE(review): prefix, crossaccountArn, rootBucket, vpcId, subnetsPrivate,
    // securityGroup, and region are placeholders not declared in this snippet.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var config = new Config();
        // Account ID that can be found in the dropdown under the email address in the upper-right corner of https://accounts.cloud.databricks.com/
        var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId");
        // register cross-account ARN
        var @this = new Databricks.MwsCredentials("this", new()
        {
            AccountId = databricksAccountId,
            CredentialsName = $"{prefix}-creds",
            RoleArn = crossaccountArn,
        });
    
        // register root bucket
        var thisMwsStorageConfigurations = new Databricks.MwsStorageConfigurations("this", new()
        {
            AccountId = databricksAccountId,
            StorageConfigurationName = $"{prefix}-storage",
            BucketName = rootBucket,
        });
    
        // register VPC
        var thisMwsNetworks = new Databricks.MwsNetworks("this", new()
        {
            AccountId = databricksAccountId,
            NetworkName = $"{prefix}-network",
            VpcId = vpcId,
            SubnetIds = subnetsPrivate,
            SecurityGroupIds = new[]
            {
                securityGroup,
            },
        });
    
        // create workspace in given VPC with DBFS on root bucket
        var thisMwsWorkspaces = new Databricks.MwsWorkspaces("this", new()
        {
            AccountId = databricksAccountId,
            WorkspaceName = prefix,
            AwsRegion = region,
            CredentialsId = @this.CredentialsId,
            StorageConfigurationId = thisMwsStorageConfigurations.StorageConfigurationId,
            NetworkId = thisMwsNetworks.NetworkId,
            // Empty token block (was `Token = null`): every other language
            // example passes an empty token object, and the output below reads
            // Token.TokenValue, which requires the token to be provisioned.
            Token = new Databricks.Inputs.MwsWorkspacesTokenArgs(),
        });
    
        return new Dictionary<string, object?>
        {
            ["databricksToken"] = thisMwsWorkspaces.Token.Apply(token => token?.TokenValue),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.MwsCredentials;
    import com.pulumi.databricks.MwsCredentialsArgs;
    import com.pulumi.databricks.MwsStorageConfigurations;
    import com.pulumi.databricks.MwsStorageConfigurationsArgs;
    import com.pulumi.databricks.MwsNetworks;
    import com.pulumi.databricks.MwsNetworksArgs;
    import com.pulumi.databricks.MwsWorkspaces;
    import com.pulumi.databricks.MwsWorkspacesArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesTokenArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    // Example (Java): provision a Databricks workspace on AWS by registering
    // credentials, root-bucket storage, and a customer-managed VPC.
    // NOTE(review): prefix, crossaccountArn, rootBucket, vpcId, subnetsPrivate,
    // securityGroup, and region are placeholders not declared in this snippet.
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var config = ctx.config();
            final var databricksAccountId = config.get("databricksAccountId");
            // register cross-account ARN
            var this_ = new MwsCredentials("this", MwsCredentialsArgs.builder()
                .accountId(databricksAccountId)
                .credentialsName(String.format("%s-creds", prefix))
                .roleArn(crossaccountArn)
                .build());
    
            // register root bucket
            var thisMwsStorageConfigurations = new MwsStorageConfigurations("thisMwsStorageConfigurations", MwsStorageConfigurationsArgs.builder()
                .accountId(databricksAccountId)
                .storageConfigurationName(String.format("%s-storage", prefix))
                .bucketName(rootBucket)
                .build());
    
            // register VPC
            var thisMwsNetworks = new MwsNetworks("thisMwsNetworks", MwsNetworksArgs.builder()
                .accountId(databricksAccountId)
                .networkName(String.format("%s-network", prefix))
                .vpcId(vpcId)
                .subnetIds(subnetsPrivate)
                .securityGroupIds(securityGroup)
                .build());
    
            // create workspace in given VPC with DBFS on root bucket
            var thisMwsWorkspaces = new MwsWorkspaces("thisMwsWorkspaces", MwsWorkspacesArgs.builder()
                .accountId(databricksAccountId)
                .workspaceName(prefix)
                .awsRegion(region)
                .credentialsId(this_.credentialsId())
                .storageConfigurationId(thisMwsStorageConfigurations.storageConfigurationId())
                .networkId(thisMwsNetworks.networkId())
                // empty token block (was `.token()`, which has no zero-argument
                // overload; MwsWorkspacesTokenArgs was imported but unused)
                .token(MwsWorkspacesTokenArgs.builder().build())
                .build());
    
            ctx.export("databricksToken", thisMwsWorkspaces.token().applyValue(token -> token.tokenValue()));
        }
    }
    
    # Example (YAML): provision a Databricks workspace on AWS.
    # NOTE(review): ${prefix}, ${crossaccountArn}, ${rootBucket}, ${vpcId},
    # ${subnetsPrivate}, ${securityGroup}, and ${region} are placeholders that
    # must be supplied elsewhere — they are not declared in this snippet.
    configuration:
      databricksAccountId:
        type: dynamic
    resources:
      # register cross-account ARN
      this:
        type: databricks:MwsCredentials
        properties:
          accountId: ${databricksAccountId}
          credentialsName: ${prefix}-creds
          roleArn: ${crossaccountArn}
      # register root bucket
      thisMwsStorageConfigurations:
        type: databricks:MwsStorageConfigurations
        name: this
        properties:
          accountId: ${databricksAccountId}
          storageConfigurationName: ${prefix}-storage
          bucketName: ${rootBucket}
      # register VPC
      thisMwsNetworks:
        type: databricks:MwsNetworks
        name: this
        properties:
          accountId: ${databricksAccountId}
          networkName: ${prefix}-network
          vpcId: ${vpcId}
          subnetIds: ${subnetsPrivate}
          securityGroupIds:
            - ${securityGroup}
      # create workspace in given VPC with DBFS on root bucket
      thisMwsWorkspaces:
        type: databricks:MwsWorkspaces
        name: this
        properties:
          accountId: ${databricksAccountId}
          workspaceName: ${prefix}
          awsRegion: ${region}
          credentialsId: ${this.credentialsId}
          storageConfigurationId: ${thisMwsStorageConfigurations.storageConfigurationId}
          networkId: ${thisMwsNetworks.networkId}
          # empty token block — tokenValue is read in outputs below
          token: {}
    outputs:
      # workspace personal access token generated at creation time
      databricksToken: ${thisMwsWorkspaces.token.tokenValue}
    

    Creating a Databricks on AWS workspace with Databricks-Managed VPC

    VPCs

    By default, Databricks creates a VPC in your AWS account for each workspace. Databricks uses it for running clusters in the workspace. Optionally, you can use your VPC for the workspace, using the feature customer-managed VPC. Databricks recommends that you provide your VPC with databricks.MwsNetworks so that you can configure it according to your organization’s enterprise cloud standards while still conforming to Databricks requirements. You cannot migrate an existing workspace to your VPC. Please see the differences, expressed in the required IAM policy actions, described on this page.

    import * as pulumi from "@pulumi/pulumi";
    import * as aws from "@pulumi/aws";
    import * as databricks from "@pulumi/databricks";
    import * as random from "@pulumi/random";
    
    const config = new pulumi.Config();
    // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
    const databricksAccountId = config.requireObject("databricksAccountId");
    const naming = new random.index.String("naming", {
        special: false,
        upper: false,
        length: 6,
    });
    const prefix = `dltp${naming.result}`;
    const this = databricks.getAwsAssumeRolePolicy({
        externalId: databricksAccountId,
    });
    const crossAccountRole = new aws.iam.Role("cross_account_role", {
        name: `${prefix}-crossaccount`,
        assumeRolePolicy: _this.then(_this => _this.json),
        tags: tags,
    });
    const thisGetAwsCrossAccountPolicy = databricks.getAwsCrossAccountPolicy({});
    const thisRolePolicy = new aws.iam.RolePolicy("this", {
        name: `${prefix}-policy`,
        role: crossAccountRole.id,
        policy: thisGetAwsCrossAccountPolicy.then(thisGetAwsCrossAccountPolicy => thisGetAwsCrossAccountPolicy.json),
    });
    const thisMwsCredentials = new databricks.MwsCredentials("this", {
        accountId: databricksAccountId,
        credentialsName: `${prefix}-creds`,
        roleArn: crossAccountRole.arn,
    });
    const rootStorageBucket = new aws.s3.BucketV2("root_storage_bucket", {
        bucket: `${prefix}-rootbucket`,
        acl: "private",
        forceDestroy: true,
        tags: tags,
    });
    const rootVersioning = new aws.s3.BucketVersioningV2("root_versioning", {
        bucket: rootStorageBucket.id,
        versioningConfiguration: {
            status: "Disabled",
        },
    });
    const rootStorageBucketBucketServerSideEncryptionConfigurationV2 = new aws.s3.BucketServerSideEncryptionConfigurationV2("root_storage_bucket", {
        bucket: rootStorageBucket.bucket,
        rules: [{
            applyServerSideEncryptionByDefault: {
                sseAlgorithm: "AES256",
            },
        }],
    });
    const rootStorageBucketBucketPublicAccessBlock = new aws.s3.BucketPublicAccessBlock("root_storage_bucket", {
        bucket: rootStorageBucket.id,
        blockPublicAcls: true,
        blockPublicPolicy: true,
        ignorePublicAcls: true,
        restrictPublicBuckets: true,
    }, {
        dependsOn: [rootStorageBucket],
    });
    const thisGetAwsBucketPolicy = databricks.getAwsBucketPolicyOutput({
        bucket: rootStorageBucket.bucket,
    });
    const rootBucketPolicy = new aws.s3.BucketPolicy("root_bucket_policy", {
        bucket: rootStorageBucket.id,
        policy: thisGetAwsBucketPolicy.apply(thisGetAwsBucketPolicy => thisGetAwsBucketPolicy.json),
    }, {
        dependsOn: [rootStorageBucketBucketPublicAccessBlock],
    });
    const thisMwsStorageConfigurations = new databricks.MwsStorageConfigurations("this", {
        accountId: databricksAccountId,
        storageConfigurationName: `${prefix}-storage`,
        bucketName: rootStorageBucket.bucket,
    });
    const thisMwsWorkspaces = new databricks.MwsWorkspaces("this", {
        accountId: databricksAccountId,
        workspaceName: prefix,
        awsRegion: "us-east-1",
        credentialsId: thisMwsCredentials.credentialsId,
        storageConfigurationId: thisMwsStorageConfigurations.storageConfigurationId,
        token: {},
        customTags: {
            SoldToCode: "1234",
        },
    });
    export const databricksToken = thisMwsWorkspaces.token.apply(token => token?.tokenValue);
    
    # Example (Python): full Databricks-on-AWS workspace with a
    # Databricks-managed VPC: cross-account IAM role, root S3 bucket, and the
    # workspace itself.
    # NOTE(review): `tags` is a placeholder not declared in this snippet.
    import pulumi
    import pulumi_aws as aws
    import pulumi_databricks as databricks
    import pulumi_random as random
    
    config = pulumi.Config()
    # Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
    databricks_account_id = config.require_object("databricksAccountId")
    naming = random.index.String("naming",
        special=False,
        upper=False,
        length=6)
    prefix = f"dltp{naming['result']}"
    this = databricks.get_aws_assume_role_policy(external_id=databricks_account_id)
    cross_account_role = aws.iam.Role("cross_account_role",
        name=f"{prefix}-crossaccount",
        assume_role_policy=this.json,
        tags=tags)
    this_get_aws_cross_account_policy = databricks.get_aws_cross_account_policy()
    this_role_policy = aws.iam.RolePolicy("this",
        name=f"{prefix}-policy",
        role=cross_account_role.id,
        policy=this_get_aws_cross_account_policy.json)
    this_mws_credentials = databricks.MwsCredentials("this",
        account_id=databricks_account_id,
        credentials_name=f"{prefix}-creds",
        role_arn=cross_account_role.arn)
    root_storage_bucket = aws.s3.BucketV2("root_storage_bucket",
        bucket=f"{prefix}-rootbucket",
        acl="private",
        force_destroy=True,
        tags=tags)
    root_versioning = aws.s3.BucketVersioningV2("root_versioning",
        bucket=root_storage_bucket.id,
        versioning_configuration={
            "status": "Disabled",
        })
    root_storage_bucket_bucket_server_side_encryption_configuration_v2 = aws.s3.BucketServerSideEncryptionConfigurationV2("root_storage_bucket",
        bucket=root_storage_bucket.bucket,
        rules=[{
            "apply_server_side_encryption_by_default": {
                "sse_algorithm": "AES256",
            },
        }])
    root_storage_bucket_bucket_public_access_block = aws.s3.BucketPublicAccessBlock("root_storage_bucket",
        bucket=root_storage_bucket.id,
        block_public_acls=True,
        block_public_policy=True,
        ignore_public_acls=True,
        restrict_public_buckets=True,
        opts = pulumi.ResourceOptions(depends_on=[root_storage_bucket]))
    this_get_aws_bucket_policy = databricks.get_aws_bucket_policy_output(bucket=root_storage_bucket.bucket)
    root_bucket_policy = aws.s3.BucketPolicy("root_bucket_policy",
        bucket=root_storage_bucket.id,
        policy=this_get_aws_bucket_policy.json,
        opts = pulumi.ResourceOptions(depends_on=[root_storage_bucket_bucket_public_access_block]))
    this_mws_storage_configurations = databricks.MwsStorageConfigurations("this",
        account_id=databricks_account_id,
        storage_configuration_name=f"{prefix}-storage",
        bucket_name=root_storage_bucket.bucket)
    this_mws_workspaces = databricks.MwsWorkspaces("this",
        account_id=databricks_account_id,
        workspace_name=prefix,
        aws_region="us-east-1",
        credentials_id=this_mws_credentials.credentials_id,
        storage_configuration_id=this_mws_storage_configurations.storage_configuration_id,
        # empty token block — token_value is exported below
        token={},
        custom_tags={
            "SoldToCode": "1234",
        })
    pulumi.export("databricksToken", this_mws_workspaces.token.token_value)
    
    // Example (Go): full Databricks-on-AWS workspace with a Databricks-managed
    // VPC: cross-account IAM role, root S3 bucket, and the workspace itself.
    // NOTE(review): `tags` is a placeholder not declared in this snippet.
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
    	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi-random/sdk/v4/go/random"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cfg := config.New(ctx, "")
    		// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
    		databricksAccountId := cfg.RequireObject("databricksAccountId")
    		// NOTE(review): random.StringArgs fields normally take pulumi input
    		// types (pulumi.Bool / pulumi.Int); the raw literals below look like a
    		// doc-generation artifact — verify against the random SDK.
    		naming, err := random.NewString(ctx, "naming", &random.StringArgs{
    			Special: false,
    			Upper:   false,
    			Length:  6,
    		})
    		if err != nil {
    			return err
    		}
    		// NOTE(review): fmt.Sprintf formats the Output object naming.Result,
    		// not its resolved value; pulumi.Sprintf is the usual pattern — confirm.
    		prefix := fmt.Sprintf("dltp%v", naming.Result)
    		this, err := databricks.GetAwsAssumeRolePolicy(ctx, &databricks.GetAwsAssumeRolePolicyArgs{
    			ExternalId: databricksAccountId,
    		}, nil)
    		if err != nil {
    			return err
    		}
    		crossAccountRole, err := iam.NewRole(ctx, "cross_account_role", &iam.RoleArgs{
    			Name:             pulumi.Sprintf("%v-crossaccount", prefix),
    			AssumeRolePolicy: pulumi.String(this.Json),
    			Tags:             pulumi.Any(tags),
    		})
    		if err != nil {
    			return err
    		}
    		thisGetAwsCrossAccountPolicy, err := databricks.GetAwsCrossAccountPolicy(ctx, &databricks.GetAwsCrossAccountPolicyArgs{}, nil)
    		if err != nil {
    			return err
    		}
    		_, err = iam.NewRolePolicy(ctx, "this", &iam.RolePolicyArgs{
    			Name:   pulumi.Sprintf("%v-policy", prefix),
    			Role:   crossAccountRole.ID(),
    			Policy: pulumi.String(thisGetAwsCrossAccountPolicy.Json),
    		})
    		if err != nil {
    			return err
    		}
    		thisMwsCredentials, err := databricks.NewMwsCredentials(ctx, "this", &databricks.MwsCredentialsArgs{
    			AccountId:       pulumi.Any(databricksAccountId),
    			CredentialsName: pulumi.Sprintf("%v-creds", prefix),
    			RoleArn:         crossAccountRole.Arn,
    		})
    		if err != nil {
    			return err
    		}
    		rootStorageBucket, err := s3.NewBucketV2(ctx, "root_storage_bucket", &s3.BucketV2Args{
    			Bucket:       pulumi.Sprintf("%v-rootbucket", prefix),
    			Acl:          pulumi.String("private"),
    			ForceDestroy: pulumi.Bool(true),
    			Tags:         pulumi.Any(tags),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = s3.NewBucketVersioningV2(ctx, "root_versioning", &s3.BucketVersioningV2Args{
    			Bucket: rootStorageBucket.ID(),
    			VersioningConfiguration: &s3.BucketVersioningV2VersioningConfigurationArgs{
    				Status: pulumi.String("Disabled"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = s3.NewBucketServerSideEncryptionConfigurationV2(ctx, "root_storage_bucket", &s3.BucketServerSideEncryptionConfigurationV2Args{
    			Bucket: rootStorageBucket.Bucket,
    			Rules: s3.BucketServerSideEncryptionConfigurationV2RuleArray{
    				&s3.BucketServerSideEncryptionConfigurationV2RuleArgs{
    					ApplyServerSideEncryptionByDefault: &s3.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs{
    						SseAlgorithm: pulumi.String("AES256"),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		rootStorageBucketBucketPublicAccessBlock, err := s3.NewBucketPublicAccessBlock(ctx, "root_storage_bucket", &s3.BucketPublicAccessBlockArgs{
    			Bucket:                rootStorageBucket.ID(),
    			BlockPublicAcls:       pulumi.Bool(true),
    			BlockPublicPolicy:     pulumi.Bool(true),
    			IgnorePublicAcls:      pulumi.Bool(true),
    			RestrictPublicBuckets: pulumi.Bool(true),
    		}, pulumi.DependsOn([]pulumi.Resource{
    			rootStorageBucket,
    		}))
    		if err != nil {
    			return err
    		}
    		thisGetAwsBucketPolicy := databricks.GetAwsBucketPolicyOutput(ctx, databricks.GetAwsBucketPolicyOutputArgs{
    			Bucket: rootStorageBucket.Bucket,
    		}, nil)
    		_, err = s3.NewBucketPolicy(ctx, "root_bucket_policy", &s3.BucketPolicyArgs{
    			Bucket: rootStorageBucket.ID(),
    			Policy: pulumi.String(thisGetAwsBucketPolicy.ApplyT(func(thisGetAwsBucketPolicy databricks.GetAwsBucketPolicyResult) (*string, error) {
    				return &thisGetAwsBucketPolicy.Json, nil
    			}).(pulumi.StringPtrOutput)),
    		}, pulumi.DependsOn([]pulumi.Resource{
    			rootStorageBucketBucketPublicAccessBlock,
    		}))
    		if err != nil {
    			return err
    		}
    		thisMwsStorageConfigurations, err := databricks.NewMwsStorageConfigurations(ctx, "this", &databricks.MwsStorageConfigurationsArgs{
    			AccountId:                pulumi.Any(databricksAccountId),
    			StorageConfigurationName: pulumi.Sprintf("%v-storage", prefix),
    			BucketName:               rootStorageBucket.Bucket,
    		})
    		if err != nil {
    			return err
    		}
    		thisMwsWorkspaces, err := databricks.NewMwsWorkspaces(ctx, "this", &databricks.MwsWorkspacesArgs{
    			AccountId:              pulumi.Any(databricksAccountId),
    			WorkspaceName:          pulumi.String(prefix),
    			AwsRegion:              pulumi.String("us-east-1"),
    			CredentialsId:          thisMwsCredentials.CredentialsId,
    			StorageConfigurationId: thisMwsStorageConfigurations.StorageConfigurationId,
    			// empty token block — TokenValue is exported below
    			Token:                  &databricks.MwsWorkspacesTokenArgs{},
    			CustomTags: pulumi.StringMap{
    				"SoldToCode": pulumi.String("1234"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		ctx.Export("databricksToken", thisMwsWorkspaces.Token.ApplyT(func(token databricks.MwsWorkspacesToken) (*string, error) {
    			return &token.TokenValue, nil
    		}).(pulumi.StringPtrOutput))
    		return nil
    	})
    }
    
    // Example (C#): full Databricks-on-AWS workspace with a Databricks-managed
    // VPC: cross-account IAM role, root S3 bucket, and the workspace itself.
    // NOTE(review): `tags` is a placeholder not declared in this snippet.
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aws = Pulumi.Aws;
    using Databricks = Pulumi.Databricks;
    using Random = Pulumi.Random;
    
    return await Deployment.RunAsync(() => 
    {
        var config = new Config();
        // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
        var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId");
        var naming = new Random.Index.String("naming", new()
        {
            Special = false,
            Upper = false,
            Length = 6,
        });
    
        // NOTE(review): interpolating naming.Result (an Output<string>) into a
        // plain string formats the Output object, not its resolved value — confirm.
        var prefix = $"dltp{naming.Result}";
    
        var @this = Databricks.GetAwsAssumeRolePolicy.Invoke(new()
        {
            ExternalId = databricksAccountId,
        });
    
        var crossAccountRole = new Aws.Iam.Role("cross_account_role", new()
        {
            Name = $"{prefix}-crossaccount",
            AssumeRolePolicy = @this.Apply(@this => @this.Apply(getAwsAssumeRolePolicyResult => getAwsAssumeRolePolicyResult.Json)),
            Tags = tags,
        });
    
        var thisGetAwsCrossAccountPolicy = Databricks.GetAwsCrossAccountPolicy.Invoke();
    
        var thisRolePolicy = new Aws.Iam.RolePolicy("this", new()
        {
            Name = $"{prefix}-policy",
            Role = crossAccountRole.Id,
            Policy = thisGetAwsCrossAccountPolicy.Apply(getAwsCrossAccountPolicyResult => getAwsCrossAccountPolicyResult.Json),
        });
    
        var thisMwsCredentials = new Databricks.MwsCredentials("this", new()
        {
            AccountId = databricksAccountId,
            CredentialsName = $"{prefix}-creds",
            RoleArn = crossAccountRole.Arn,
        });
    
        var rootStorageBucket = new Aws.S3.BucketV2("root_storage_bucket", new()
        {
            Bucket = $"{prefix}-rootbucket",
            Acl = "private",
            ForceDestroy = true,
            Tags = tags,
        });
    
        var rootVersioning = new Aws.S3.BucketVersioningV2("root_versioning", new()
        {
            Bucket = rootStorageBucket.Id,
            VersioningConfiguration = new Aws.S3.Inputs.BucketVersioningV2VersioningConfigurationArgs
            {
                Status = "Disabled",
            },
        });
    
        var rootStorageBucketBucketServerSideEncryptionConfigurationV2 = new Aws.S3.BucketServerSideEncryptionConfigurationV2("root_storage_bucket", new()
        {
            Bucket = rootStorageBucket.Bucket,
            Rules = new[]
            {
                new Aws.S3.Inputs.BucketServerSideEncryptionConfigurationV2RuleArgs
                {
                    ApplyServerSideEncryptionByDefault = new Aws.S3.Inputs.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs
                    {
                        SseAlgorithm = "AES256",
                    },
                },
            },
        });
    
        var rootStorageBucketBucketPublicAccessBlock = new Aws.S3.BucketPublicAccessBlock("root_storage_bucket", new()
        {
            Bucket = rootStorageBucket.Id,
            BlockPublicAcls = true,
            BlockPublicPolicy = true,
            IgnorePublicAcls = true,
            RestrictPublicBuckets = true,
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                rootStorageBucket,
            },
        });
    
        var thisGetAwsBucketPolicy = Databricks.GetAwsBucketPolicy.Invoke(new()
        {
            Bucket = rootStorageBucket.Bucket,
        });
    
        var rootBucketPolicy = new Aws.S3.BucketPolicy("root_bucket_policy", new()
        {
            Bucket = rootStorageBucket.Id,
            Policy = thisGetAwsBucketPolicy.Apply(getAwsBucketPolicyResult => getAwsBucketPolicyResult.Json),
        }, new CustomResourceOptions
        {
            DependsOn =
            {
                rootStorageBucketBucketPublicAccessBlock,
            },
        });
    
        var thisMwsStorageConfigurations = new Databricks.MwsStorageConfigurations("this", new()
        {
            AccountId = databricksAccountId,
            StorageConfigurationName = $"{prefix}-storage",
            BucketName = rootStorageBucket.Bucket,
        });
    
        var thisMwsWorkspaces = new Databricks.MwsWorkspaces("this", new()
        {
            AccountId = databricksAccountId,
            WorkspaceName = prefix,
            AwsRegion = "us-east-1",
            CredentialsId = thisMwsCredentials.CredentialsId,
            StorageConfigurationId = thisMwsStorageConfigurations.StorageConfigurationId,
            // Empty token block (was `Token = null`): the other language
            // examples pass an empty token object, and the output below reads
            // Token.TokenValue, which requires the token to be provisioned.
            Token = new Databricks.Inputs.MwsWorkspacesTokenArgs(),
            CustomTags = 
            {
                { "SoldToCode", "1234" },
            },
        });
    
        return new Dictionary<string, object?>
        {
            ["databricksToken"] = thisMwsWorkspaces.Token.Apply(token => token?.TokenValue),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.random.RandomString;
    import com.pulumi.random.RandomStringArgs;
    import com.pulumi.databricks.DatabricksFunctions;
    import com.pulumi.databricks.inputs.GetAwsAssumeRolePolicyArgs;
    import com.pulumi.aws.iam.Role;
    import com.pulumi.aws.iam.RoleArgs;
    import com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;
    import com.pulumi.aws.iam.RolePolicy;
    import com.pulumi.aws.iam.RolePolicyArgs;
    import com.pulumi.databricks.MwsCredentials;
    import com.pulumi.databricks.MwsCredentialsArgs;
    import com.pulumi.aws.s3.BucketV2;
    import com.pulumi.aws.s3.BucketV2Args;
    import com.pulumi.aws.s3.BucketVersioningV2;
    import com.pulumi.aws.s3.BucketVersioningV2Args;
    import com.pulumi.aws.s3.inputs.BucketVersioningV2VersioningConfigurationArgs;
    import com.pulumi.aws.s3.BucketServerSideEncryptionConfigurationV2;
    import com.pulumi.aws.s3.BucketServerSideEncryptionConfigurationV2Args;
    import com.pulumi.aws.s3.inputs.BucketServerSideEncryptionConfigurationV2RuleArgs;
    import com.pulumi.aws.s3.inputs.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs;
    import com.pulumi.aws.s3.BucketPublicAccessBlock;
    import com.pulumi.aws.s3.BucketPublicAccessBlockArgs;
    import com.pulumi.databricks.inputs.GetAwsBucketPolicyArgs;
    import com.pulumi.aws.s3.BucketPolicy;
    import com.pulumi.aws.s3.BucketPolicyArgs;
    import com.pulumi.databricks.MwsStorageConfigurations;
    import com.pulumi.databricks.MwsStorageConfigurationsArgs;
    import com.pulumi.databricks.MwsWorkspaces;
    import com.pulumi.databricks.MwsWorkspacesArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesTokenArgs;
    import com.pulumi.resources.CustomResourceOptions;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var config = ctx.config();
            final var databricksAccountId = config.get("databricksAccountId");
        var naming = new RandomString("naming", RandomStringArgs.builder()
            .special(false)
            .upper(false)
            .length(6)
            .build());
    
            final var prefix = String.format("dltp%s", naming.result());
    
        final var this_ = DatabricksFunctions.getAwsAssumeRolePolicy(GetAwsAssumeRolePolicyArgs.builder()
                .externalId(databricksAccountId)
                .build());
    
            var crossAccountRole = new Role("crossAccountRole", RoleArgs.builder()
                .name(String.format("%s-crossaccount", prefix))
            .assumeRolePolicy(this_.applyValue(getAwsAssumeRolePolicyResult -> getAwsAssumeRolePolicyResult.json()))
                .tags(tags)
                .build());
    
            final var thisGetAwsCrossAccountPolicy = DatabricksFunctions.getAwsCrossAccountPolicy();
    
            var thisRolePolicy = new RolePolicy("thisRolePolicy", RolePolicyArgs.builder()
                .name(String.format("%s-policy", prefix))
                .role(crossAccountRole.id())
                .policy(thisGetAwsCrossAccountPolicy.applyValue(getAwsCrossAccountPolicyResult -> getAwsCrossAccountPolicyResult.json()))
                .build());
    
            var thisMwsCredentials = new MwsCredentials("thisMwsCredentials", MwsCredentialsArgs.builder()
                .accountId(databricksAccountId)
                .credentialsName(String.format("%s-creds", prefix))
                .roleArn(crossAccountRole.arn())
                .build());
    
            var rootStorageBucket = new BucketV2("rootStorageBucket", BucketV2Args.builder()
                .bucket(String.format("%s-rootbucket", prefix))
                .acl("private")
                .forceDestroy(true)
                .tags(tags)
                .build());
    
            var rootVersioning = new BucketVersioningV2("rootVersioning", BucketVersioningV2Args.builder()
                .bucket(rootStorageBucket.id())
                .versioningConfiguration(BucketVersioningV2VersioningConfigurationArgs.builder()
                    .status("Disabled")
                    .build())
                .build());
    
            var rootStorageBucketBucketServerSideEncryptionConfigurationV2 = new BucketServerSideEncryptionConfigurationV2("rootStorageBucketBucketServerSideEncryptionConfigurationV2", BucketServerSideEncryptionConfigurationV2Args.builder()
                .bucket(rootStorageBucket.bucket())
                .rules(BucketServerSideEncryptionConfigurationV2RuleArgs.builder()
                    .applyServerSideEncryptionByDefault(BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefaultArgs.builder()
                        .sseAlgorithm("AES256")
                        .build())
                    .build())
                .build());
    
            var rootStorageBucketBucketPublicAccessBlock = new BucketPublicAccessBlock("rootStorageBucketBucketPublicAccessBlock", BucketPublicAccessBlockArgs.builder()
                .bucket(rootStorageBucket.id())
                .blockPublicAcls(true)
                .blockPublicPolicy(true)
                .ignorePublicAcls(true)
                .restrictPublicBuckets(true)
                .build(), CustomResourceOptions.builder()
                    .dependsOn(rootStorageBucket)
                    .build());
    
            final var thisGetAwsBucketPolicy = DatabricksFunctions.getAwsBucketPolicy(GetAwsBucketPolicyArgs.builder()
                .bucket(rootStorageBucket.bucket())
                .build());
    
            var rootBucketPolicy = new BucketPolicy("rootBucketPolicy", BucketPolicyArgs.builder()
                .bucket(rootStorageBucket.id())
            .policy(thisGetAwsBucketPolicy.applyValue(getAwsBucketPolicyResult -> getAwsBucketPolicyResult.json()))
                .build(), CustomResourceOptions.builder()
                    .dependsOn(rootStorageBucketBucketPublicAccessBlock)
                    .build());
    
            var thisMwsStorageConfigurations = new MwsStorageConfigurations("thisMwsStorageConfigurations", MwsStorageConfigurationsArgs.builder()
                .accountId(databricksAccountId)
                .storageConfigurationName(String.format("%s-storage", prefix))
                .bucketName(rootStorageBucket.bucket())
                .build());
    
            var thisMwsWorkspaces = new MwsWorkspaces("thisMwsWorkspaces", MwsWorkspacesArgs.builder()
                .accountId(databricksAccountId)
                .workspaceName(prefix)
                .awsRegion("us-east-1")
                .credentialsId(thisMwsCredentials.credentialsId())
                .storageConfigurationId(thisMwsStorageConfigurations.storageConfigurationId())
            .token(MwsWorkspacesTokenArgs.builder().build())
                .customTags(Map.of("SoldToCode", "1234"))
                .build());
    
            ctx.export("databricksToken", thisMwsWorkspaces.token().applyValue(token -> token.tokenValue()));
        }
    }
    
    configuration:
      databricksAccountId:
        type: dynamic
    resources:
      naming:
        type: random:string
        properties:
          special: false
          upper: false
          length: 6
      crossAccountRole:
        type: aws:iam:Role
        name: cross_account_role
        properties:
          name: ${prefix}-crossaccount
          assumeRolePolicy: ${this.json}
          tags: ${tags}
      thisRolePolicy:
        type: aws:iam:RolePolicy
        name: this
        properties:
          name: ${prefix}-policy
          role: ${crossAccountRole.id}
          policy: ${thisGetAwsCrossAccountPolicy.json}
      thisMwsCredentials:
        type: databricks:MwsCredentials
        name: this
        properties:
          accountId: ${databricksAccountId}
          credentialsName: ${prefix}-creds
          roleArn: ${crossAccountRole.arn}
      rootStorageBucket:
        type: aws:s3:BucketV2
        name: root_storage_bucket
        properties:
          bucket: ${prefix}-rootbucket
          acl: private
          forceDestroy: true
          tags: ${tags}
      rootVersioning:
        type: aws:s3:BucketVersioningV2
        name: root_versioning
        properties:
          bucket: ${rootStorageBucket.id}
          versioningConfiguration:
            status: Disabled
      rootStorageBucketBucketServerSideEncryptionConfigurationV2:
        type: aws:s3:BucketServerSideEncryptionConfigurationV2
        name: root_storage_bucket
        properties:
          bucket: ${rootStorageBucket.bucket}
          rules:
            - applyServerSideEncryptionByDefault:
                sseAlgorithm: AES256
      rootStorageBucketBucketPublicAccessBlock:
        type: aws:s3:BucketPublicAccessBlock
        name: root_storage_bucket
        properties:
          bucket: ${rootStorageBucket.id}
          blockPublicAcls: true
          blockPublicPolicy: true
          ignorePublicAcls: true
          restrictPublicBuckets: true
        options:
          dependsOn:
            - ${rootStorageBucket}
      rootBucketPolicy:
        type: aws:s3:BucketPolicy
        name: root_bucket_policy
        properties:
          bucket: ${rootStorageBucket.id}
          policy: ${thisGetAwsBucketPolicy.json}
        options:
          dependsOn:
            - ${rootStorageBucketBucketPublicAccessBlock}
      thisMwsStorageConfigurations:
        type: databricks:MwsStorageConfigurations
        name: this
        properties:
          accountId: ${databricksAccountId}
          storageConfigurationName: ${prefix}-storage
          bucketName: ${rootStorageBucket.bucket}
      thisMwsWorkspaces:
        type: databricks:MwsWorkspaces
        name: this
        properties:
          accountId: ${databricksAccountId}
          workspaceName: ${prefix}
          awsRegion: us-east-1
          credentialsId: ${thisMwsCredentials.credentialsId}
          storageConfigurationId: ${thisMwsStorageConfigurations.storageConfigurationId}
          token: {}
          customTags:
            SoldToCode: '1234'
    variables:
      prefix: dltp${naming.result}
      this:
        fn::invoke:
          Function: databricks:getAwsAssumeRolePolicy
          Arguments:
            externalId: ${databricksAccountId}
      thisGetAwsCrossAccountPolicy:
        fn::invoke:
          Function: databricks:getAwsCrossAccountPolicy
          Arguments: {}
      thisGetAwsBucketPolicy:
        fn::invoke:
          Function: databricks:getAwsBucketPolicy
          Arguments:
            bucket: ${rootStorageBucket.bucket}
    outputs:
      databricksToken: ${thisMwsWorkspaces.token.tokenValue}
    

    In order to create a Databricks Workspace that leverages AWS PrivateLink please ensure that you have read and understood the Enable Private Link documentation and then customise the example above with the relevant examples from mws_vpc_endpoint, mws_private_access_settings and mws_networks.

    Creating a Databricks on GCP workspace

    To get workspace running, you have to configure a network object:

    • databricks.MwsNetworks - (optional, but recommended) You can share one customer-managed VPC with multiple workspaces in a single account. You do not have to create a new VPC for each workspace. However, you cannot reuse subnets with other resources, including other workspaces or non-Databricks resources. If you plan to share one VPC with multiple workspaces, be sure to size your VPC and subnets accordingly. Because a Databricks databricks.MwsNetworks encapsulates this information, you cannot reuse it across workspaces.
    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";
    
    const config = new pulumi.Config();
    // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
    const databricksAccountId = config.requireObject("databricksAccountId");
    const databricksGoogleServiceAccount = config.requireObject("databricksGoogleServiceAccount");
    const googleProject = config.requireObject("googleProject");
    // register VPC
    const _this = new databricks.MwsNetworks("this", {
        accountId: databricksAccountId,
        networkName: `${prefix}-network`,
        gcpNetworkInfo: {
            networkProjectId: googleProject,
            vpcId: vpcId,
            subnetId: subnetId,
            subnetRegion: subnetRegion,
            podIpRangeName: "pods",
            serviceIpRangeName: "svc",
        },
    });
    // create workspace in given VPC
    const thisMwsWorkspaces = new databricks.MwsWorkspaces("this", {
        accountId: databricksAccountId,
        workspaceName: prefix,
        location: subnetRegion,
        cloudResourceContainer: {
            gcp: {
                projectId: googleProject,
            },
        },
        networkId: _this.networkId,
        gkeConfig: {
            connectivityType: "PRIVATE_NODE_PUBLIC_MASTER",
            masterIpRange: "10.3.0.0/28",
        },
        token: {},
    });
    export const databricksToken = thisMwsWorkspaces.token.apply(token => token?.tokenValue);
    
    import pulumi
    import pulumi_databricks as databricks
    
    config = pulumi.Config()
    # Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
    databricks_account_id = config.require_object("databricksAccountId")
    databricks_google_service_account = config.require_object("databricksGoogleServiceAccount")
    google_project = config.require_object("googleProject")
    # register VPC
    this = databricks.MwsNetworks("this",
        account_id=databricks_account_id,
        network_name=f"{prefix}-network",
        gcp_network_info={
            "network_project_id": google_project,
            "vpc_id": vpc_id,
            "subnet_id": subnet_id,
            "subnet_region": subnet_region,
            "pod_ip_range_name": "pods",
            "service_ip_range_name": "svc",
        })
    # create workspace in given VPC
    this_mws_workspaces = databricks.MwsWorkspaces("this",
        account_id=databricks_account_id,
        workspace_name=prefix,
        location=subnet_region,
        cloud_resource_container={
            "gcp": {
                "project_id": google_project,
            },
        },
        network_id=this.network_id,
        gke_config={
            "connectivity_type": "PRIVATE_NODE_PUBLIC_MASTER",
            "master_ip_range": "10.3.0.0/28",
        },
        token={})
    pulumi.export("databricksToken", this_mws_workspaces.token.token_value)
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		cfg := config.New(ctx, "")
    		// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
    		databricksAccountId := cfg.RequireObject("databricksAccountId")
    		databricksGoogleServiceAccount := cfg.RequireObject("databricksGoogleServiceAccount")
    		googleProject := cfg.RequireObject("googleProject")
    		// register VPC
    		this, err := databricks.NewMwsNetworks(ctx, "this", &databricks.MwsNetworksArgs{
    			AccountId:   pulumi.Any(databricksAccountId),
    			NetworkName: pulumi.Sprintf("%v-network", prefix),
    			GcpNetworkInfo: &databricks.MwsNetworksGcpNetworkInfoArgs{
    				NetworkProjectId:   pulumi.Any(googleProject),
    				VpcId:              pulumi.Any(vpcId),
    				SubnetId:           pulumi.Any(subnetId),
    				SubnetRegion:       pulumi.Any(subnetRegion),
    				PodIpRangeName:     pulumi.String("pods"),
    				ServiceIpRangeName: pulumi.String("svc"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		// create workspace in given VPC
    		thisMwsWorkspaces, err := databricks.NewMwsWorkspaces(ctx, "this", &databricks.MwsWorkspacesArgs{
    			AccountId:     pulumi.Any(databricksAccountId),
    			WorkspaceName: pulumi.Any(prefix),
    			Location:      pulumi.Any(subnetRegion),
    			CloudResourceContainer: &databricks.MwsWorkspacesCloudResourceContainerArgs{
    				Gcp: &databricks.MwsWorkspacesCloudResourceContainerGcpArgs{
    					ProjectId: pulumi.Any(googleProject),
    				},
    			},
    			NetworkId: this.NetworkId,
    			GkeConfig: &databricks.MwsWorkspacesGkeConfigArgs{
    				ConnectivityType: pulumi.String("PRIVATE_NODE_PUBLIC_MASTER"),
    				MasterIpRange:    pulumi.String("10.3.0.0/28"),
    			},
    			Token: &databricks.MwsWorkspacesTokenArgs{},
    		})
    		if err != nil {
    			return err
    		}
    		ctx.Export("databricksToken", thisMwsWorkspaces.Token.ApplyT(func(token databricks.MwsWorkspacesToken) (*string, error) {
    			return &token.TokenValue, nil
    		}).(pulumi.StringPtrOutput))
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Databricks = Pulumi.Databricks;
    
    return await Deployment.RunAsync(() => 
    {
        var config = new Config();
        // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/
        var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId");
        var databricksGoogleServiceAccount = config.RequireObject<dynamic>("databricksGoogleServiceAccount");
        var googleProject = config.RequireObject<dynamic>("googleProject");
        // register VPC
        var @this = new Databricks.MwsNetworks("this", new()
        {
            AccountId = databricksAccountId,
            NetworkName = $"{prefix}-network",
            GcpNetworkInfo = new Databricks.Inputs.MwsNetworksGcpNetworkInfoArgs
            {
                NetworkProjectId = googleProject,
                VpcId = vpcId,
                SubnetId = subnetId,
                SubnetRegion = subnetRegion,
                PodIpRangeName = "pods",
                ServiceIpRangeName = "svc",
            },
        });
    
        // create workspace in given VPC
        var thisMwsWorkspaces = new Databricks.MwsWorkspaces("this", new()
        {
            AccountId = databricksAccountId,
            WorkspaceName = prefix,
            Location = subnetRegion,
            CloudResourceContainer = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerArgs
            {
                Gcp = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerGcpArgs
                {
                    ProjectId = googleProject,
                },
            },
            NetworkId = @this.NetworkId,
            GkeConfig = new Databricks.Inputs.MwsWorkspacesGkeConfigArgs
            {
                ConnectivityType = "PRIVATE_NODE_PUBLIC_MASTER",
                MasterIpRange = "10.3.0.0/28",
            },
            Token = null,
        });
    
        return new Dictionary<string, object?>
        {
            ["databricksToken"] = thisMwsWorkspaces.Token.Apply(token => token?.TokenValue),
        };
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.databricks.MwsNetworks;
    import com.pulumi.databricks.MwsNetworksArgs;
    import com.pulumi.databricks.inputs.MwsNetworksGcpNetworkInfoArgs;
    import com.pulumi.databricks.MwsWorkspaces;
    import com.pulumi.databricks.MwsWorkspacesArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerGcpArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesGkeConfigArgs;
    import com.pulumi.databricks.inputs.MwsWorkspacesTokenArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var config = ctx.config();
            final var databricksAccountId = config.get("databricksAccountId");
            final var databricksGoogleServiceAccount = config.get("databricksGoogleServiceAccount");
            final var googleProject = config.get("googleProject");
            // register VPC
            var this_ = new MwsNetworks("this", MwsNetworksArgs.builder()
                .accountId(databricksAccountId)
                .networkName(String.format("%s-network", prefix))
                .gcpNetworkInfo(MwsNetworksGcpNetworkInfoArgs.builder()
                    .networkProjectId(googleProject)
                    .vpcId(vpcId)
                    .subnetId(subnetId)
                    .subnetRegion(subnetRegion)
                    .podIpRangeName("pods")
                    .serviceIpRangeName("svc")
                    .build())
                .build());
    
            // create workspace in given VPC
            var thisMwsWorkspaces = new MwsWorkspaces("thisMwsWorkspaces", MwsWorkspacesArgs.builder()
                .accountId(databricksAccountId)
                .workspaceName(prefix)
                .location(subnetRegion)
                .cloudResourceContainer(MwsWorkspacesCloudResourceContainerArgs.builder()
                    .gcp(MwsWorkspacesCloudResourceContainerGcpArgs.builder()
                        .projectId(googleProject)
                        .build())
                    .build())
                .networkId(this_.networkId())
                .gkeConfig(MwsWorkspacesGkeConfigArgs.builder()
                    .connectivityType("PRIVATE_NODE_PUBLIC_MASTER")
                    .masterIpRange("10.3.0.0/28")
                    .build())
            .token(MwsWorkspacesTokenArgs.builder().build())
                .build());
    
            ctx.export("databricksToken", thisMwsWorkspaces.token().applyValue(token -> token.tokenValue()));
        }
    }
    
    configuration:
      databricksAccountId:
        type: dynamic
      databricksGoogleServiceAccount:
        type: dynamic
      googleProject:
        type: dynamic
    resources:
      # register VPC
      this:
        type: databricks:MwsNetworks
        properties:
          accountId: ${databricksAccountId}
          networkName: ${prefix}-network
          gcpNetworkInfo:
            networkProjectId: ${googleProject}
            vpcId: ${vpcId}
            subnetId: ${subnetId}
            subnetRegion: ${subnetRegion}
            podIpRangeName: pods
            serviceIpRangeName: svc
      # create workspace in given VPC
      thisMwsWorkspaces:
        type: databricks:MwsWorkspaces
        name: this
        properties:
          accountId: ${databricksAccountId}
          workspaceName: ${prefix}
          location: ${subnetRegion}
          cloudResourceContainer:
            gcp:
              projectId: ${googleProject}
          networkId: ${this.networkId}
          gkeConfig:
            connectivityType: PRIVATE_NODE_PUBLIC_MASTER
            masterIpRange: 10.3.0.0/28
          token: {}
    outputs:
      databricksToken: ${thisMwsWorkspaces.token.tokenValue}
    

    In order to create a Databricks Workspace that leverages GCP Private Service Connect please ensure that you have read and understood the Enable Private Service Connect documentation and then customise the example above with the relevant examples from mws_vpc_endpoint, mws_private_access_settings and mws_networks.

    Create MwsWorkspaces Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new MwsWorkspaces(name: string, args: MwsWorkspacesArgs, opts?: CustomResourceOptions);
    @overload
    def MwsWorkspaces(resource_name: str,
                      args: MwsWorkspacesArgs,
                      opts: Optional[ResourceOptions] = None)
    
    @overload
    def MwsWorkspaces(resource_name: str,
                      opts: Optional[ResourceOptions] = None,
                      account_id: Optional[str] = None,
                      workspace_name: Optional[str] = None,
                      is_no_public_ip_enabled: Optional[bool] = None,
                      workspace_status: Optional[str] = None,
                      creation_time: Optional[int] = None,
                      credentials_id: Optional[str] = None,
                      custom_tags: Optional[Mapping[str, str]] = None,
                      customer_managed_key_id: Optional[str] = None,
                      deployment_name: Optional[str] = None,
                      external_customer_info: Optional[MwsWorkspacesExternalCustomerInfoArgs] = None,
                      gcp_managed_network_config: Optional[MwsWorkspacesGcpManagedNetworkConfigArgs] = None,
                      gke_config: Optional[MwsWorkspacesGkeConfigArgs] = None,
                      workspace_url: Optional[str] = None,
                      cloud_resource_container: Optional[MwsWorkspacesCloudResourceContainerArgs] = None,
                      pricing_tier: Optional[str] = None,
                      network_id: Optional[str] = None,
                      managed_services_customer_managed_key_id: Optional[str] = None,
                      private_access_settings_id: Optional[str] = None,
                      storage_configuration_id: Optional[str] = None,
                      storage_customer_managed_key_id: Optional[str] = None,
                      token: Optional[MwsWorkspacesTokenArgs] = None,
                      workspace_id: Optional[str] = None,
                      aws_region: Optional[str] = None,
                      location: Optional[str] = None,
                      workspace_status_message: Optional[str] = None,
                      cloud: Optional[str] = None)
    func NewMwsWorkspaces(ctx *Context, name string, args MwsWorkspacesArgs, opts ...ResourceOption) (*MwsWorkspaces, error)
    public MwsWorkspaces(string name, MwsWorkspacesArgs args, CustomResourceOptions? opts = null)
    public MwsWorkspaces(String name, MwsWorkspacesArgs args)
    public MwsWorkspaces(String name, MwsWorkspacesArgs args, CustomResourceOptions options)
    
    type: databricks:MwsWorkspaces
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args MwsWorkspacesArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var mwsWorkspacesResource = new Databricks.MwsWorkspaces("mwsWorkspacesResource", new()
    {
        AccountId = "string",
        WorkspaceName = "string",
        IsNoPublicIpEnabled = false,
        WorkspaceStatus = "string",
        CreationTime = 0,
        CredentialsId = "string",
        CustomTags = 
        {
            { "string", "string" },
        },
        DeploymentName = "string",
        ExternalCustomerInfo = new Databricks.Inputs.MwsWorkspacesExternalCustomerInfoArgs
        {
            AuthoritativeUserEmail = "string",
            AuthoritativeUserFullName = "string",
            CustomerName = "string",
        },
        GcpManagedNetworkConfig = new Databricks.Inputs.MwsWorkspacesGcpManagedNetworkConfigArgs
        {
            GkeClusterPodIpRange = "string",
            GkeClusterServiceIpRange = "string",
            SubnetCidr = "string",
        },
        GkeConfig = new Databricks.Inputs.MwsWorkspacesGkeConfigArgs
        {
            ConnectivityType = "string",
            MasterIpRange = "string",
        },
        WorkspaceUrl = "string",
        CloudResourceContainer = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerArgs
        {
            Gcp = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerGcpArgs
            {
                ProjectId = "string",
            },
        },
        PricingTier = "string",
        NetworkId = "string",
        ManagedServicesCustomerManagedKeyId = "string",
        PrivateAccessSettingsId = "string",
        StorageConfigurationId = "string",
        StorageCustomerManagedKeyId = "string",
        Token = new Databricks.Inputs.MwsWorkspacesTokenArgs
        {
            Comment = "string",
            LifetimeSeconds = 0,
            TokenId = "string",
            TokenValue = "string",
        },
        WorkspaceId = "string",
        AwsRegion = "string",
        Location = "string",
        WorkspaceStatusMessage = "string",
        Cloud = "string",
    });
    
    // Go example (generated docs): constructs a databricks.MwsWorkspaces resource
    // with every supported input set to a type placeholder ("string", 0, false) —
    // these are not working values.
    // NOTE(review): fragment — `ctx` is supplied by the enclosing Pulumi program,
    // which is not shown here.
    example, err := databricks.NewMwsWorkspaces(ctx, "mwsWorkspacesResource", &databricks.MwsWorkspacesArgs{
    	AccountId:           pulumi.String("string"),
    	WorkspaceName:       pulumi.String("string"),
    	IsNoPublicIpEnabled: pulumi.Bool(false),
    	WorkspaceStatus:     pulumi.String("string"),
    	CreationTime:        pulumi.Int(0),
    	CredentialsId:       pulumi.String("string"),
    	CustomTags: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	DeploymentName: pulumi.String("string"),
    	ExternalCustomerInfo: &databricks.MwsWorkspacesExternalCustomerInfoArgs{
    		AuthoritativeUserEmail:    pulumi.String("string"),
    		AuthoritativeUserFullName: pulumi.String("string"),
    		CustomerName:              pulumi.String("string"),
    	},
    	// GCP-only nested blocks (GKE networking / cluster configuration).
    	GcpManagedNetworkConfig: &databricks.MwsWorkspacesGcpManagedNetworkConfigArgs{
    		GkeClusterPodIpRange:     pulumi.String("string"),
    		GkeClusterServiceIpRange: pulumi.String("string"),
    		SubnetCidr:               pulumi.String("string"),
    	},
    	GkeConfig: &databricks.MwsWorkspacesGkeConfigArgs{
    		ConnectivityType: pulumi.String("string"),
    		MasterIpRange:    pulumi.String("string"),
    	},
    	WorkspaceUrl: pulumi.String("string"),
    	CloudResourceContainer: &databricks.MwsWorkspacesCloudResourceContainerArgs{
    		Gcp: &databricks.MwsWorkspacesCloudResourceContainerGcpArgs{
    			ProjectId: pulumi.String("string"),
    		},
    	},
    	PricingTier:                         pulumi.String("string"),
    	NetworkId:                           pulumi.String("string"),
    	ManagedServicesCustomerManagedKeyId: pulumi.String("string"),
    	PrivateAccessSettingsId:             pulumi.String("string"),
    	StorageConfigurationId:              pulumi.String("string"),
    	StorageCustomerManagedKeyId:         pulumi.String("string"),
    	Token: &databricks.MwsWorkspacesTokenArgs{
    		Comment:         pulumi.String("string"),
    		LifetimeSeconds: pulumi.Int(0),
    		TokenId:         pulumi.String("string"),
    		TokenValue:      pulumi.String("string"),
    	},
    	WorkspaceId:            pulumi.String("string"),
    	AwsRegion:              pulumi.String("string"),
    	Location:               pulumi.String("string"),
    	WorkspaceStatusMessage: pulumi.String("string"),
    	Cloud:                  pulumi.String("string"),
    })
    
    // Java example (generated docs): builds a MwsWorkspaces resource with every
    // supported input set to a type placeholder ("string", 0, false) — these are
    // not working values.
    var mwsWorkspacesResource = new MwsWorkspaces("mwsWorkspacesResource", MwsWorkspacesArgs.builder()
        .accountId("string")
        .workspaceName("string")
        .isNoPublicIpEnabled(false)
        .workspaceStatus("string")
        .creationTime(0)
        .credentialsId("string")
        .customTags(Map.of("string", "string"))
        .deploymentName("string")
        .externalCustomerInfo(MwsWorkspacesExternalCustomerInfoArgs.builder()
            .authoritativeUserEmail("string")
            .authoritativeUserFullName("string")
            .customerName("string")
            .build())
        // GCP-only nested blocks (GKE networking / cluster configuration).
        .gcpManagedNetworkConfig(MwsWorkspacesGcpManagedNetworkConfigArgs.builder()
            .gkeClusterPodIpRange("string")
            .gkeClusterServiceIpRange("string")
            .subnetCidr("string")
            .build())
        .gkeConfig(MwsWorkspacesGkeConfigArgs.builder()
            .connectivityType("string")
            .masterIpRange("string")
            .build())
        .workspaceUrl("string")
        .cloudResourceContainer(MwsWorkspacesCloudResourceContainerArgs.builder()
            .gcp(MwsWorkspacesCloudResourceContainerGcpArgs.builder()
                .projectId("string")
                .build())
            .build())
        .pricingTier("string")
        .networkId("string")
        .managedServicesCustomerManagedKeyId("string")
        .privateAccessSettingsId("string")
        .storageConfigurationId("string")
        .storageCustomerManagedKeyId("string")
        .token(MwsWorkspacesTokenArgs.builder()
            .comment("string")
            .lifetimeSeconds(0)
            .tokenId("string")
            .tokenValue("string")
            .build())
        .workspaceId("string")
        .awsRegion("string")
        .location("string")
        .workspaceStatusMessage("string")
        .cloud("string")
        .build());
    
    # Python example (generated docs): creates a MwsWorkspaces resource with every
    # supported input set to a type placeholder ("string", 0, False) — these are
    # not working values. Nested inputs are passed as dictionary literals here;
    # argument classes are equally valid.
    mws_workspaces_resource = databricks.MwsWorkspaces("mwsWorkspacesResource",
        account_id="string",
        workspace_name="string",
        is_no_public_ip_enabled=False,
        workspace_status="string",
        creation_time=0,
        credentials_id="string",
        custom_tags={
            "string": "string",
        },
        deployment_name="string",
        external_customer_info={
            "authoritative_user_email": "string",
            "authoritative_user_full_name": "string",
            "customer_name": "string",
        },
        # GCP-only nested blocks (GKE networking / cluster configuration).
        gcp_managed_network_config={
            "gke_cluster_pod_ip_range": "string",
            "gke_cluster_service_ip_range": "string",
            "subnet_cidr": "string",
        },
        gke_config={
            "connectivity_type": "string",
            "master_ip_range": "string",
        },
        workspace_url="string",
        cloud_resource_container={
            "gcp": {
                "project_id": "string",
            },
        },
        pricing_tier="string",
        network_id="string",
        managed_services_customer_managed_key_id="string",
        private_access_settings_id="string",
        storage_configuration_id="string",
        storage_customer_managed_key_id="string",
        token={
            "comment": "string",
            "lifetime_seconds": 0,
            "token_id": "string",
            "token_value": "string",
        },
        workspace_id="string",
        aws_region="string",
        location="string",
        workspace_status_message="string",
        cloud="string")
    
    // TypeScript example (generated docs): creates a MwsWorkspaces resource with
    // every supported input set to a type placeholder ("string", 0, false) —
    // these are not working values.
    const mwsWorkspacesResource = new databricks.MwsWorkspaces("mwsWorkspacesResource", {
        accountId: "string",
        workspaceName: "string",
        isNoPublicIpEnabled: false,
        workspaceStatus: "string",
        creationTime: 0,
        credentialsId: "string",
        customTags: {
            string: "string",
        },
        deploymentName: "string",
        externalCustomerInfo: {
            authoritativeUserEmail: "string",
            authoritativeUserFullName: "string",
            customerName: "string",
        },
        // GCP-only nested blocks (GKE networking / cluster configuration).
        gcpManagedNetworkConfig: {
            gkeClusterPodIpRange: "string",
            gkeClusterServiceIpRange: "string",
            subnetCidr: "string",
        },
        gkeConfig: {
            connectivityType: "string",
            masterIpRange: "string",
        },
        workspaceUrl: "string",
        cloudResourceContainer: {
            gcp: {
                projectId: "string",
            },
        },
        pricingTier: "string",
        networkId: "string",
        managedServicesCustomerManagedKeyId: "string",
        privateAccessSettingsId: "string",
        storageConfigurationId: "string",
        storageCustomerManagedKeyId: "string",
        token: {
            comment: "string",
            lifetimeSeconds: 0,
            tokenId: "string",
            tokenValue: "string",
        },
        workspaceId: "string",
        awsRegion: "string",
        location: "string",
        workspaceStatusMessage: "string",
        cloud: "string",
    });
    
    # YAML example (generated docs): declares a databricks:MwsWorkspaces resource
    # with every supported property set to a type placeholder (string, 0, false) —
    # these are not working values.
    type: databricks:MwsWorkspaces
    properties:
        accountId: string
        awsRegion: string
        cloud: string
        # GCP-only nested blocks (cloud container / GKE networking / cluster config).
        cloudResourceContainer:
            gcp:
                projectId: string
        creationTime: 0
        credentialsId: string
        customTags:
            string: string
        deploymentName: string
        externalCustomerInfo:
            authoritativeUserEmail: string
            authoritativeUserFullName: string
            customerName: string
        gcpManagedNetworkConfig:
            gkeClusterPodIpRange: string
            gkeClusterServiceIpRange: string
            subnetCidr: string
        gkeConfig:
            connectivityType: string
            masterIpRange: string
        isNoPublicIpEnabled: false
        location: string
        managedServicesCustomerManagedKeyId: string
        networkId: string
        pricingTier: string
        privateAccessSettingsId: string
        storageConfigurationId: string
        storageCustomerManagedKeyId: string
        token:
            comment: string
            lifetimeSeconds: 0
            tokenId: string
            tokenValue: string
        workspaceId: string
        workspaceName: string
        workspaceStatus: string
        workspaceStatusMessage: string
        workspaceUrl: string
    

    MwsWorkspaces Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The MwsWorkspaces resource accepts the following input properties:

    AccountId string
    Account ID that can be found in the top-right corner of the Accounts Console.
    WorkspaceName string
    Name of the workspace, which will appear in the UI.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags Dictionary<string, string>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note that it can take up to an hour for custom_tags to be set due to scheduling on the Control Plane. After custom tags are applied, they can be modified; however, they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it will only affect newly created workspaces.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfo
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    GkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    The pricing tier of the workspace.
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesToken
    WorkspaceId string
    (String) workspace id
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    AccountId string
    Account Id that could be found in the top right corner of Accounts Console.
    WorkspaceName string
    name of the workspace, will appear on UI.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags map[string]string
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfoArgs
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfigArgs
    GkeConfig MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    The pricing tier of the workspace.
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesTokenArgs
    WorkspaceId string
    (String) workspace id
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    accountId String
    Account Id that could be found in the top right corner of Accounts Console.
    workspaceName String
    name of the workspace, will appear on UI.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Integer
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<String,String>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    The pricing tier of the workspace.
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId String
    (String) workspace id
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace
    accountId string
    Account Id that could be found in the top right corner of Accounts Console.
    workspaceName string
    name of the workspace, will appear on UI.
    awsRegion string
    region of VPC.
    cloud string
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime number
    (Integer) time when workspace was created
    credentialsId string
    customTags {[key: string]: string}
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled boolean
    location string
    region of the subnet.
    managedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId string
    network_id from networks.
    pricingTier string
    The pricing tier of the workspace.
    privateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId string
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId string
    (String) workspace id
    workspaceStatus string
    (String) workspace status
    workspaceStatusMessage string
    (String) updates on workspace status
    workspaceUrl string
    (String) URL of the workspace
    account_id str
    Account Id that could be found in the top right corner of Accounts Console.
    workspace_name str
    name of the workspace, will appear on UI.
    aws_region str
    region of VPC.
    cloud str
    cloud_resource_container MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creation_time int
    (Integer) time when workspace was created
    credentials_id str
    custom_tags Mapping[str, str]
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customer_managed_key_id str

    Deprecated: Use managed_services_customer_managed_key_id instead

    deployment_name str
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    external_customer_info MwsWorkspacesExternalCustomerInfoArgs
    gcp_managed_network_config MwsWorkspacesGcpManagedNetworkConfigArgs
    gke_config MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    is_no_public_ip_enabled bool
    location str
    region of the subnet.
    managed_services_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    network_id str
    network_id from networks.
    pricing_tier str
    The pricing tier of the workspace.
    private_access_settings_id str
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storage_configuration_id str
    storage_configuration_id from storage configuration.
    storage_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesTokenArgs
    workspace_id str
    (String) workspace id
    workspace_status str
    (String) workspace status
    workspace_status_message str
    (String) updates on workspace status
    workspace_url str
    (String) URL of the workspace
    accountId String
    Account Id that could be found in the top right corner of Accounts Console.
    workspaceName String
    name of the workspace, will appear on UI.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer Property Map
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Number
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<String>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    externalCustomerInfo Property Map
    gcpManagedNetworkConfig Property Map
    gkeConfig Property Map
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    The pricing tier of the workspace.
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token Property Map
    workspaceId String
    (String) workspace id
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace

    Outputs

    All input properties are implicitly available as output properties. Additionally, the MwsWorkspaces resource produces the following output properties:

    GcpWorkspaceSa string
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    Id string
    The provider-assigned unique ID for this managed resource.
    GcpWorkspaceSa string
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    Id string
    The provider-assigned unique ID for this managed resource.
    gcpWorkspaceSa String
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    id String
    The provider-assigned unique ID for this managed resource.
    gcpWorkspaceSa string
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    id string
    The provider-assigned unique ID for this managed resource.
    gcp_workspace_sa str
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    id str
    The provider-assigned unique ID for this managed resource.
    gcpWorkspaceSa String
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing MwsWorkspaces Resource

    Get an existing MwsWorkspaces resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: MwsWorkspacesState, opts?: CustomResourceOptions): MwsWorkspaces
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            account_id: Optional[str] = None,
            aws_region: Optional[str] = None,
            cloud: Optional[str] = None,
            cloud_resource_container: Optional[MwsWorkspacesCloudResourceContainerArgs] = None,
            creation_time: Optional[int] = None,
            credentials_id: Optional[str] = None,
            custom_tags: Optional[Mapping[str, str]] = None,
            customer_managed_key_id: Optional[str] = None,
            deployment_name: Optional[str] = None,
            external_customer_info: Optional[MwsWorkspacesExternalCustomerInfoArgs] = None,
            gcp_managed_network_config: Optional[MwsWorkspacesGcpManagedNetworkConfigArgs] = None,
            gcp_workspace_sa: Optional[str] = None,
            gke_config: Optional[MwsWorkspacesGkeConfigArgs] = None,
            is_no_public_ip_enabled: Optional[bool] = None,
            location: Optional[str] = None,
            managed_services_customer_managed_key_id: Optional[str] = None,
            network_id: Optional[str] = None,
            pricing_tier: Optional[str] = None,
            private_access_settings_id: Optional[str] = None,
            storage_configuration_id: Optional[str] = None,
            storage_customer_managed_key_id: Optional[str] = None,
            token: Optional[MwsWorkspacesTokenArgs] = None,
            workspace_id: Optional[str] = None,
            workspace_name: Optional[str] = None,
            workspace_status: Optional[str] = None,
            workspace_status_message: Optional[str] = None,
            workspace_url: Optional[str] = None) -> MwsWorkspaces
    func GetMwsWorkspaces(ctx *Context, name string, id IDInput, state *MwsWorkspacesState, opts ...ResourceOption) (*MwsWorkspaces, error)
    public static MwsWorkspaces Get(string name, Input<string> id, MwsWorkspacesState? state, CustomResourceOptions? opts = null)
    public static MwsWorkspaces get(String name, Output<String> id, MwsWorkspacesState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AccountId string
    Account Id that could be found in the top right corner of Accounts Console.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags Dictionary<string, string>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it only will affect the new workspaces created.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfo
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    GcpWorkspaceSa string
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    GkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    The pricing tier of the workspace.
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesToken
    WorkspaceId string
    (String) workspace id
    WorkspaceName string
    name of the workspace, will appear on UI.
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    AccountId string
    Account Id that could be found in the top right corner of Accounts Console.
    AwsRegion string
    region of VPC.
    Cloud string
    CloudResourceContainer MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    CreationTime int
    (Integer) time when workspace was created
    CredentialsId string
    CustomTags map[string]string
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on the Control Plane. After custom tags are applied, they can be modified; however, they can never be completely removed.
    CustomerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    DeploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it will only affect newly created workspaces.
    ExternalCustomerInfo MwsWorkspacesExternalCustomerInfoArgs
    GcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfigArgs
    GcpWorkspaceSa string
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    GkeConfig MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    IsNoPublicIpEnabled bool
    Location string
    region of the subnet.
    ManagedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    NetworkId string
    network_id from networks.
    PricingTier string
    The pricing tier of the workspace.
    PrivateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    StorageConfigurationId string
    storage_configuration_id from storage configuration.
    StorageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    Token MwsWorkspacesTokenArgs
    WorkspaceId string
    (String) workspace id
    WorkspaceName string
    Name of the workspace; it will appear in the UI.
    WorkspaceStatus string
    (String) workspace status
    WorkspaceStatusMessage string
    (String) updates on workspace status
    WorkspaceUrl string
    (String) URL of the workspace
    accountId String
    Account ID, which can be found in the top-right corner of the Accounts Console.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Integer
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<String,String>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on the Control Plane. After custom tags are applied, they can be modified; however, they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it will only affect newly created workspaces.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gcpWorkspaceSa String
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    The pricing tier of the workspace.
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId String
    (String) workspace id
    workspaceName String
    Name of the workspace; it will appear in the UI.
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace
    accountId string
    Account ID, which can be found in the top-right corner of the Accounts Console.
    awsRegion string
    region of VPC.
    cloud string
    cloudResourceContainer MwsWorkspacesCloudResourceContainer
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime number
    (Integer) time when workspace was created
    credentialsId string
    customTags {[key: string]: string}
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on the Control Plane. After custom tags are applied, they can be modified; however, they can never be completely removed.
    customerManagedKeyId string

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName string
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it will only affect newly created workspaces.
    externalCustomerInfo MwsWorkspacesExternalCustomerInfo
    gcpManagedNetworkConfig MwsWorkspacesGcpManagedNetworkConfig
    gcpWorkspaceSa string
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    gkeConfig MwsWorkspacesGkeConfig
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled boolean
    location string
    region of the subnet.
    managedServicesCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId string
    network_id from networks.
    pricingTier string
    The pricing tier of the workspace.
    privateAccessSettingsId string
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId string
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId string
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesToken
    workspaceId string
    (String) workspace id
    workspaceName string
    Name of the workspace; it will appear in the UI.
    workspaceStatus string
    (String) workspace status
    workspaceStatusMessage string
    (String) updates on workspace status
    workspaceUrl string
    (String) URL of the workspace
    account_id str
    Account ID, which can be found in the top-right corner of the Accounts Console.
    aws_region str
    region of VPC.
    cloud str
    cloud_resource_container MwsWorkspacesCloudResourceContainerArgs
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creation_time int
    (Integer) time when workspace was created
    credentials_id str
    custom_tags Mapping[str, str]
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on the Control Plane. After custom tags are applied, they can be modified; however, they can never be completely removed.
    customer_managed_key_id str

    Deprecated: Use managed_services_customer_managed_key_id instead

    deployment_name str
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it will only affect newly created workspaces.
    external_customer_info MwsWorkspacesExternalCustomerInfoArgs
    gcp_managed_network_config MwsWorkspacesGcpManagedNetworkConfigArgs
    gcp_workspace_sa str
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    gke_config MwsWorkspacesGkeConfigArgs
    A block that specifies GKE configuration for the Databricks workspace:
    is_no_public_ip_enabled bool
    location str
    region of the subnet.
    managed_services_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    network_id str
    network_id from networks.
    pricing_tier str
    The pricing tier of the workspace.
    private_access_settings_id str
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storage_configuration_id str
    storage_configuration_id from storage configuration.
    storage_customer_managed_key_id str
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token MwsWorkspacesTokenArgs
    workspace_id str
    (String) workspace id
    workspace_name str
    Name of the workspace; it will appear in the UI.
    workspace_status str
    (String) workspace status
    workspace_status_message str
    (String) updates on workspace status
    workspace_url str
    (String) URL of the workspace
    accountId String
    Account ID, which can be found in the top-right corner of the Accounts Console.
    awsRegion String
    region of VPC.
    cloud String
    cloudResourceContainer Property Map
    A block that specifies GCP workspace configurations, consisting of following blocks:
    creationTime Number
    (Integer) time when workspace was created
    credentialsId String
    customTags Map<String>
    The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any default_tags or custom_tags on a cluster level. Please note it can take up to an hour for custom_tags to be set due to scheduling on the Control Plane. After custom tags are applied, they can be modified; however, they can never be completely removed.
    customerManagedKeyId String

    Deprecated: Use managed_services_customer_managed_key_id instead

    deploymentName String
    part of URL as in https://<prefix>-<deployment-name>.cloud.databricks.com. Deployment name cannot be used until a deployment name prefix is defined. Please contact your Databricks representative. Once a new deployment prefix is added/updated, it will only affect newly created workspaces.
    externalCustomerInfo Property Map
    gcpManagedNetworkConfig Property Map
    gcpWorkspaceSa String
    (String, GCP only) identifier of a service account created for the workspace in form of db-<workspace-id>@prod-gcp-<region>.iam.gserviceaccount.com
    gkeConfig Property Map
    A block that specifies GKE configuration for the Databricks workspace:
    isNoPublicIpEnabled Boolean
    location String
    region of the subnet.
    managedServicesCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to MANAGED_SERVICES. This is used to encrypt the workspace's notebook and secret data in the control plane.
    networkId String
    network_id from networks.
    pricingTier String
    The pricing tier of the workspace.
    privateAccessSettingsId String
    Canonical unique identifier of databricks.MwsPrivateAccessSettings in Databricks Account.
    storageConfigurationId String
    storage_configuration_id from storage configuration.
    storageCustomerManagedKeyId String
    customer_managed_key_id from customer managed keys with use_cases set to STORAGE. This is used to encrypt the DBFS Storage & Cluster Volumes.
    token Property Map
    workspaceId String
    (String) workspace id
    workspaceName String
    Name of the workspace; it will appear in the UI.
    workspaceStatus String
    (String) workspace status
    workspaceStatusMessage String
    (String) updates on workspace status
    workspaceUrl String
    (String) URL of the workspace

    Supporting Types

    MwsWorkspacesCloudResourceContainer, MwsWorkspacesCloudResourceContainerArgs

    Gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    Gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp MwsWorkspacesCloudResourceContainerGcp
    A block that consists of the following field:
    gcp Property Map
    A block that consists of the following field:

    MwsWorkspacesCloudResourceContainerGcp, MwsWorkspacesCloudResourceContainerGcpArgs

    ProjectId string
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    ProjectId string
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    projectId String
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    projectId string
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    project_id str
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.
    projectId String
    The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your workspace.

    MwsWorkspacesExternalCustomerInfo, MwsWorkspacesExternalCustomerInfoArgs

    MwsWorkspacesGcpManagedNetworkConfig, MwsWorkspacesGcpManagedNetworkConfigArgs

    MwsWorkspacesGkeConfig, MwsWorkspacesGkeConfigArgs

    ConnectivityType string
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    MasterIpRange string
    The IP range from which to allocate GKE cluster master resources. This field will be ignored if the GKE private cluster is not enabled. It must be exactly a /28 in size.
    ConnectivityType string
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    MasterIpRange string
    The IP range from which to allocate GKE cluster master resources. This field will be ignored if the GKE private cluster is not enabled. It must be exactly a /28 in size.
    connectivityType String
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    masterIpRange String
    The IP range from which to allocate GKE cluster master resources. This field will be ignored if the GKE private cluster is not enabled. It must be exactly a /28 in size.
    connectivityType string
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    masterIpRange string
    The IP range from which to allocate GKE cluster master resources. This field will be ignored if the GKE private cluster is not enabled. It must be exactly a /28 in size.
    connectivity_type str
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    master_ip_range str
    The IP range from which to allocate GKE cluster master resources. This field will be ignored if the GKE private cluster is not enabled. It must be exactly a /28 in size.
    connectivityType String
    Specifies the network connectivity types for the GKE nodes and the GKE master network. Possible values are: PRIVATE_NODE_PUBLIC_MASTER, PUBLIC_NODE_PUBLIC_MASTER.
    masterIpRange String
    The IP range from which to allocate GKE cluster master resources. This field will be ignored if the GKE private cluster is not enabled. It must be exactly a /28 in size.

    MwsWorkspacesToken, MwsWorkspacesTokenArgs

    Comment string
    Comment that will appear on the "User Settings / Access Tokens" page in the Workspace UI. By default it is "Pulumi PAT".
    LifetimeSeconds int
    Token expiry lifetime. By default it is 2592000 (30 days).
    TokenId string
    TokenValue string
    Comment string
    Comment that will appear on the "User Settings / Access Tokens" page in the Workspace UI. By default it is "Pulumi PAT".
    LifetimeSeconds int
    Token expiry lifetime. By default it is 2592000 (30 days).
    TokenId string
    TokenValue string
    comment String
    Comment that will appear on the "User Settings / Access Tokens" page in the Workspace UI. By default it is "Pulumi PAT".
    lifetimeSeconds Integer
    Token expiry lifetime. By default it is 2592000 (30 days).
    tokenId String
    tokenValue String
    comment string
    Comment that will appear on the "User Settings / Access Tokens" page in the Workspace UI. By default it is "Pulumi PAT".
    lifetimeSeconds number
    Token expiry lifetime. By default it is 2592000 (30 days).
    tokenId string
    tokenValue string
    comment str
    Comment that will appear on the "User Settings / Access Tokens" page in the Workspace UI. By default it is "Pulumi PAT".
    lifetime_seconds int
    Token expiry lifetime. By default it is 2592000 (30 days).
    token_id str
    token_value str
    comment String
    Comment that will appear on the "User Settings / Access Tokens" page in the Workspace UI. By default it is "Pulumi PAT".
    lifetimeSeconds Number
    Token expiry lifetime. By default it is 2592000 (30 days).
    tokenId String
    tokenValue String

    Package Details

    Repository
    databricks pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.
    databricks logo
    Databricks v1.56.0 published on Tuesday, Nov 12, 2024 by Pulumi