databricks.Permissions
Explore with Pulumi AI
This resource allows you to generically manage access control in Databricks workspaces. It ensures that only admins, authenticated principals, and those declared within access_control
blocks would have specified access. It is not possible to remove management rights from admins group.
This resource is authoritative for permissions on objects. Configuring this resource for an object will OVERWRITE any existing permissions of the same type unless imported, and changes made outside of Pulumi will be reset.
It is not possible to lower permissions for
admins
, so the Databricks Pulumi Provider removes those access_control
blocks automatically.
If multiple permission levels are specified for an identity (e.g.
CAN_RESTART
and CAN_MANAGE
for a cluster), only the highest level permission is returned and will cause permanent drift.
To manage access control on service principals, use databricks_access_control_rule_set.
Cluster usage
It’s possible to separate cluster access control to three different permission levels: CAN_ATTACH_TO
, CAN_RESTART
and CAN_MANAGE
:
// Cluster usage: grant CAN_ATTACH_TO, CAN_RESTART and CAN_MANAGE on one
// shared autoscaling cluster to three different groups.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Groups that receive access; referenced below by display name.
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const ds = new databricks.Group("ds", {displayName: "Data Science"});

// Latest Spark runtime and the smallest node type with local disk.
const latest = databricks.getSparkVersion({});
const smallest = databricks.getNodeType({
    localDisk: true,
});

const sharedAutoscaling = new databricks.Cluster("shared_autoscaling", {
    clusterName: "Shared Autoscaling",
    sparkVersion: latest.then(latest => latest.id),
    nodeTypeId: smallest.then(smallest => smallest.id),
    autoterminationMinutes: 60,
    autoscale: {
        minWorkers: 1,
        maxWorkers: 10,
    },
});

// Authoritative permission set for the cluster: only these entries
// (plus admins) will hold access.
const clusterUsage = new databricks.Permissions("cluster_usage", {
    clusterId: sharedAutoscaling.id,
    accessControls: [
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_ATTACH_TO",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_RESTART",
        },
        {
            groupName: ds.displayName,
            permissionLevel: "CAN_MANAGE",
        },
    ],
});
# Cluster usage: grant CAN_ATTACH_TO, CAN_RESTART and CAN_MANAGE on one
# shared autoscaling cluster to three different groups.
import pulumi
import pulumi_databricks as databricks

# Groups that receive access; referenced below by display name.
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
ds = databricks.Group("ds", display_name="Data Science")

# Latest Spark runtime and the smallest node type with local disk.
latest = databricks.get_spark_version()
smallest = databricks.get_node_type(local_disk=True)

shared_autoscaling = databricks.Cluster("shared_autoscaling",
    cluster_name="Shared Autoscaling",
    spark_version=latest.id,
    node_type_id=smallest.id,
    autotermination_minutes=60,
    autoscale={
        "min_workers": 1,
        "max_workers": 10,
    })

# Authoritative permission set for the cluster: only these entries
# (plus admins) will hold access.
cluster_usage = databricks.Permissions("cluster_usage",
    cluster_id=shared_autoscaling.id,
    access_controls=[
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_ATTACH_TO",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_RESTART",
        },
        {
            "group_name": ds.display_name,
            "permission_level": "CAN_MANAGE",
        },
    ])
// Cluster usage: grant CAN_ATTACH_TO, CAN_RESTART and CAN_MANAGE on one
// shared autoscaling cluster to three different groups.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Groups that receive access; referenced below by display name.
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		ds, err := databricks.NewGroup(ctx, "ds", &databricks.GroupArgs{
			DisplayName: pulumi.String("Data Science"),
		})
		if err != nil {
			return err
		}
		// Latest Spark runtime and the smallest node type with local disk.
		latest, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{}, nil)
		if err != nil {
			return err
		}
		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
			LocalDisk: pulumi.BoolRef(true),
		}, nil)
		if err != nil {
			return err
		}
		sharedAutoscaling, err := databricks.NewCluster(ctx, "shared_autoscaling", &databricks.ClusterArgs{
			ClusterName:            pulumi.String("Shared Autoscaling"),
			SparkVersion:           pulumi.String(latest.Id),
			NodeTypeId:             pulumi.String(smallest.Id),
			AutoterminationMinutes: pulumi.Int(60),
			Autoscale: &databricks.ClusterAutoscaleArgs{
				MinWorkers: pulumi.Int(1),
				MaxWorkers: pulumi.Int(10),
			},
		})
		if err != nil {
			return err
		}
		// Authoritative permission set for the cluster: only these entries
		// (plus admins) will hold access.
		_, err = databricks.NewPermissions(ctx, "cluster_usage", &databricks.PermissionsArgs{
			ClusterId: sharedAutoscaling.ID(),
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_ATTACH_TO"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_RESTART"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       ds.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Cluster usage: grant CAN_ATTACH_TO, CAN_RESTART and CAN_MANAGE on one
// shared autoscaling cluster to three different groups.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Groups that receive access; referenced below by display name.
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });

    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });

    var ds = new Databricks.Group("ds", new()
    {
        DisplayName = "Data Science",
    });

    // Latest Spark runtime and the smallest node type with local disk.
    var latest = Databricks.GetSparkVersion.Invoke();

    var smallest = Databricks.GetNodeType.Invoke(new()
    {
        LocalDisk = true,
    });

    var sharedAutoscaling = new Databricks.Cluster("shared_autoscaling", new()
    {
        ClusterName = "Shared Autoscaling",
        SparkVersion = latest.Apply(getSparkVersionResult => getSparkVersionResult.Id),
        NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
        AutoterminationMinutes = 60,
        Autoscale = new Databricks.Inputs.ClusterAutoscaleArgs
        {
            MinWorkers = 1,
            MaxWorkers = 10,
        },
    });

    // Authoritative permission set for the cluster: only these entries
    // (plus admins) will hold access.
    var clusterUsage = new Databricks.Permissions("cluster_usage", new()
    {
        ClusterId = sharedAutoscaling.Id,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_ATTACH_TO",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_RESTART",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = ds.DisplayName,
                PermissionLevel = "CAN_MANAGE",
            },
        },
    });
});
// Cluster usage: grant CAN_ATTACH_TO, CAN_RESTART and CAN_MANAGE on one
// shared autoscaling cluster to three different groups.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetSparkVersionArgs;
import com.pulumi.databricks.inputs.GetNodeTypeArgs;
import com.pulumi.databricks.Cluster;
import com.pulumi.databricks.ClusterArgs;
import com.pulumi.databricks.inputs.ClusterAutoscaleArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Groups that receive access; referenced below by display name.
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());

        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());

        var ds = new Group("ds", GroupArgs.builder()
            .displayName("Data Science")
            .build());

        // Latest Spark runtime and the smallest node type with local disk.
        final var latest = DatabricksFunctions.getSparkVersion();

        final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
            .localDisk(true)
            .build());

        var sharedAutoscaling = new Cluster("sharedAutoscaling", ClusterArgs.builder()
            .clusterName("Shared Autoscaling")
            .sparkVersion(latest.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
            .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
            .autoterminationMinutes(60)
            .autoscale(ClusterAutoscaleArgs.builder()
                .minWorkers(1)
                .maxWorkers(10)
                .build())
            .build());

        // Authoritative permission set for the cluster: only these entries
        // (plus admins) will hold access.
        var clusterUsage = new Permissions("clusterUsage", PermissionsArgs.builder()
            .clusterId(sharedAutoscaling.id())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_ATTACH_TO")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_RESTART")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(ds.displayName())
                    .permissionLevel("CAN_MANAGE")
                    .build())
            .build());
    }
}
# Cluster usage: grant CAN_ATTACH_TO, CAN_RESTART and CAN_MANAGE on one
# shared autoscaling cluster to three different groups.
resources:
  # Groups that receive access; referenced below by display name.
  auto:
    type: databricks:Group
    properties:
      displayName: Automation
  eng:
    type: databricks:Group
    properties:
      displayName: Engineering
  ds:
    type: databricks:Group
    properties:
      displayName: Data Science
  sharedAutoscaling:
    type: databricks:Cluster
    name: shared_autoscaling
    properties:
      clusterName: Shared Autoscaling
      sparkVersion: ${latest.id}
      nodeTypeId: ${smallest.id}
      autoterminationMinutes: 60
      autoscale:
        minWorkers: 1
        maxWorkers: 10
  # Authoritative permission set for the cluster: only these entries
  # (plus admins) will hold access.
  clusterUsage:
    type: databricks:Permissions
    name: cluster_usage
    properties:
      clusterId: ${sharedAutoscaling.id}
      accessControls:
        - groupName: ${auto.displayName}
          permissionLevel: CAN_ATTACH_TO
        - groupName: ${eng.displayName}
          permissionLevel: CAN_RESTART
        - groupName: ${ds.displayName}
          permissionLevel: CAN_MANAGE
# Latest Spark runtime and the smallest node type with local disk.
variables:
  latest:
    fn::invoke:
      Function: databricks:getSparkVersion
      Arguments: {}
  smallest:
    fn::invoke:
      Function: databricks:getNodeType
      Arguments:
        localDisk: true
Cluster Policy usage
Cluster policies allow creation of clusters that match a given policy. It’s possible to assign CAN_USE
permission to users and groups:
// Cluster policy usage: grant CAN_USE on a policy to two groups.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const ds = new databricks.Group("ds", {displayName: "Data Science"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});

// Policy definition forbids overriding the listed Spark conf keys.
const somethingSimple = new databricks.ClusterPolicy("something_simple", {
    name: "Some simple policy",
    definition: JSON.stringify({
        "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": {
            type: "forbidden",
        },
        "spark_conf.spark.secondkey": {
            type: "forbidden",
        },
    }),
});

// Both groups may create clusters conforming to the policy.
const policyUsage = new databricks.Permissions("policy_usage", {
    clusterPolicyId: somethingSimple.id,
    accessControls: [
        {
            groupName: ds.displayName,
            permissionLevel: "CAN_USE",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_USE",
        },
    ],
});
# Cluster policy usage: grant CAN_USE on a policy to two groups.
import pulumi
import json
import pulumi_databricks as databricks

ds = databricks.Group("ds", display_name="Data Science")
eng = databricks.Group("eng", display_name="Engineering")

# Policy definition forbids overriding the listed Spark conf keys.
something_simple = databricks.ClusterPolicy("something_simple",
    name="Some simple policy",
    definition=json.dumps({
        "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": {
            "type": "forbidden",
        },
        "spark_conf.spark.secondkey": {
            "type": "forbidden",
        },
    }))

# Both groups may create clusters conforming to the policy.
policy_usage = databricks.Permissions("policy_usage",
    cluster_policy_id=something_simple.id,
    access_controls=[
        {
            "group_name": ds.display_name,
            "permission_level": "CAN_USE",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_USE",
        },
    ])
// Cluster policy usage: grant CAN_USE on a policy to two groups.
package main

import (
	"encoding/json"

	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		ds, err := databricks.NewGroup(ctx, "ds", &databricks.GroupArgs{
			DisplayName: pulumi.String("Data Science"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		// Policy definition forbids overriding the listed Spark conf keys.
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": map[string]interface{}{
				"type": "forbidden",
			},
			"spark_conf.spark.secondkey": map[string]interface{}{
				"type": "forbidden",
			},
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		somethingSimple, err := databricks.NewClusterPolicy(ctx, "something_simple", &databricks.ClusterPolicyArgs{
			Name:       pulumi.String("Some simple policy"),
			Definition: pulumi.String(json0),
		})
		if err != nil {
			return err
		}
		// Both groups may create clusters conforming to the policy.
		_, err = databricks.NewPermissions(ctx, "policy_usage", &databricks.PermissionsArgs{
			ClusterPolicyId: somethingSimple.ID(),
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       ds.DisplayName,
					PermissionLevel: pulumi.String("CAN_USE"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_USE"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Cluster policy usage: grant CAN_USE on a policy to two groups.
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var ds = new Databricks.Group("ds", new()
    {
        DisplayName = "Data Science",
    });

    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });

    // Policy definition forbids overriding the listed Spark conf keys.
    var somethingSimple = new Databricks.ClusterPolicy("something_simple", new()
    {
        Name = "Some simple policy",
        Definition = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            ["spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL"] = new Dictionary<string, object?>
            {
                ["type"] = "forbidden",
            },
            ["spark_conf.spark.secondkey"] = new Dictionary<string, object?>
            {
                ["type"] = "forbidden",
            },
        }),
    });

    // Both groups may create clusters conforming to the policy.
    var policyUsage = new Databricks.Permissions("policy_usage", new()
    {
        ClusterPolicyId = somethingSimple.Id,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = ds.DisplayName,
                PermissionLevel = "CAN_USE",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_USE",
            },
        },
    });
});
// Cluster policy usage: grant CAN_USE on a policy to two groups.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.ClusterPolicy;
import com.pulumi.databricks.ClusterPolicyArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var ds = new Group("ds", GroupArgs.builder()
            .displayName("Data Science")
            .build());

        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());

        // Policy definition forbids overriding the listed Spark conf keys.
        var somethingSimple = new ClusterPolicy("somethingSimple", ClusterPolicyArgs.builder()
            .name("Some simple policy")
            .definition(serializeJson(
                jsonObject(
                    jsonProperty("spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL", jsonObject(
                        jsonProperty("type", "forbidden")
                    )),
                    jsonProperty("spark_conf.spark.secondkey", jsonObject(
                        jsonProperty("type", "forbidden")
                    ))
                )))
            .build());

        // Both groups may create clusters conforming to the policy.
        var policyUsage = new Permissions("policyUsage", PermissionsArgs.builder()
            .clusterPolicyId(somethingSimple.id())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName(ds.displayName())
                    .permissionLevel("CAN_USE")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_USE")
                    .build())
            .build());
    }
}
# Cluster policy usage: grant CAN_USE on a policy to two groups.
resources:
  ds:
    type: databricks:Group
    properties:
      displayName: Data Science
  eng:
    type: databricks:Group
    properties:
      displayName: Engineering
  # Policy definition forbids overriding the listed Spark conf keys.
  somethingSimple:
    type: databricks:ClusterPolicy
    name: something_simple
    properties:
      name: Some simple policy
      definition:
        fn::toJSON:
          spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL:
            type: forbidden
          spark_conf.spark.secondkey:
            type: forbidden
  # Both groups may create clusters conforming to the policy.
  policyUsage:
    type: databricks:Permissions
    name: policy_usage
    properties:
      clusterPolicyId: ${somethingSimple.id}
      accessControls:
        - groupName: ${ds.displayName}
          permissionLevel: CAN_USE
        - groupName: ${eng.displayName}
          permissionLevel: CAN_USE
Instance Pool usage
Instance Pools access control allows to assign CAN_ATTACH_TO
and CAN_MANAGE
permissions to users, service principals, and groups. It’s also possible to grant creation of Instance Pools to individual users, groups, and service principals.
// Instance pool usage: grant CAN_ATTACH_TO and CAN_MANAGE on a pool.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});

// Smallest node type with local disk, used for the pool.
const smallest = databricks.getNodeType({
    localDisk: true,
});

// "this" is reserved in TypeScript, hence the _this local name.
const _this = new databricks.InstancePool("this", {
    instancePoolName: "Reserved Instances",
    idleInstanceAutoterminationMinutes: 60,
    nodeTypeId: smallest.then(smallest => smallest.id),
    minIdleInstances: 0,
    maxCapacity: 10,
});

const poolUsage = new databricks.Permissions("pool_usage", {
    instancePoolId: _this.id,
    accessControls: [
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_ATTACH_TO",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_MANAGE",
        },
    ],
});
# Instance pool usage: grant CAN_ATTACH_TO and CAN_MANAGE on a pool.
import pulumi
import pulumi_databricks as databricks

auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")

# Smallest node type with local disk, used for the pool.
smallest = databricks.get_node_type(local_disk=True)

this = databricks.InstancePool("this",
    instance_pool_name="Reserved Instances",
    idle_instance_autotermination_minutes=60,
    node_type_id=smallest.id,
    min_idle_instances=0,
    max_capacity=10)

pool_usage = databricks.Permissions("pool_usage",
    instance_pool_id=this.id,
    access_controls=[
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_ATTACH_TO",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_MANAGE",
        },
    ])
// Instance pool usage: grant CAN_ATTACH_TO and CAN_MANAGE on a pool.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		// Smallest node type with local disk, used for the pool.
		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
			LocalDisk: pulumi.BoolRef(true),
		}, nil)
		if err != nil {
			return err
		}
		this, err := databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
			InstancePoolName:                   pulumi.String("Reserved Instances"),
			IdleInstanceAutoterminationMinutes: pulumi.Int(60),
			NodeTypeId:                         pulumi.String(smallest.Id),
			MinIdleInstances:                   pulumi.Int(0),
			MaxCapacity:                        pulumi.Int(10),
		})
		if err != nil {
			return err
		}
		_, err = databricks.NewPermissions(ctx, "pool_usage", &databricks.PermissionsArgs{
			InstancePoolId: this.ID(),
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_ATTACH_TO"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Instance pool usage: grant CAN_ATTACH_TO and CAN_MANAGE on a pool.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });

    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });

    // Smallest node type with local disk, used for the pool.
    var smallest = Databricks.GetNodeType.Invoke(new()
    {
        LocalDisk = true,
    });

    // "this" is a keyword in C#, hence the @this escaped identifier.
    var @this = new Databricks.InstancePool("this", new()
    {
        InstancePoolName = "Reserved Instances",
        IdleInstanceAutoterminationMinutes = 60,
        NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
        MinIdleInstances = 0,
        MaxCapacity = 10,
    });

    var poolUsage = new Databricks.Permissions("pool_usage", new()
    {
        InstancePoolId = @this.Id,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_ATTACH_TO",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_MANAGE",
            },
        },
    });
});
// Instance pool usage: grant CAN_ATTACH_TO and CAN_MANAGE on a pool.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetNodeTypeArgs;
import com.pulumi.databricks.InstancePool;
import com.pulumi.databricks.InstancePoolArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());

        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());

        // Smallest node type with local disk, used for the pool.
        final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
            .localDisk(true)
            .build());

        // "this" is a keyword in Java, hence the this_ local name.
        var this_ = new InstancePool("this", InstancePoolArgs.builder()
            .instancePoolName("Reserved Instances")
            .idleInstanceAutoterminationMinutes(60)
            .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
            .minIdleInstances(0)
            .maxCapacity(10)
            .build());

        var poolUsage = new Permissions("poolUsage", PermissionsArgs.builder()
            .instancePoolId(this_.id())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_ATTACH_TO")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_MANAGE")
                    .build())
            .build());
    }
}
# Instance pool usage: grant CAN_ATTACH_TO and CAN_MANAGE on a pool.
resources:
  auto:
    type: databricks:Group
    properties:
      displayName: Automation
  eng:
    type: databricks:Group
    properties:
      displayName: Engineering
  this:
    type: databricks:InstancePool
    properties:
      instancePoolName: Reserved Instances
      idleInstanceAutoterminationMinutes: 60
      nodeTypeId: ${smallest.id}
      minIdleInstances: 0
      maxCapacity: 10
  poolUsage:
    type: databricks:Permissions
    name: pool_usage
    properties:
      instancePoolId: ${this.id}
      accessControls:
        - groupName: ${auto.displayName}
          permissionLevel: CAN_ATTACH_TO
        - groupName: ${eng.displayName}
          permissionLevel: CAN_MANAGE
# Smallest node type with local disk, used for the pool.
variables:
  smallest:
    fn::invoke:
      Function: databricks:getNodeType
      Arguments:
        localDisk: true
Job usage
There are four assignable permission levels for databricks_job: CAN_VIEW
, CAN_MANAGE_RUN
, IS_OWNER
, and CAN_MANAGE
. Admins are granted the CAN_MANAGE
permission by default, and they can assign that permission to non-admin users and service principals.
- The creator of a job has
IS_OWNER
permission. Destroying databricks.Permissions
resource for a job would revert ownership to the creator. - A job must have exactly one owner. If a resource is changed and no owner is specified, the currently authenticated principal would become the new owner of the job. Nothing would change, per se, if the job was created through Pulumi.
- A job cannot have a group as an owner.
- Jobs triggered through Run Now assume the permissions of the job owner and not the user or service principal who issued Run Now.
- Read main documentation for additional detail.
// Job usage: grant all four job permission levels, including IS_OWNER
// for a service principal.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const awsPrincipal = new databricks.ServicePrincipal("aws_principal", {displayName: "main"});

// Latest Spark runtime and the smallest node type with local disk.
const latest = databricks.getSparkVersion({});
const smallest = databricks.getNodeType({
    localDisk: true,
});

// "this" is reserved in TypeScript, hence the _this local name.
const _this = new databricks.Job("this", {
    name: "Featurization",
    maxConcurrentRuns: 1,
    tasks: [{
        taskKey: "task1",
        newCluster: {
            numWorkers: 300,
            sparkVersion: latest.then(latest => latest.id),
            nodeTypeId: smallest.then(smallest => smallest.id),
        },
        notebookTask: {
            notebookPath: "/Production/MakeFeatures",
        },
    }],
});

const jobUsage = new databricks.Permissions("job_usage", {
    jobId: _this.id,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_VIEW",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_MANAGE_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_MANAGE",
        },
        {
            // Ownership goes to the service principal (groups cannot own jobs).
            servicePrincipalName: awsPrincipal.applicationId,
            permissionLevel: "IS_OWNER",
        },
    ],
});
# Job usage: grant all four job permission levels, including IS_OWNER
# for a service principal.
import pulumi
import pulumi_databricks as databricks

auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
aws_principal = databricks.ServicePrincipal("aws_principal", display_name="main")

# Latest Spark runtime and the smallest node type with local disk.
latest = databricks.get_spark_version()
smallest = databricks.get_node_type(local_disk=True)

this = databricks.Job("this",
    name="Featurization",
    max_concurrent_runs=1,
    tasks=[{
        "task_key": "task1",
        "new_cluster": {
            "num_workers": 300,
            "spark_version": latest.id,
            "node_type_id": smallest.id,
        },
        "notebook_task": {
            "notebook_path": "/Production/MakeFeatures",
        },
    }])

job_usage = databricks.Permissions("job_usage",
    job_id=this.id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_VIEW",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_MANAGE_RUN",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_MANAGE",
        },
        {
            # Ownership goes to the service principal (groups cannot own jobs).
            "service_principal_name": aws_principal.application_id,
            "permission_level": "IS_OWNER",
        },
    ])
// Job usage: grant all four job permission levels, including IS_OWNER
// for a service principal.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		awsPrincipal, err := databricks.NewServicePrincipal(ctx, "aws_principal", &databricks.ServicePrincipalArgs{
			DisplayName: pulumi.String("main"),
		})
		if err != nil {
			return err
		}
		// Latest Spark runtime and the smallest node type with local disk.
		latest, err := databricks.GetSparkVersion(ctx, &databricks.GetSparkVersionArgs{}, nil)
		if err != nil {
			return err
		}
		smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{
			LocalDisk: pulumi.BoolRef(true),
		}, nil)
		if err != nil {
			return err
		}
		this, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{
			Name:              pulumi.String("Featurization"),
			MaxConcurrentRuns: pulumi.Int(1),
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("task1"),
					NewCluster: &databricks.JobTaskNewClusterArgs{
						NumWorkers:   pulumi.Int(300),
						SparkVersion: pulumi.String(latest.Id),
						NodeTypeId:   pulumi.String(smallest.Id),
					},
					NotebookTask: &databricks.JobTaskNotebookTaskArgs{
						NotebookPath: pulumi.String("/Production/MakeFeatures"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = databricks.NewPermissions(ctx, "job_usage", &databricks.PermissionsArgs{
			JobId: this.ID(),
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       pulumi.String("users"),
					PermissionLevel: pulumi.String("CAN_VIEW"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE_RUN"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE"),
				},
				// Ownership goes to the service principal (groups cannot own jobs).
				&databricks.PermissionsAccessControlArgs{
					ServicePrincipalName: awsPrincipal.ApplicationId,
					PermissionLevel:      pulumi.String("IS_OWNER"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Job usage: grant all four job permission levels, including IS_OWNER
// for a service principal.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });

    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });

    var awsPrincipal = new Databricks.ServicePrincipal("aws_principal", new()
    {
        DisplayName = "main",
    });

    // Latest Spark runtime and the smallest node type with local disk.
    var latest = Databricks.GetSparkVersion.Invoke();

    var smallest = Databricks.GetNodeType.Invoke(new()
    {
        LocalDisk = true,
    });

    // "this" is a keyword in C#, hence the @this escaped identifier.
    var @this = new Databricks.Job("this", new()
    {
        Name = "Featurization",
        MaxConcurrentRuns = 1,
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "task1",
                NewCluster = new Databricks.Inputs.JobTaskNewClusterArgs
                {
                    NumWorkers = 300,
                    SparkVersion = latest.Apply(getSparkVersionResult => getSparkVersionResult.Id),
                    NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
                },
                NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
                {
                    NotebookPath = "/Production/MakeFeatures",
                },
            },
        },
    });

    var jobUsage = new Databricks.Permissions("job_usage", new()
    {
        JobId = @this.Id,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_VIEW",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_MANAGE_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_MANAGE",
            },
            // Ownership goes to the service principal (groups cannot own jobs).
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                ServicePrincipalName = awsPrincipal.ApplicationId,
                PermissionLevel = "IS_OWNER",
            },
        },
    });
});
// Job usage: grant all four job permission levels, including IS_OWNER
// for a service principal.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.ServicePrincipal;
import com.pulumi.databricks.ServicePrincipalArgs;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetSparkVersionArgs;
import com.pulumi.databricks.inputs.GetNodeTypeArgs;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskNewClusterArgs;
import com.pulumi.databricks.inputs.JobTaskNotebookTaskArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());

        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());

        var awsPrincipal = new ServicePrincipal("awsPrincipal", ServicePrincipalArgs.builder()
            .displayName("main")
            .build());

        // Latest Spark runtime and the smallest node type with local disk.
        final var latest = DatabricksFunctions.getSparkVersion();

        final var smallest = DatabricksFunctions.getNodeType(GetNodeTypeArgs.builder()
            .localDisk(true)
            .build());

        // "this" is a keyword in Java, hence the this_ local name.
        var this_ = new Job("this", JobArgs.builder()
            .name("Featurization")
            .maxConcurrentRuns(1)
            .tasks(JobTaskArgs.builder()
                .taskKey("task1")
                .newCluster(JobTaskNewClusterArgs.builder()
                    .numWorkers(300)
                    .sparkVersion(latest.applyValue(getSparkVersionResult -> getSparkVersionResult.id()))
                    .nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
                    .build())
                .notebookTask(JobTaskNotebookTaskArgs.builder()
                    .notebookPath("/Production/MakeFeatures")
                    .build())
                .build())
            .build());

        var jobUsage = new Permissions("jobUsage", PermissionsArgs.builder()
            .jobId(this_.id())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName("users")
                    .permissionLevel("CAN_VIEW")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_MANAGE_RUN")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_MANAGE")
                    .build(),
                // Ownership goes to the service principal (groups cannot own jobs).
                PermissionsAccessControlArgs.builder()
                    .servicePrincipalName(awsPrincipal.applicationId())
                    .permissionLevel("IS_OWNER")
                    .build())
            .build());
    }
}
resources:
  auto:
    type: databricks:Group
    properties:
      displayName: Automation
  eng:
    type: databricks:Group
    properties:
      displayName: Engineering
  awsPrincipal:
    type: databricks:ServicePrincipal
    name: aws_principal
    properties:
      displayName: main
  this:
    type: databricks:Job
    properties:
      name: Featurization
      maxConcurrentRuns: 1
      tasks:
        - taskKey: task1
          newCluster:
            numWorkers: 300
            sparkVersion: ${latest.id}
            nodeTypeId: ${smallest.id}
          notebookTask:
            notebookPath: /Production/MakeFeatures
  # Authoritative job ACL: viewers, run managers, job managers, and a
  # service-principal owner.
  jobUsage:
    type: databricks:Permissions
    name: job_usage
    properties:
      jobId: ${this.id}
      accessControls:
        - groupName: users
          permissionLevel: CAN_VIEW
        - groupName: ${auto.displayName}
          permissionLevel: CAN_MANAGE_RUN
        - groupName: ${eng.displayName}
          permissionLevel: CAN_MANAGE
        - servicePrincipalName: ${awsPrincipal.applicationId}
          permissionLevel: IS_OWNER
# Data-source lookups used by the job's cluster definition above.
variables:
  latest:
    fn::invoke:
      Function: databricks:getSparkVersion
      Arguments: {}
  smallest:
    fn::invoke:
      Function: databricks:getNodeType
      Arguments:
        localDisk: true
Delta Live Tables usage
There are four assignable permission levels for databricks_pipeline: CAN_VIEW
, CAN_RUN
, CAN_MANAGE
, and IS_OWNER
. Admins are granted the CAN_MANAGE
permission by default, and they can assign that permission to non-admin users, and service principals.
- The creator of a DLT Pipeline has
IS_OWNER
permission. Destroying a databricks.Permissions
resource for a pipeline would revert ownership to the creator. - A DLT pipeline must have exactly one owner. If the resource is changed and no owner is specified, the currently authenticated principal becomes the new owner of the pipeline. Nothing would change, per se, if the pipeline was created through Pulumi.
- A DLT pipeline cannot have a group as an owner.
- DLT pipelines triggered through Start assume the permissions of the pipeline owner, not those of the user or service principal who issued Run Now.
- Read main documentation for additional detail.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
import * as std from "@pulumi/std";

// Current authenticated principal; used for the notebook path and pipeline name.
const me = databricks.getCurrentUser({});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
// Notebook carrying the DLT source, uploaded base64-encoded.
const dltDemo = new databricks.Notebook("dlt_demo", {
    contentBase64: std.base64encode({
        input: `import dlt
json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"
@dlt.table(
comment="The raw wikipedia clickstream dataset, ingested from /databricks-datasets."
)
def clickstream_raw():
return (spark.read.format("json").load(json_path))
`,
    }).then(invoke => invoke.result),
    language: "PYTHON",
    path: me.then(me => `${me.home}/DLT_Demo`),
});
const _this = new databricks.Pipeline("this", {
    name: me.then(me => `DLT Demo Pipeline (${me.alphanumeric})`),
    storage: "/test/tf-pipeline",
    configuration: {
        key1: "value1",
        key2: "value2",
    },
    libraries: [{
        notebook: {
            path: dltDemo.id,
        },
    }],
    continuous: false,
    filters: {
        includes: ["com.databricks.include"],
        excludes: ["com.databricks.exclude"],
    },
});
// Authoritative pipeline ACL: all users may view, Engineering may manage.
const dltUsage = new databricks.Permissions("dlt_usage", {
    pipelineId: _this.id,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_VIEW",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_MANAGE",
        },
    ],
});
import pulumi
import pulumi_databricks as databricks
import pulumi_std as std

# Current authenticated principal; used for the notebook path and pipeline name.
me = databricks.get_current_user()
eng = databricks.Group("eng", display_name="Engineering")
# Notebook carrying the DLT source, uploaded base64-encoded.
dlt_demo = databricks.Notebook("dlt_demo",
    content_base64=std.base64encode(input="""import dlt
json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"
@dlt.table(
comment="The raw wikipedia clickstream dataset, ingested from /databricks-datasets."
)
def clickstream_raw():
return (spark.read.format("json").load(json_path))
""").result,
    language="PYTHON",
    path=f"{me.home}/DLT_Demo")
this = databricks.Pipeline("this",
    name=f"DLT Demo Pipeline ({me.alphanumeric})",
    storage="/test/tf-pipeline",
    configuration={
        "key1": "value1",
        "key2": "value2",
    },
    libraries=[{
        "notebook": {
            "path": dlt_demo.id,
        },
    }],
    continuous=False,
    filters={
        "includes": ["com.databricks.include"],
        "excludes": ["com.databricks.exclude"],
    })
# Authoritative pipeline ACL: all users may view, Engineering may manage.
dlt_usage = databricks.Permissions("dlt_usage",
    pipeline_id=this.id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_VIEW",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_MANAGE",
        },
    ])
package main
import (
"fmt"
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi-std/sdk/go/std"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
me, err := databricks.GetCurrentUser(ctx, map[string]interface{}{}, nil)
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
invokeBase64encode, err := std.Base64encode(ctx, &std.Base64encodeArgs{
Input: `import dlt
json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"
@dlt.table(
comment="The raw wikipedia clickstream dataset, ingested from /databricks-datasets."
)
def clickstream_raw():
return (spark.read.format("json").load(json_path))
`,
}, nil)
if err != nil {
return err
}
dltDemo, err := databricks.NewNotebook(ctx, "dlt_demo", &databricks.NotebookArgs{
ContentBase64: pulumi.String(invokeBase64encode.Result),
Language: pulumi.String("PYTHON"),
Path: pulumi.Sprintf("%v/DLT_Demo", me.Home),
})
if err != nil {
return err
}
this, err := databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
Name: pulumi.Sprintf("DLT Demo Pipeline (%v)", me.Alphanumeric),
Storage: pulumi.String("/test/tf-pipeline"),
Configuration: pulumi.StringMap{
"key1": pulumi.String("value1"),
"key2": pulumi.String("value2"),
},
Libraries: databricks.PipelineLibraryArray{
&databricks.PipelineLibraryArgs{
Notebook: &databricks.PipelineLibraryNotebookArgs{
Path: dltDemo.ID(),
},
},
},
Continuous: pulumi.Bool(false),
Filters: &databricks.PipelineFiltersArgs{
Includes: pulumi.StringArray{
pulumi.String("com.databricks.include"),
},
Excludes: pulumi.StringArray{
pulumi.String("com.databricks.exclude"),
},
},
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "dlt_usage", &databricks.PermissionsArgs{
PipelineId: this.ID(),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: pulumi.String("users"),
PermissionLevel: pulumi.String("CAN_VIEW"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_MANAGE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
using Std = Pulumi.Std;

return await Deployment.RunAsync(() =>
{
    // Current authenticated principal; used for the notebook path and pipeline name.
    var me = Databricks.GetCurrentUser.Invoke();
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    // Notebook carrying the DLT source, uploaded base64-encoded.
    var dltDemo = new Databricks.Notebook("dlt_demo", new()
    {
        ContentBase64 = Std.Base64encode.Invoke(new()
        {
            Input = @"import dlt
json_path = ""/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json""
@dlt.table(
comment=""The raw wikipedia clickstream dataset, ingested from /databricks-datasets.""
)
def clickstream_raw():
return (spark.read.format(""json"").load(json_path))
",
        }).Apply(invoke => invoke.Result),
        Language = "PYTHON",
        Path = $"{me.Apply(getCurrentUserResult => getCurrentUserResult.Home)}/DLT_Demo",
    });
    var @this = new Databricks.Pipeline("this", new()
    {
        Name = $"DLT Demo Pipeline ({me.Apply(getCurrentUserResult => getCurrentUserResult.Alphanumeric)})",
        Storage = "/test/tf-pipeline",
        Configuration =
        {
            { "key1", "value1" },
            { "key2", "value2" },
        },
        Libraries = new[]
        {
            new Databricks.Inputs.PipelineLibraryArgs
            {
                Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
                {
                    Path = dltDemo.Id,
                },
            },
        },
        Continuous = false,
        Filters = new Databricks.Inputs.PipelineFiltersArgs
        {
            Includes = new[]
            {
                "com.databricks.include",
            },
            Excludes = new[]
            {
                "com.databricks.exclude",
            },
        },
    });
    // Authoritative pipeline ACL: all users may view, Engineering may manage.
    var dltUsage = new Databricks.Permissions("dlt_usage", new()
    {
        PipelineId = @this.Id,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_VIEW",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_MANAGE",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Notebook;
import com.pulumi.databricks.NotebookArgs;
import com.pulumi.databricks.Pipeline;
import com.pulumi.databricks.PipelineArgs;
import com.pulumi.databricks.inputs.PipelineLibraryArgs;
import com.pulumi.databricks.inputs.PipelineLibraryNotebookArgs;
import com.pulumi.databricks.inputs.PipelineFiltersArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var me = DatabricksFunctions.getCurrentUser();
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var dltDemo = new Notebook("dltDemo", NotebookArgs.builder()
.contentBase64(StdFunctions.base64encode(Base64encodeArgs.builder()
.input("""
import dlt
json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"
@dlt.table(
comment="The raw wikipedia clickstream dataset, ingested from /databricks-datasets."
)
def clickstream_raw():
return (spark.read.format("json").load(json_path))
""")
.build()).result())
.language("PYTHON")
.path(String.format("%s/DLT_Demo", me.applyValue(getCurrentUserResult -> getCurrentUserResult.home())))
.build());
var this_ = new Pipeline("this", PipelineArgs.builder()
.name(String.format("DLT Demo Pipeline (%s)", me.applyValue(getCurrentUserResult -> getCurrentUserResult.alphanumeric())))
.storage("/test/tf-pipeline")
.configuration(Map.ofEntries(
Map.entry("key1", "value1"),
Map.entry("key2", "value2")
))
.libraries(PipelineLibraryArgs.builder()
.notebook(PipelineLibraryNotebookArgs.builder()
.path(dltDemo.id())
.build())
.build())
.continuous(false)
.filters(PipelineFiltersArgs.builder()
.includes("com.databricks.include")
.excludes("com.databricks.exclude")
.build())
.build());
var dltUsage = new Permissions("dltUsage", PermissionsArgs.builder()
.pipelineId(this_.id())
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName("users")
.permissionLevel("CAN_VIEW")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_MANAGE")
.build())
.build());
}
}
resources:
  eng:
    type: databricks:Group
    properties:
      displayName: Engineering
  # Notebook carrying the DLT source; content is uploaded base64-encoded.
  dltDemo:
    type: databricks:Notebook
    name: dlt_demo
    properties:
      contentBase64:
        fn::invoke:
          Function: std:base64encode
          Arguments:
            input: |
              import dlt
              json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"
              @dlt.table(
              comment="The raw wikipedia clickstream dataset, ingested from /databricks-datasets."
              )
              def clickstream_raw():
              return (spark.read.format("json").load(json_path))
          Return: result
      language: PYTHON
      path: ${me.home}/DLT_Demo
  this:
    type: databricks:Pipeline
    properties:
      name: DLT Demo Pipeline (${me.alphanumeric})
      storage: /test/tf-pipeline
      configuration:
        key1: value1
        key2: value2
      libraries:
        - notebook:
            path: ${dltDemo.id}
      continuous: false
      filters:
        includes:
          - com.databricks.include
        excludes:
          - com.databricks.exclude
  # Authoritative pipeline ACL: all users may view, Engineering may manage.
  dltUsage:
    type: databricks:Permissions
    name: dlt_usage
    properties:
      pipelineId: ${this.id}
      accessControls:
        - groupName: users
          permissionLevel: CAN_VIEW
        - groupName: ${eng.displayName}
          permissionLevel: CAN_MANAGE
# Current authenticated principal, referenced by the notebook and pipeline.
variables:
  me:
    fn::invoke:
      Function: databricks:getCurrentUser
      Arguments: {}
Notebook usage
Valid permission levels for databricks.Notebook are: CAN_READ
, CAN_RUN
, CAN_EDIT
, and CAN_MANAGE
.
A notebook could be specified by using either notebook_path
or notebook_id
attribute. The value for the notebook_id
is the object ID of the resource in the Databricks Workspace that is exposed as object_id
attribute of the databricks.Notebook
resource as shown below.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
import * as std from "@pulumi/std";

const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const _this = new databricks.Notebook("this", {
    contentBase64: std.base64encode({
        input: "# Welcome to your Python notebook",
    }).then(invoke => invoke.result),
    path: "/Production/ETL/Features",
    language: "PYTHON",
});
// Identical ACLs can be attached either by workspace path...
const notebookUsageByPath = new databricks.Permissions("notebook_usage_by_path", {
    notebookPath: _this.path,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
// ...or by the notebook's workspace object ID.
const notebookUsageById = new databricks.Permissions("notebook_usage_by_id", {
    notebookId: _this.objectId,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
import pulumi
import pulumi_databricks as databricks
import pulumi_std as std

# Groups that receive run/edit rights on the notebook.
automation_group = databricks.Group("auto", display_name="Automation")
engineering_group = databricks.Group("eng", display_name="Engineering")

notebook = databricks.Notebook(
    "this",
    content_base64=std.base64encode(input="# Welcome to your Python notebook").result,
    path="/Production/ETL/Features",
    language="PYTHON",
)


def _notebook_acl():
    # Return a fresh list per call so the two Permissions resources don't
    # share a mutable argument.
    return [
        {"group_name": "users", "permission_level": "CAN_READ"},
        {"group_name": automation_group.display_name, "permission_level": "CAN_RUN"},
        {"group_name": engineering_group.display_name, "permission_level": "CAN_EDIT"},
    ]


# The same ACL can be attached either by workspace path...
notebook_usage_by_path = databricks.Permissions(
    "notebook_usage_by_path",
    notebook_path=notebook.path,
    access_controls=_notebook_acl(),
)
# ...or by the notebook's workspace object ID.
notebook_usage_by_id = databricks.Permissions(
    "notebook_usage_by_id",
    notebook_id=notebook.object_id,
    access_controls=_notebook_acl(),
)
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi-std/sdk/go/std"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
invokeBase64encode, err := std.Base64encode(ctx, &std.Base64encodeArgs{
Input: "# Welcome to your Python notebook",
}, nil)
if err != nil {
return err
}
this, err := databricks.NewNotebook(ctx, "this", &databricks.NotebookArgs{
ContentBase64: pulumi.String(invokeBase64encode.Result),
Path: pulumi.String("/Production/ETL/Features"),
Language: pulumi.String("PYTHON"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "notebook_usage_by_path", &databricks.PermissionsArgs{
NotebookPath: this.Path,
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: pulumi.String("users"),
PermissionLevel: pulumi.String("CAN_READ"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_EDIT"),
},
},
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "notebook_usage_by_id", &databricks.PermissionsArgs{
NotebookId: this.ObjectId,
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: pulumi.String("users"),
PermissionLevel: pulumi.String("CAN_READ"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_EDIT"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
using Std = Pulumi.Std;

return await Deployment.RunAsync(() =>
{
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    // Notebook content must be supplied base64-encoded.
    var @this = new Databricks.Notebook("this", new()
    {
        ContentBase64 = Std.Base64encode.Invoke(new()
        {
            Input = "# Welcome to your Python notebook",
        }).Apply(invoke => invoke.Result),
        Path = "/Production/ETL/Features",
        Language = "PYTHON",
    });
    // Identical ACLs can be attached either by workspace path...
    var notebookUsageByPath = new Databricks.Permissions("notebook_usage_by_path", new()
    {
        NotebookPath = @this.Path,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
    // ...or by the notebook's workspace object ID.
    var notebookUsageById = new Databricks.Permissions("notebook_usage_by_id", new()
    {
        NotebookId = @this.ObjectId,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Notebook;
import com.pulumi.databricks.NotebookArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var this_ = new Notebook("this", NotebookArgs.builder()
.contentBase64(StdFunctions.base64encode(Base64encodeArgs.builder()
.input("# Welcome to your Python notebook")
.build()).result())
.path("/Production/ETL/Features")
.language("PYTHON")
.build());
var notebookUsageByPath = new Permissions("notebookUsageByPath", PermissionsArgs.builder()
.notebookPath(this_.path())
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName("users")
.permissionLevel("CAN_READ")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_EDIT")
.build())
.build());
var notebookUsageById = new Permissions("notebookUsageById", PermissionsArgs.builder()
.notebookId(this_.objectId())
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName("users")
.permissionLevel("CAN_READ")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_EDIT")
.build())
.build());
}
}
resources:
  auto:
    type: databricks:Group
    properties:
      displayName: Automation
  eng:
    type: databricks:Group
    properties:
      displayName: Engineering
  this:
    type: databricks:Notebook
    properties:
      contentBase64:
        fn::invoke:
          Function: std:base64encode
          Arguments:
            input: '# Welcome to your Python notebook'
          Return: result
      path: /Production/ETL/Features
      language: PYTHON
  # The same ACL can be attached either by workspace path...
  notebookUsageByPath:
    type: databricks:Permissions
    name: notebook_usage_by_path
    properties:
      notebookPath: ${this.path}
      accessControls:
        - groupName: users
          permissionLevel: CAN_READ
        - groupName: ${auto.displayName}
          permissionLevel: CAN_RUN
        - groupName: ${eng.displayName}
          permissionLevel: CAN_EDIT
  # ...or by the notebook's workspace object ID.
  notebookUsageById:
    type: databricks:Permissions
    name: notebook_usage_by_id
    properties:
      notebookId: ${this.objectId}
      accessControls:
        - groupName: users
          permissionLevel: CAN_READ
        - groupName: ${auto.displayName}
          permissionLevel: CAN_RUN
        - groupName: ${eng.displayName}
          permissionLevel: CAN_EDIT
when importing a permissions resource, only the
notebook_id
is filled!
Workspace file usage
Valid permission levels for databricks.WorkspaceFile are: CAN_READ
, CAN_RUN
, CAN_EDIT
, and CAN_MANAGE
.
A workspace file could be specified by using either workspace_file_path
or workspace_file_id
attribute. The value for the workspace_file_id
is the object ID of the resource in the Databricks Workspace that is exposed as object_id
attribute of the databricks.WorkspaceFile
resource as shown below.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
import * as std from "@pulumi/std";

const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
// Workspace file content must be supplied base64-encoded.
const _this = new databricks.WorkspaceFile("this", {
    contentBase64: std.base64encode({
        input: "print('Hello World')",
    }).then(invoke => invoke.result),
    path: "/Production/ETL/Features.py",
});
// Identical ACLs can be attached either by workspace path...
const workspaceFileUsageByPath = new databricks.Permissions("workspace_file_usage_by_path", {
    workspaceFilePath: _this.path,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
// ...or by the file's workspace object ID.
const workspaceFileUsageById = new databricks.Permissions("workspace_file_usage_by_id", {
    workspaceFileId: _this.objectId,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
import pulumi
import pulumi_databricks as databricks
import pulumi_std as std

auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
# Workspace file content must be supplied base64-encoded.
this = databricks.WorkspaceFile("this",
    content_base64=std.base64encode(input="print('Hello World')").result,
    path="/Production/ETL/Features.py")
# The same ACL can be attached either by workspace path...
workspace_file_usage_by_path = databricks.Permissions("workspace_file_usage_by_path",
    workspace_file_path=this.path,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_READ",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_RUN",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_EDIT",
        },
    ])
# ...or by the file's workspace object ID.
workspace_file_usage_by_id = databricks.Permissions("workspace_file_usage_by_id",
    workspace_file_id=this.object_id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_READ",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_RUN",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_EDIT",
        },
    ])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi-std/sdk/go/std"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
invokeBase64encode, err := std.Base64encode(ctx, &std.Base64encodeArgs{
Input: "print('Hello World')",
}, nil)
if err != nil {
return err
}
this, err := databricks.NewWorkspaceFile(ctx, "this", &databricks.WorkspaceFileArgs{
ContentBase64: pulumi.String(invokeBase64encode.Result),
Path: pulumi.String("/Production/ETL/Features.py"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "workspace_file_usage_by_path", &databricks.PermissionsArgs{
WorkspaceFilePath: this.Path,
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: pulumi.String("users"),
PermissionLevel: pulumi.String("CAN_READ"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_EDIT"),
},
},
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "workspace_file_usage_by_id", &databricks.PermissionsArgs{
WorkspaceFileId: this.ObjectId,
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: pulumi.String("users"),
PermissionLevel: pulumi.String("CAN_READ"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_EDIT"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
using Std = Pulumi.Std;

return await Deployment.RunAsync(() =>
{
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    // Workspace file content must be supplied base64-encoded.
    var @this = new Databricks.WorkspaceFile("this", new()
    {
        ContentBase64 = Std.Base64encode.Invoke(new()
        {
            Input = "print('Hello World')",
        }).Apply(invoke => invoke.Result),
        Path = "/Production/ETL/Features.py",
    });
    // Identical ACLs can be attached either by workspace path...
    var workspaceFileUsageByPath = new Databricks.Permissions("workspace_file_usage_by_path", new()
    {
        WorkspaceFilePath = @this.Path,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
    // ...or by the file's workspace object ID.
    var workspaceFileUsageById = new Databricks.Permissions("workspace_file_usage_by_id", new()
    {
        WorkspaceFileId = @this.ObjectId,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.WorkspaceFile;
import com.pulumi.databricks.WorkspaceFileArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var this_ = new WorkspaceFile("this", WorkspaceFileArgs.builder()
.contentBase64(StdFunctions.base64encode(Base64encodeArgs.builder()
.input("print('Hello World')")
.build()).result())
.path("/Production/ETL/Features.py")
.build());
var workspaceFileUsageByPath = new Permissions("workspaceFileUsageByPath", PermissionsArgs.builder()
.workspaceFilePath(this_.path())
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName("users")
.permissionLevel("CAN_READ")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_EDIT")
.build())
.build());
var workspaceFileUsageById = new Permissions("workspaceFileUsageById", PermissionsArgs.builder()
.workspaceFileId(this_.objectId())
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName("users")
.permissionLevel("CAN_READ")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_EDIT")
.build())
.build());
}
}
resources:
  auto:
    type: databricks:Group
    properties:
      displayName: Automation
  eng:
    type: databricks:Group
    properties:
      displayName: Engineering
  this:
    type: databricks:WorkspaceFile
    properties:
      contentBase64:
        fn::invoke:
          Function: std:base64encode
          Arguments:
            input: print('Hello World')
          Return: result
      path: /Production/ETL/Features.py
  # The same ACL can be attached either by workspace path...
  workspaceFileUsageByPath:
    type: databricks:Permissions
    name: workspace_file_usage_by_path
    properties:
      workspaceFilePath: ${this.path}
      accessControls:
        - groupName: users
          permissionLevel: CAN_READ
        - groupName: ${auto.displayName}
          permissionLevel: CAN_RUN
        - groupName: ${eng.displayName}
          permissionLevel: CAN_EDIT
  # ...or by the file's workspace object ID.
  workspaceFileUsageById:
    type: databricks:Permissions
    name: workspace_file_usage_by_id
    properties:
      workspaceFileId: ${this.objectId}
      accessControls:
        - groupName: users
          permissionLevel: CAN_READ
        - groupName: ${auto.displayName}
          permissionLevel: CAN_RUN
        - groupName: ${eng.displayName}
          permissionLevel: CAN_EDIT
when importing a permissions resource, only the
workspace_file_id
is filled!
Folder usage
Valid permission levels for folders of databricks.Directory are: CAN_READ
, CAN_RUN
, CAN_EDIT
, and CAN_MANAGE
. Notebooks and experiments in a folder inherit all permissions settings of that folder. For example, a user (or service principal) that has CAN_RUN
permission on a folder has CAN_RUN
permission on the notebooks in that folder.
- All users can list items in the folder without any permissions.
- All users (or service principals) have
CAN_MANAGE
permission for items in the Workspace > Shared folder. You can grant CAN_MANAGE
permission to notebooks and folders by moving them to the Shared folder. - All users (or service principals) have
CAN_MANAGE
permission for objects the user creates. - User home directory - The user (or service principal) has
CAN_MANAGE
permission. All other users (or service principals) can list their directories.
A folder could be specified by using either directory_path
or directory_id
attribute. The value for the directory_id
is the object ID of the resource in the Databricks Workspace that is exposed as object_id
attribute of the databricks.Directory
resource as shown below.
// Example: grant folder permissions both by directory path and by directory object ID.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const _this = new databricks.Directory("this", {path: "/Production/ETL"});
// Address the folder by its workspace path.
const folderUsageByPath = new databricks.Permissions("folder_usage_by_path", {
    directoryPath: _this.path,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
// Address the same folder by its object ID instead of its path.
const folderUsageById = new databricks.Permissions("folder_usage_by_id", {
    directoryId: _this.objectId,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
# Example: grant folder permissions both by directory path and by directory object ID.
import pulumi
import pulumi_databricks as databricks
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
this = databricks.Directory("this", path="/Production/ETL")
# Address the folder by its workspace path.
folder_usage_by_path = databricks.Permissions("folder_usage_by_path",
    directory_path=this.path,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_READ",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_RUN",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_EDIT",
        },
    ])
# Address the same folder by its object ID instead of its path.
folder_usage_by_id = databricks.Permissions("folder_usage_by_id",
    directory_id=this.object_id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_READ",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_RUN",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_EDIT",
        },
    ])
// Example: grant folder permissions both by directory path and by directory object ID.
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		this, err := databricks.NewDirectory(ctx, "this", &databricks.DirectoryArgs{
			Path: pulumi.String("/Production/ETL"),
		})
		if err != nil {
			return err
		}
		// Address the folder by its workspace path.
		_, err = databricks.NewPermissions(ctx, "folder_usage_by_path", &databricks.PermissionsArgs{
			DirectoryPath: this.Path,
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       pulumi.String("users"),
					PermissionLevel: pulumi.String("CAN_READ"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_RUN"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_EDIT"),
				},
			},
		})
		if err != nil {
			return err
		}
		// Address the same folder by its object ID instead of its path.
		_, err = databricks.NewPermissions(ctx, "folder_usage_by_id", &databricks.PermissionsArgs{
			DirectoryId: this.ObjectId,
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       pulumi.String("users"),
					PermissionLevel: pulumi.String("CAN_READ"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_RUN"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_EDIT"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: grant folder permissions both by directory path and by directory object ID.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    var @this = new Databricks.Directory("this", new()
    {
        Path = "/Production/ETL",
    });
    // Address the folder by its workspace path.
    var folderUsageByPath = new Databricks.Permissions("folder_usage_by_path", new()
    {
        DirectoryPath = @this.Path,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
    // Address the same folder by its object ID instead of its path.
    var folderUsageById = new Databricks.Permissions("folder_usage_by_id", new()
    {
        DirectoryId = @this.ObjectId,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
});
// Example: grant folder permissions both by directory path and by directory object ID.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Directory;
import com.pulumi.databricks.DirectoryArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());
        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());
        var this_ = new Directory("this", DirectoryArgs.builder()
            .path("/Production/ETL")
            .build());
        // Address the folder by its workspace path.
        var folderUsageByPath = new Permissions("folderUsageByPath", PermissionsArgs.builder()
            .directoryPath(this_.path())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName("users")
                    .permissionLevel("CAN_READ")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_RUN")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_EDIT")
                    .build())
            .build());
        // Address the same folder by its object ID instead of its path.
        var folderUsageById = new Permissions("folderUsageById", PermissionsArgs.builder()
            .directoryId(this_.objectId())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName("users")
                    .permissionLevel("CAN_READ")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_RUN")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_EDIT")
                    .build())
            .build());
    }
}
# Example: grant folder permissions both by directory path and by directory object ID.
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
this:
type: databricks:Directory
properties:
path: /Production/ETL
# Address the folder by its workspace path.
folderUsageByPath:
type: databricks:Permissions
name: folder_usage_by_path
properties:
directoryPath: ${this.path}
accessControls:
- groupName: users
permissionLevel: CAN_READ
- groupName: ${auto.displayName}
permissionLevel: CAN_RUN
- groupName: ${eng.displayName}
permissionLevel: CAN_EDIT
# Address the same folder by its object ID instead of its path.
folderUsageById:
type: databricks:Permissions
name: folder_usage_by_id
properties:
directoryId: ${this.objectId}
accessControls:
- groupName: users
permissionLevel: CAN_READ
- groupName: ${auto.displayName}
permissionLevel: CAN_RUN
- groupName: ${eng.displayName}
permissionLevel: CAN_EDIT
When importing a permissions resource, only the
directory_id
attribute is filled in!
Repos usage
Valid permission levels for databricks.Repo are: CAN_READ
, CAN_RUN
, CAN_EDIT
, and CAN_MANAGE
.
// Example: grant CAN_READ / CAN_RUN / CAN_EDIT on a Databricks Repo.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const _this = new databricks.Repo("this", {url: "https://github.com/user/demo.git"});
const repoUsage = new databricks.Permissions("repo_usage", {
    repoId: _this.id,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_RUN",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
# Example: grant CAN_READ / CAN_RUN / CAN_EDIT on a Databricks Repo.
import pulumi
import pulumi_databricks as databricks
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
this = databricks.Repo("this", url="https://github.com/user/demo.git")
repo_usage = databricks.Permissions("repo_usage",
    repo_id=this.id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_READ",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_RUN",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_EDIT",
        },
    ])
// Example: grant CAN_READ / CAN_RUN / CAN_EDIT on a Databricks Repo.
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		this, err := databricks.NewRepo(ctx, "this", &databricks.RepoArgs{
			Url: pulumi.String("https://github.com/user/demo.git"),
		})
		if err != nil {
			return err
		}
		// The repo is addressed by its resource ID.
		_, err = databricks.NewPermissions(ctx, "repo_usage", &databricks.PermissionsArgs{
			RepoId: this.ID(),
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       pulumi.String("users"),
					PermissionLevel: pulumi.String("CAN_READ"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_RUN"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_EDIT"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: grant CAN_READ / CAN_RUN / CAN_EDIT on a Databricks Repo.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    var @this = new Databricks.Repo("this", new()
    {
        Url = "https://github.com/user/demo.git",
    });
    var repoUsage = new Databricks.Permissions("repo_usage", new()
    {
        RepoId = @this.Id,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_RUN",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
});
// Example: grant CAN_READ / CAN_RUN / CAN_EDIT on a Databricks Repo.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Repo;
import com.pulumi.databricks.RepoArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());
        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());
        var this_ = new Repo("this", RepoArgs.builder()
            .url("https://github.com/user/demo.git")
            .build());
        // The repo is addressed by its resource ID.
        var repoUsage = new Permissions("repoUsage", PermissionsArgs.builder()
            .repoId(this_.id())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName("users")
                    .permissionLevel("CAN_READ")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_RUN")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_EDIT")
                    .build())
            .build());
    }
}
# Example: grant CAN_READ / CAN_RUN / CAN_EDIT on a Databricks Repo.
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
this:
type: databricks:Repo
properties:
url: https://github.com/user/demo.git
# The repo is addressed by its resource ID.
repoUsage:
type: databricks:Permissions
name: repo_usage
properties:
repoId: ${this.id}
accessControls:
- groupName: users
permissionLevel: CAN_READ
- groupName: ${auto.displayName}
permissionLevel: CAN_RUN
- groupName: ${eng.displayName}
permissionLevel: CAN_EDIT
MLflow Experiment usage
Valid permission levels for databricks.MlflowExperiment are: CAN_READ
, CAN_EDIT
, and CAN_MANAGE
.
// Example: grant CAN_READ / CAN_EDIT / CAN_MANAGE on an MLflow experiment.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const me = databricks.getCurrentUser({});
const _this = new databricks.MlflowExperiment("this", {
    name: me.then(me => `${me.home}/Sample`),
    artifactLocation: "dbfs:/tmp/my-experiment",
    description: "My MLflow experiment description",
});
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const experimentUsage = new databricks.Permissions("experiment_usage", {
    experimentId: _this.id,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_MANAGE",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_EDIT",
        },
    ],
});
# Example: grant CAN_READ / CAN_EDIT / CAN_MANAGE on an MLflow experiment.
import pulumi
import pulumi_databricks as databricks
me = databricks.get_current_user()
this = databricks.MlflowExperiment("this",
    name=f"{me.home}/Sample",
    artifact_location="dbfs:/tmp/my-experiment",
    description="My MLflow experiment description")
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
experiment_usage = databricks.Permissions("experiment_usage",
    experiment_id=this.id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_READ",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_MANAGE",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_EDIT",
        },
    ])
// Example: grant CAN_READ / CAN_EDIT / CAN_MANAGE on an MLflow experiment.
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		me, err := databricks.GetCurrentUser(ctx, map[string]interface{}{}, nil)
		if err != nil {
			return err
		}
		// The experiment is created under the current user's home folder.
		this, err := databricks.NewMlflowExperiment(ctx, "this", &databricks.MlflowExperimentArgs{
			Name:             pulumi.Sprintf("%v/Sample", me.Home),
			ArtifactLocation: pulumi.String("dbfs:/tmp/my-experiment"),
			Description:      pulumi.String("My MLflow experiment description"),
		})
		if err != nil {
			return err
		}
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		_, err = databricks.NewPermissions(ctx, "experiment_usage", &databricks.PermissionsArgs{
			ExperimentId: this.ID(),
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       pulumi.String("users"),
					PermissionLevel: pulumi.String("CAN_READ"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_EDIT"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: grant CAN_READ / CAN_EDIT / CAN_MANAGE on an MLflow experiment.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
    var me = Databricks.GetCurrentUser.Invoke();
    // The experiment is created under the current user's home folder.
    var @this = new Databricks.MlflowExperiment("this", new()
    {
        Name = $"{me.Apply(getCurrentUserResult => getCurrentUserResult.Home)}/Sample",
        ArtifactLocation = "dbfs:/tmp/my-experiment",
        Description = "My MLflow experiment description",
    });
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    var experimentUsage = new Databricks.Permissions("experiment_usage", new()
    {
        ExperimentId = @this.Id,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_MANAGE",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_EDIT",
            },
        },
    });
});
// Example: grant CAN_READ / CAN_EDIT / CAN_MANAGE on an MLflow experiment.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.MlflowExperiment;
import com.pulumi.databricks.MlflowExperimentArgs;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var me = DatabricksFunctions.getCurrentUser();
        // The experiment is created under the current user's home folder.
        var this_ = new MlflowExperiment("this", MlflowExperimentArgs.builder()
            .name(String.format("%s/Sample", me.applyValue(getCurrentUserResult -> getCurrentUserResult.home())))
            .artifactLocation("dbfs:/tmp/my-experiment")
            .description("My MLflow experiment description")
            .build());
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());
        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());
        var experimentUsage = new Permissions("experimentUsage", PermissionsArgs.builder()
            .experimentId(this_.id())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName("users")
                    .permissionLevel("CAN_READ")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_MANAGE")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_EDIT")
                    .build())
            .build());
    }
}
# Example: grant CAN_READ / CAN_EDIT / CAN_MANAGE on an MLflow experiment.
resources:
this:
type: databricks:MlflowExperiment
properties:
name: ${me.home}/Sample
artifactLocation: dbfs:/tmp/my-experiment
description: My MLflow experiment description
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
experimentUsage:
type: databricks:Permissions
name: experiment_usage
properties:
experimentId: ${this.id}
accessControls:
- groupName: users
permissionLevel: CAN_READ
- groupName: ${auto.displayName}
permissionLevel: CAN_MANAGE
- groupName: ${eng.displayName}
permissionLevel: CAN_EDIT
# Resolves the current user so the experiment can live in their home folder.
variables:
me:
fn::invoke:
Function: databricks:getCurrentUser
Arguments: {}
MLflow Model usage
Valid permission levels for databricks.MlflowModel are: CAN_READ
, CAN_EDIT
, CAN_MANAGE_STAGING_VERSIONS
, CAN_MANAGE_PRODUCTION_VERSIONS
, and CAN_MANAGE
. You can also manage permissions for all MLflow models by setting registered_model_id = "root"
.
// Example: grant read and stage/production version-management rights on an MLflow model.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const _this = new databricks.MlflowModel("this", {name: "SomePredictions"});
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const modelUsage = new databricks.Permissions("model_usage", {
    registeredModelId: _this.registeredModelId,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_READ",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_MANAGE_PRODUCTION_VERSIONS",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_MANAGE_STAGING_VERSIONS",
        },
    ],
});
# Example: grant read and stage/production version-management rights on an MLflow model.
import pulumi
import pulumi_databricks as databricks
this = databricks.MlflowModel("this", name="SomePredictions")
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
model_usage = databricks.Permissions("model_usage",
    registered_model_id=this.registered_model_id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_READ",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_MANAGE_PRODUCTION_VERSIONS",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_MANAGE_STAGING_VERSIONS",
        },
    ])
// Example: grant read and stage/production version-management rights on an MLflow model.
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		this, err := databricks.NewMlflowModel(ctx, "this", &databricks.MlflowModelArgs{
			Name: pulumi.String("SomePredictions"),
		})
		if err != nil {
			return err
		}
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		// The model is addressed by its registered-model ID, not the resource ID.
		_, err = databricks.NewPermissions(ctx, "model_usage", &databricks.PermissionsArgs{
			RegisteredModelId: this.RegisteredModelId,
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       pulumi.String("users"),
					PermissionLevel: pulumi.String("CAN_READ"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE_PRODUCTION_VERSIONS"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE_STAGING_VERSIONS"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: grant read and stage/production version-management rights on an MLflow model.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
    var @this = new Databricks.MlflowModel("this", new()
    {
        Name = "SomePredictions",
    });
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    // The model is addressed by its registered-model ID, not the resource ID.
    var modelUsage = new Databricks.Permissions("model_usage", new()
    {
        RegisteredModelId = @this.RegisteredModelId,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_READ",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_MANAGE_PRODUCTION_VERSIONS",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_MANAGE_STAGING_VERSIONS",
            },
        },
    });
});
// Example: grant read and stage/production version-management rights on an MLflow model.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.MlflowModel;
import com.pulumi.databricks.MlflowModelArgs;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var this_ = new MlflowModel("this", MlflowModelArgs.builder()
            .name("SomePredictions")
            .build());
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());
        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());
        // The model is addressed by its registered-model ID, not the resource ID.
        var modelUsage = new Permissions("modelUsage", PermissionsArgs.builder()
            .registeredModelId(this_.registeredModelId())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName("users")
                    .permissionLevel("CAN_READ")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_MANAGE_PRODUCTION_VERSIONS")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_MANAGE_STAGING_VERSIONS")
                    .build())
            .build());
    }
}
# Example: grant read and stage/production version-management rights on an MLflow model.
resources:
this:
type: databricks:MlflowModel
properties:
name: SomePredictions
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
# The model is addressed by its registered-model ID, not the resource ID.
modelUsage:
type: databricks:Permissions
name: model_usage
properties:
registeredModelId: ${this.registeredModelId}
accessControls:
- groupName: users
permissionLevel: CAN_READ
- groupName: ${auto.displayName}
permissionLevel: CAN_MANAGE_PRODUCTION_VERSIONS
- groupName: ${eng.displayName}
permissionLevel: CAN_MANAGE_STAGING_VERSIONS
Model serving usage
Valid permission levels for databricks.ModelServing are: CAN_VIEW
, CAN_QUERY
, and CAN_MANAGE
.
// Example: grant CAN_VIEW / CAN_QUERY / CAN_MANAGE on a model-serving endpoint.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const _this = new databricks.ModelServing("this", {
    name: "tf-test",
    config: {
        servedModels: [{
            name: "prod_model",
            modelName: "test",
            modelVersion: "1",
            workloadSize: "Small",
            scaleToZeroEnabled: true,
        }],
    },
});
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const mlServingUsage = new databricks.Permissions("ml_serving_usage", {
    servingEndpointId: _this.servingEndpointId,
    accessControls: [
        {
            groupName: "users",
            permissionLevel: "CAN_VIEW",
        },
        {
            groupName: auto.displayName,
            permissionLevel: "CAN_MANAGE",
        },
        {
            groupName: eng.displayName,
            permissionLevel: "CAN_QUERY",
        },
    ],
});
# Example: grant CAN_VIEW / CAN_QUERY / CAN_MANAGE on a model-serving endpoint.
import pulumi
import pulumi_databricks as databricks
this = databricks.ModelServing("this",
    name="tf-test",
    config={
        "served_models": [{
            "name": "prod_model",
            "model_name": "test",
            "model_version": "1",
            "workload_size": "Small",
            "scale_to_zero_enabled": True,
        }],
    })
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
ml_serving_usage = databricks.Permissions("ml_serving_usage",
    serving_endpoint_id=this.serving_endpoint_id,
    access_controls=[
        {
            "group_name": "users",
            "permission_level": "CAN_VIEW",
        },
        {
            "group_name": auto.display_name,
            "permission_level": "CAN_MANAGE",
        },
        {
            "group_name": eng.display_name,
            "permission_level": "CAN_QUERY",
        },
    ])
// Example: grant CAN_VIEW / CAN_QUERY / CAN_MANAGE on a model-serving endpoint.
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		this, err := databricks.NewModelServing(ctx, "this", &databricks.ModelServingArgs{
			Name: pulumi.String("tf-test"),
			Config: &databricks.ModelServingConfigArgs{
				ServedModels: databricks.ModelServingConfigServedModelArray{
					&databricks.ModelServingConfigServedModelArgs{
						Name:               pulumi.String("prod_model"),
						ModelName:          pulumi.String("test"),
						ModelVersion:       pulumi.String("1"),
						WorkloadSize:       pulumi.String("Small"),
						ScaleToZeroEnabled: pulumi.Bool(true),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
			DisplayName: pulumi.String("Automation"),
		})
		if err != nil {
			return err
		}
		eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
			DisplayName: pulumi.String("Engineering"),
		})
		if err != nil {
			return err
		}
		// The endpoint is addressed by its serving-endpoint ID.
		_, err = databricks.NewPermissions(ctx, "ml_serving_usage", &databricks.PermissionsArgs{
			ServingEndpointId: this.ServingEndpointId,
			AccessControls: databricks.PermissionsAccessControlArray{
				&databricks.PermissionsAccessControlArgs{
					GroupName:       pulumi.String("users"),
					PermissionLevel: pulumi.String("CAN_VIEW"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       auto.DisplayName,
					PermissionLevel: pulumi.String("CAN_MANAGE"),
				},
				&databricks.PermissionsAccessControlArgs{
					GroupName:       eng.DisplayName,
					PermissionLevel: pulumi.String("CAN_QUERY"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: grant CAN_VIEW / CAN_QUERY / CAN_MANAGE on a model-serving endpoint.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
    var @this = new Databricks.ModelServing("this", new()
    {
        Name = "tf-test",
        Config = new Databricks.Inputs.ModelServingConfigArgs
        {
            ServedModels = new[]
            {
                new Databricks.Inputs.ModelServingConfigServedModelArgs
                {
                    Name = "prod_model",
                    ModelName = "test",
                    ModelVersion = "1",
                    WorkloadSize = "Small",
                    ScaleToZeroEnabled = true,
                },
            },
        },
    });
    var auto = new Databricks.Group("auto", new()
    {
        DisplayName = "Automation",
    });
    var eng = new Databricks.Group("eng", new()
    {
        DisplayName = "Engineering",
    });
    // The endpoint is addressed by its serving-endpoint ID.
    var mlServingUsage = new Databricks.Permissions("ml_serving_usage", new()
    {
        ServingEndpointId = @this.ServingEndpointId,
        AccessControls = new[]
        {
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = "users",
                PermissionLevel = "CAN_VIEW",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = auto.DisplayName,
                PermissionLevel = "CAN_MANAGE",
            },
            new Databricks.Inputs.PermissionsAccessControlArgs
            {
                GroupName = eng.DisplayName,
                PermissionLevel = "CAN_QUERY",
            },
        },
    });
});
// Example: grant CAN_VIEW / CAN_QUERY / CAN_MANAGE on a model-serving endpoint.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.ModelServing;
import com.pulumi.databricks.ModelServingArgs;
import com.pulumi.databricks.inputs.ModelServingConfigArgs;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var this_ = new ModelServing("this", ModelServingArgs.builder()
            .name("tf-test")
            .config(ModelServingConfigArgs.builder()
                .servedModels(ModelServingConfigServedModelArgs.builder()
                    .name("prod_model")
                    .modelName("test")
                    .modelVersion("1")
                    .workloadSize("Small")
                    .scaleToZeroEnabled(true)
                    .build())
                .build())
            .build());
        var auto = new Group("auto", GroupArgs.builder()
            .displayName("Automation")
            .build());
        var eng = new Group("eng", GroupArgs.builder()
            .displayName("Engineering")
            .build());
        // The endpoint is addressed by its serving-endpoint ID.
        var mlServingUsage = new Permissions("mlServingUsage", PermissionsArgs.builder()
            .servingEndpointId(this_.servingEndpointId())
            .accessControls(
                PermissionsAccessControlArgs.builder()
                    .groupName("users")
                    .permissionLevel("CAN_VIEW")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(auto.displayName())
                    .permissionLevel("CAN_MANAGE")
                    .build(),
                PermissionsAccessControlArgs.builder()
                    .groupName(eng.displayName())
                    .permissionLevel("CAN_QUERY")
                    .build())
            .build());
    }
}
# Example: grant CAN_VIEW / CAN_QUERY / CAN_MANAGE on a model-serving endpoint.
resources:
this:
type: databricks:ModelServing
properties:
name: tf-test
config:
servedModels:
- name: prod_model
modelName: test
modelVersion: '1'
workloadSize: Small
scaleToZeroEnabled: true
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
# The endpoint is addressed by its serving-endpoint ID.
mlServingUsage:
type: databricks:Permissions
name: ml_serving_usage
properties:
servingEndpointId: ${this.servingEndpointId}
accessControls:
- groupName: users
permissionLevel: CAN_VIEW
- groupName: ${auto.displayName}
permissionLevel: CAN_MANAGE
- groupName: ${eng.displayName}
permissionLevel: CAN_QUERY
Passwords usage
By default on AWS deployments, all admin users can sign in to Databricks using either SSO or their username and password, and all API users can authenticate to the Databricks REST APIs using their username and password. As an admin, you can limit admin users’ and API users’ ability to authenticate with their username and password by configuring CAN_USE
permissions using password access control.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const guests = new databricks.Group("guests", {displayName: "Guest Users"});
const passwordUsage = new databricks.Permissions("password_usage", {
authorization: "passwords",
accessControls: [{
groupName: guests.displayName,
permissionLevel: "CAN_USE",
}],
});
import pulumi
import pulumi_databricks as databricks
guests = databricks.Group("guests", display_name="Guest Users")
password_usage = databricks.Permissions("password_usage",
authorization="passwords",
access_controls=[{
"group_name": guests.display_name,
"permission_level": "CAN_USE",
}])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
guests, err := databricks.NewGroup(ctx, "guests", &databricks.GroupArgs{
DisplayName: pulumi.String("Guest Users"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "password_usage", &databricks.PermissionsArgs{
Authorization: pulumi.String("passwords"),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: guests.DisplayName,
PermissionLevel: pulumi.String("CAN_USE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var guests = new Databricks.Group("guests", new()
{
DisplayName = "Guest Users",
});
var passwordUsage = new Databricks.Permissions("password_usage", new()
{
Authorization = "passwords",
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = guests.DisplayName,
PermissionLevel = "CAN_USE",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var guests = new Group("guests", GroupArgs.builder()
.displayName("Guest Users")
.build());
var passwordUsage = new Permissions("passwordUsage", PermissionsArgs.builder()
.authorization("passwords")
.accessControls(PermissionsAccessControlArgs.builder()
.groupName(guests.displayName())
.permissionLevel("CAN_USE")
.build())
.build());
}
}
resources:
guests:
type: databricks:Group
properties:
displayName: Guest Users
passwordUsage:
type: databricks:Permissions
name: password_usage
properties:
authorization: passwords
accessControls:
- groupName: ${guests.displayName}
permissionLevel: CAN_USE
Token usage
It is required to have at least one personal access token in the workspace before you can manage token permissions.
!> Warning There can be only one authorization = "tokens"
permissions resource per workspace, otherwise there’ll be a permanent configuration drift. After applying changes, users who previously had either CAN_USE
or CAN_MANAGE
permission but no longer have either permission have their access to token-based authentication revoked. Their active tokens are immediately deleted (revoked).
The only possible permission to assign to a non-admin group is CAN_USE
, where admins CAN_MANAGE
all tokens:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const tokenUsage = new databricks.Permissions("token_usage", {
authorization: "tokens",
accessControls: [
{
groupName: auto.displayName,
permissionLevel: "CAN_USE",
},
{
groupName: eng.displayName,
permissionLevel: "CAN_USE",
},
],
});
import pulumi
import pulumi_databricks as databricks
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
token_usage = databricks.Permissions("token_usage",
authorization="tokens",
access_controls=[
{
"group_name": auto.display_name,
"permission_level": "CAN_USE",
},
{
"group_name": eng.display_name,
"permission_level": "CAN_USE",
},
])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "token_usage", &databricks.PermissionsArgs{
Authorization: pulumi.String("tokens"),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_USE"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_USE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var auto = new Databricks.Group("auto", new()
{
DisplayName = "Automation",
});
var eng = new Databricks.Group("eng", new()
{
DisplayName = "Engineering",
});
var tokenUsage = new Databricks.Permissions("token_usage", new()
{
Authorization = "tokens",
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = auto.DisplayName,
PermissionLevel = "CAN_USE",
},
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = eng.DisplayName,
PermissionLevel = "CAN_USE",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var tokenUsage = new Permissions("tokenUsage", PermissionsArgs.builder()
.authorization("tokens")
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_USE")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_USE")
.build())
.build());
}
}
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
tokenUsage:
type: databricks:Permissions
name: token_usage
properties:
authorization: tokens
accessControls:
- groupName: ${auto.displayName}
permissionLevel: CAN_USE
- groupName: ${eng.displayName}
permissionLevel: CAN_USE
SQL warehouse usage
SQL warehouses have four possible permissions: CAN_USE
, CAN_MONITOR
, CAN_MANAGE
and IS_OWNER
:
- The creator of a warehouse has
IS_OWNER
permission. Destroying the databricks.Permissions
resource for a warehouse would revert ownership to the creator. - A warehouse must have exactly one owner. If a resource is changed and no owner is specified, the currently authenticated principal would become the new owner of the warehouse. Nothing would change, per se, if the warehouse was created through Pulumi.
- A warehouse cannot have a group as an owner.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const me = databricks.getCurrentUser({});
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const _this = new databricks.SqlEndpoint("this", {
name: me.then(me => `Endpoint of ${me.alphanumeric}`),
clusterSize: "Small",
maxNumClusters: 1,
tags: {
customTags: [{
key: "City",
value: "Amsterdam",
}],
},
});
const endpointUsage = new databricks.Permissions("endpoint_usage", {
sqlEndpointId: _this.id,
accessControls: [
{
groupName: auto.displayName,
permissionLevel: "CAN_USE",
},
{
groupName: eng.displayName,
permissionLevel: "CAN_MANAGE",
},
],
});
import pulumi
import pulumi_databricks as databricks
me = databricks.get_current_user()
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
this = databricks.SqlEndpoint("this",
name=f"Endpoint of {me.alphanumeric}",
cluster_size="Small",
max_num_clusters=1,
tags={
"custom_tags": [{
"key": "City",
"value": "Amsterdam",
}],
})
endpoint_usage = databricks.Permissions("endpoint_usage",
sql_endpoint_id=this.id,
access_controls=[
{
"group_name": auto.display_name,
"permission_level": "CAN_USE",
},
{
"group_name": eng.display_name,
"permission_level": "CAN_MANAGE",
},
])
package main
import (
"fmt"
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
me, err := databricks.GetCurrentUser(ctx, map[string]interface{}{}, nil)
if err != nil {
return err
}
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
this, err := databricks.NewSqlEndpoint(ctx, "this", &databricks.SqlEndpointArgs{
Name: pulumi.Sprintf("Endpoint of %v", me.Alphanumeric),
ClusterSize: pulumi.String("Small"),
MaxNumClusters: pulumi.Int(1),
Tags: &databricks.SqlEndpointTagsArgs{
CustomTags: databricks.SqlEndpointTagsCustomTagArray{
&databricks.SqlEndpointTagsCustomTagArgs{
Key: pulumi.String("City"),
Value: pulumi.String("Amsterdam"),
},
},
},
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "endpoint_usage", &databricks.PermissionsArgs{
SqlEndpointId: this.ID(),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_USE"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_MANAGE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var me = Databricks.GetCurrentUser.Invoke();
var auto = new Databricks.Group("auto", new()
{
DisplayName = "Automation",
});
var eng = new Databricks.Group("eng", new()
{
DisplayName = "Engineering",
});
var @this = new Databricks.SqlEndpoint("this", new()
{
Name = $"Endpoint of {me.Apply(getCurrentUserResult => getCurrentUserResult.Alphanumeric)}",
ClusterSize = "Small",
MaxNumClusters = 1,
Tags = new Databricks.Inputs.SqlEndpointTagsArgs
{
CustomTags = new[]
{
new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
{
Key = "City",
Value = "Amsterdam",
},
},
},
});
var endpointUsage = new Databricks.Permissions("endpoint_usage", new()
{
SqlEndpointId = @this.Id,
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = auto.DisplayName,
PermissionLevel = "CAN_USE",
},
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = eng.DisplayName,
PermissionLevel = "CAN_MANAGE",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.SqlEndpoint;
import com.pulumi.databricks.SqlEndpointArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var me = DatabricksFunctions.getCurrentUser();
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var this_ = new SqlEndpoint("this", SqlEndpointArgs.builder()
.name(String.format("Endpoint of %s", me.applyValue(getCurrentUserResult -> getCurrentUserResult.alphanumeric())))
.clusterSize("Small")
.maxNumClusters(1)
.tags(SqlEndpointTagsArgs.builder()
.customTags(SqlEndpointTagsCustomTagArgs.builder()
.key("City")
.value("Amsterdam")
.build())
.build())
.build());
var endpointUsage = new Permissions("endpointUsage", PermissionsArgs.builder()
.sqlEndpointId(this_.id())
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_USE")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_MANAGE")
.build())
.build());
}
}
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
this:
type: databricks:SqlEndpoint
properties:
name: Endpoint of ${me.alphanumeric}
clusterSize: Small
maxNumClusters: 1
tags:
customTags:
- key: City
value: Amsterdam
endpointUsage:
type: databricks:Permissions
name: endpoint_usage
properties:
sqlEndpointId: ${this.id}
accessControls:
- groupName: ${auto.displayName}
permissionLevel: CAN_USE
- groupName: ${eng.displayName}
permissionLevel: CAN_MANAGE
variables:
me:
fn::invoke:
Function: databricks:getCurrentUser
Arguments: {}
Dashboard usage
Dashboards have four possible permissions: CAN_READ
, CAN_RUN
, CAN_EDIT
and CAN_MANAGE
:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const dashboard = new databricks.Dashboard("dashboard", {displayName: "TF New Dashboard"});
const dashboardUsage = new databricks.Permissions("dashboard_usage", {
dashboardId: dashboard.id,
accessControls: [
{
groupName: auto.displayName,
permissionLevel: "CAN_RUN",
},
{
groupName: eng.displayName,
permissionLevel: "CAN_MANAGE",
},
],
});
import pulumi
import pulumi_databricks as databricks
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
dashboard = databricks.Dashboard("dashboard", display_name="TF New Dashboard")
dashboard_usage = databricks.Permissions("dashboard_usage",
dashboard_id=dashboard.id,
access_controls=[
{
"group_name": auto.display_name,
"permission_level": "CAN_RUN",
},
{
"group_name": eng.display_name,
"permission_level": "CAN_MANAGE",
},
])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
dashboard, err := databricks.NewDashboard(ctx, "dashboard", &databricks.DashboardArgs{
DisplayName: pulumi.String("TF New Dashboard"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "dashboard_usage", &databricks.PermissionsArgs{
DashboardId: dashboard.ID(),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_MANAGE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var auto = new Databricks.Group("auto", new()
{
DisplayName = "Automation",
});
var eng = new Databricks.Group("eng", new()
{
DisplayName = "Engineering",
});
var dashboard = new Databricks.Dashboard("dashboard", new()
{
DisplayName = "TF New Dashboard",
});
var dashboardUsage = new Databricks.Permissions("dashboard_usage", new()
{
DashboardId = dashboard.Id,
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = auto.DisplayName,
PermissionLevel = "CAN_RUN",
},
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = eng.DisplayName,
PermissionLevel = "CAN_MANAGE",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Dashboard;
import com.pulumi.databricks.DashboardArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var dashboard = new Dashboard("dashboard", DashboardArgs.builder()
.displayName("TF New Dashboard")
.build());
var dashboardUsage = new Permissions("dashboardUsage", PermissionsArgs.builder()
.dashboardId(dashboard.id())
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_MANAGE")
.build())
.build());
}
}
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
dashboard:
type: databricks:Dashboard
properties:
displayName: TF New Dashboard
dashboardUsage:
type: databricks:Permissions
name: dashboard_usage
properties:
dashboardId: ${dashboard.id}
accessControls:
- groupName: ${auto.displayName}
permissionLevel: CAN_RUN
- groupName: ${eng.displayName}
permissionLevel: CAN_MANAGE
Legacy SQL Dashboard usage
Legacy SQL dashboards have three possible permissions: CAN_VIEW
, CAN_RUN
and CAN_MANAGE
:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const sqlDashboardUsage = new databricks.Permissions("sql_dashboard_usage", {
sqlDashboardId: "3244325",
accessControls: [
{
groupName: auto.displayName,
permissionLevel: "CAN_RUN",
},
{
groupName: eng.displayName,
permissionLevel: "CAN_MANAGE",
},
],
});
import pulumi
import pulumi_databricks as databricks
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
sql_dashboard_usage = databricks.Permissions("sql_dashboard_usage",
sql_dashboard_id="3244325",
access_controls=[
{
"group_name": auto.display_name,
"permission_level": "CAN_RUN",
},
{
"group_name": eng.display_name,
"permission_level": "CAN_MANAGE",
},
])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "sql_dashboard_usage", &databricks.PermissionsArgs{
SqlDashboardId: pulumi.String("3244325"),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_MANAGE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var auto = new Databricks.Group("auto", new()
{
DisplayName = "Automation",
});
var eng = new Databricks.Group("eng", new()
{
DisplayName = "Engineering",
});
var sqlDashboardUsage = new Databricks.Permissions("sql_dashboard_usage", new()
{
SqlDashboardId = "3244325",
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = auto.DisplayName,
PermissionLevel = "CAN_RUN",
},
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = eng.DisplayName,
PermissionLevel = "CAN_MANAGE",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var sqlDashboardUsage = new Permissions("sqlDashboardUsage", PermissionsArgs.builder()
.sqlDashboardId("3244325")
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_MANAGE")
.build())
.build());
}
}
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
sqlDashboardUsage:
type: databricks:Permissions
name: sql_dashboard_usage
properties:
sqlDashboardId: '3244325'
accessControls:
- groupName: ${auto.displayName}
permissionLevel: CAN_RUN
- groupName: ${eng.displayName}
permissionLevel: CAN_MANAGE
SQL Query usage
SQL queries have three possible permissions: CAN_VIEW
, CAN_RUN
and CAN_MANAGE
:
If you do not define an
access_control
block grantingCAN_MANAGE
explicitly for the user calling this provider, the Databricks Pulumi Provider will add CAN_MANAGE
permission for the caller. This is a failsafe to prevent situations where the caller is locked out from making changes to the targeted databricks.SqlQuery
resource when the backend API does not apply permission inheritance correctly.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const queryUsage = new databricks.Permissions("query_usage", {
sqlQueryId: "3244325",
accessControls: [
{
groupName: auto.displayName,
permissionLevel: "CAN_RUN",
},
{
groupName: eng.displayName,
permissionLevel: "CAN_MANAGE",
},
],
});
import pulumi
import pulumi_databricks as databricks
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
query_usage = databricks.Permissions("query_usage",
sql_query_id="3244325",
access_controls=[
{
"group_name": auto.display_name,
"permission_level": "CAN_RUN",
},
{
"group_name": eng.display_name,
"permission_level": "CAN_MANAGE",
},
])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "query_usage", &databricks.PermissionsArgs{
SqlQueryId: pulumi.String("3244325"),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_MANAGE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var auto = new Databricks.Group("auto", new()
{
DisplayName = "Automation",
});
var eng = new Databricks.Group("eng", new()
{
DisplayName = "Engineering",
});
var queryUsage = new Databricks.Permissions("query_usage", new()
{
SqlQueryId = "3244325",
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = auto.DisplayName,
PermissionLevel = "CAN_RUN",
},
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = eng.DisplayName,
PermissionLevel = "CAN_MANAGE",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var queryUsage = new Permissions("queryUsage", PermissionsArgs.builder()
.sqlQueryId("3244325")
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_MANAGE")
.build())
.build());
}
}
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
queryUsage:
type: databricks:Permissions
name: query_usage
properties:
sqlQueryId: '3244325'
accessControls:
- groupName: ${auto.displayName}
permissionLevel: CAN_RUN
- groupName: ${eng.displayName}
permissionLevel: CAN_MANAGE
SQL Alert usage
SQL alerts have three possible permissions: CAN_VIEW
, CAN_RUN
and CAN_MANAGE
:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const auto = new databricks.Group("auto", {displayName: "Automation"});
const eng = new databricks.Group("eng", {displayName: "Engineering"});
const alertUsage = new databricks.Permissions("alert_usage", {
sqlAlertId: "3244325",
accessControls: [
{
groupName: auto.displayName,
permissionLevel: "CAN_RUN",
},
{
groupName: eng.displayName,
permissionLevel: "CAN_MANAGE",
},
],
});
import pulumi
import pulumi_databricks as databricks
auto = databricks.Group("auto", display_name="Automation")
eng = databricks.Group("eng", display_name="Engineering")
alert_usage = databricks.Permissions("alert_usage",
sql_alert_id="3244325",
access_controls=[
{
"group_name": auto.display_name,
"permission_level": "CAN_RUN",
},
{
"group_name": eng.display_name,
"permission_level": "CAN_MANAGE",
},
])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
auto, err := databricks.NewGroup(ctx, "auto", &databricks.GroupArgs{
DisplayName: pulumi.String("Automation"),
})
if err != nil {
return err
}
eng, err := databricks.NewGroup(ctx, "eng", &databricks.GroupArgs{
DisplayName: pulumi.String("Engineering"),
})
if err != nil {
return err
}
_, err = databricks.NewPermissions(ctx, "alert_usage", &databricks.PermissionsArgs{
SqlAlertId: pulumi.String("3244325"),
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: auto.DisplayName,
PermissionLevel: pulumi.String("CAN_RUN"),
},
&databricks.PermissionsAccessControlArgs{
GroupName: eng.DisplayName,
PermissionLevel: pulumi.String("CAN_MANAGE"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var auto = new Databricks.Group("auto", new()
{
DisplayName = "Automation",
});
var eng = new Databricks.Group("eng", new()
{
DisplayName = "Engineering",
});
var alertUsage = new Databricks.Permissions("alert_usage", new()
{
SqlAlertId = "3244325",
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = auto.DisplayName,
PermissionLevel = "CAN_RUN",
},
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = eng.DisplayName,
PermissionLevel = "CAN_MANAGE",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Group;
import com.pulumi.databricks.GroupArgs;
import com.pulumi.databricks.Permissions;
import com.pulumi.databricks.PermissionsArgs;
import com.pulumi.databricks.inputs.PermissionsAccessControlArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var auto = new Group("auto", GroupArgs.builder()
.displayName("Automation")
.build());
var eng = new Group("eng", GroupArgs.builder()
.displayName("Engineering")
.build());
var alertUsage = new Permissions("alertUsage", PermissionsArgs.builder()
.sqlAlertId("3244325")
.accessControls(
PermissionsAccessControlArgs.builder()
.groupName(auto.displayName())
.permissionLevel("CAN_RUN")
.build(),
PermissionsAccessControlArgs.builder()
.groupName(eng.displayName())
.permissionLevel("CAN_MANAGE")
.build())
.build());
}
}
resources:
auto:
type: databricks:Group
properties:
displayName: Automation
eng:
type: databricks:Group
properties:
displayName: Engineering
alertUsage:
type: databricks:Permissions
name: alert_usage
properties:
sqlAlertId: '3244325'
accessControls:
- groupName: ${auto.displayName}
permissionLevel: CAN_RUN
- groupName: ${eng.displayName}
permissionLevel: CAN_MANAGE
Instance Profiles
Instance Profiles are not managed by General Permissions API and therefore databricks.GroupInstanceProfile and databricks.UserInstanceProfile should be used to allow usage of specific AWS EC2 IAM roles to users or groups.
Secrets
One can control access to databricks.Secret through initial_manage_principal
argument on databricks.SecretScope or databricks_secret_acl, so that users (or service principals) can READ
, WRITE
or MANAGE
entries within secret scope.
Tables, Views and Databases
General Permissions API does not apply to access control for tables and they have to be managed separately using the databricks.SqlPermissions resource, though you’re encouraged to use Unity Catalog or migrate to it.
Data Access with Unity Catalog
Initially in Unity Catalog all users have no access to data, which has to be later assigned through databricks.Grants resource.
Create Permissions Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Permissions(name: string, args: PermissionsArgs, opts?: CustomResourceOptions);
@overload
def Permissions(resource_name: str,
args: PermissionsArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Permissions(resource_name: str,
opts: Optional[ResourceOptions] = None,
access_controls: Optional[Sequence[PermissionsAccessControlArgs]] = None,
authorization: Optional[str] = None,
cluster_id: Optional[str] = None,
cluster_policy_id: Optional[str] = None,
dashboard_id: Optional[str] = None,
directory_id: Optional[str] = None,
directory_path: Optional[str] = None,
experiment_id: Optional[str] = None,
instance_pool_id: Optional[str] = None,
job_id: Optional[str] = None,
notebook_id: Optional[str] = None,
notebook_path: Optional[str] = None,
object_type: Optional[str] = None,
pipeline_id: Optional[str] = None,
registered_model_id: Optional[str] = None,
repo_id: Optional[str] = None,
repo_path: Optional[str] = None,
serving_endpoint_id: Optional[str] = None,
sql_alert_id: Optional[str] = None,
sql_dashboard_id: Optional[str] = None,
sql_endpoint_id: Optional[str] = None,
sql_query_id: Optional[str] = None,
workspace_file_id: Optional[str] = None,
workspace_file_path: Optional[str] = None)
func NewPermissions(ctx *Context, name string, args PermissionsArgs, opts ...ResourceOption) (*Permissions, error)
public Permissions(string name, PermissionsArgs args, CustomResourceOptions? opts = null)
public Permissions(String name, PermissionsArgs args)
public Permissions(String name, PermissionsArgs args, CustomResourceOptions options)
type: databricks:Permissions
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PermissionsArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PermissionsArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PermissionsArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PermissionsArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PermissionsArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var permissionsResource = new Databricks.Permissions("permissionsResource", new()
{
AccessControls = new[]
{
new Databricks.Inputs.PermissionsAccessControlArgs
{
GroupName = "string",
PermissionLevel = "string",
ServicePrincipalName = "string",
UserName = "string",
},
},
Authorization = "string",
ClusterId = "string",
ClusterPolicyId = "string",
DashboardId = "string",
DirectoryId = "string",
DirectoryPath = "string",
ExperimentId = "string",
InstancePoolId = "string",
JobId = "string",
NotebookId = "string",
NotebookPath = "string",
ObjectType = "string",
PipelineId = "string",
RegisteredModelId = "string",
RepoId = "string",
RepoPath = "string",
ServingEndpointId = "string",
SqlAlertId = "string",
SqlDashboardId = "string",
SqlEndpointId = "string",
SqlQueryId = "string",
WorkspaceFileId = "string",
WorkspaceFilePath = "string",
});
example, err := databricks.NewPermissions(ctx, "permissionsResource", &databricks.PermissionsArgs{
AccessControls: databricks.PermissionsAccessControlArray{
&databricks.PermissionsAccessControlArgs{
GroupName: pulumi.String("string"),
PermissionLevel: pulumi.String("string"),
ServicePrincipalName: pulumi.String("string"),
UserName: pulumi.String("string"),
},
},
Authorization: pulumi.String("string"),
ClusterId: pulumi.String("string"),
ClusterPolicyId: pulumi.String("string"),
DashboardId: pulumi.String("string"),
DirectoryId: pulumi.String("string"),
DirectoryPath: pulumi.String("string"),
ExperimentId: pulumi.String("string"),
InstancePoolId: pulumi.String("string"),
JobId: pulumi.String("string"),
NotebookId: pulumi.String("string"),
NotebookPath: pulumi.String("string"),
ObjectType: pulumi.String("string"),
PipelineId: pulumi.String("string"),
RegisteredModelId: pulumi.String("string"),
RepoId: pulumi.String("string"),
RepoPath: pulumi.String("string"),
ServingEndpointId: pulumi.String("string"),
SqlAlertId: pulumi.String("string"),
SqlDashboardId: pulumi.String("string"),
SqlEndpointId: pulumi.String("string"),
SqlQueryId: pulumi.String("string"),
WorkspaceFileId: pulumi.String("string"),
WorkspaceFilePath: pulumi.String("string"),
})
var permissionsResource = new Permissions("permissionsResource", PermissionsArgs.builder()
.accessControls(PermissionsAccessControlArgs.builder()
.groupName("string")
.permissionLevel("string")
.servicePrincipalName("string")
.userName("string")
.build())
.authorization("string")
.clusterId("string")
.clusterPolicyId("string")
.dashboardId("string")
.directoryId("string")
.directoryPath("string")
.experimentId("string")
.instancePoolId("string")
.jobId("string")
.notebookId("string")
.notebookPath("string")
.objectType("string")
.pipelineId("string")
.registeredModelId("string")
.repoId("string")
.repoPath("string")
.servingEndpointId("string")
.sqlAlertId("string")
.sqlDashboardId("string")
.sqlEndpointId("string")
.sqlQueryId("string")
.workspaceFileId("string")
.workspaceFilePath("string")
.build());
permissions_resource = databricks.Permissions("permissionsResource",
access_controls=[{
"group_name": "string",
"permission_level": "string",
"service_principal_name": "string",
"user_name": "string",
}],
authorization="string",
cluster_id="string",
cluster_policy_id="string",
dashboard_id="string",
directory_id="string",
directory_path="string",
experiment_id="string",
instance_pool_id="string",
job_id="string",
notebook_id="string",
notebook_path="string",
object_type="string",
pipeline_id="string",
registered_model_id="string",
repo_id="string",
repo_path="string",
serving_endpoint_id="string",
sql_alert_id="string",
sql_dashboard_id="string",
sql_endpoint_id="string",
sql_query_id="string",
workspace_file_id="string",
workspace_file_path="string")
const permissionsResource = new databricks.Permissions("permissionsResource", {
accessControls: [{
groupName: "string",
permissionLevel: "string",
servicePrincipalName: "string",
userName: "string",
}],
authorization: "string",
clusterId: "string",
clusterPolicyId: "string",
dashboardId: "string",
directoryId: "string",
directoryPath: "string",
experimentId: "string",
instancePoolId: "string",
jobId: "string",
notebookId: "string",
notebookPath: "string",
objectType: "string",
pipelineId: "string",
registeredModelId: "string",
repoId: "string",
repoPath: "string",
servingEndpointId: "string",
sqlAlertId: "string",
sqlDashboardId: "string",
sqlEndpointId: "string",
sqlQueryId: "string",
workspaceFileId: "string",
workspaceFilePath: "string",
});
type: databricks:Permissions
properties:
accessControls:
- groupName: string
permissionLevel: string
servicePrincipalName: string
userName: string
authorization: string
clusterId: string
clusterPolicyId: string
dashboardId: string
directoryId: string
directoryPath: string
experimentId: string
instancePoolId: string
jobId: string
notebookId: string
notebookPath: string
objectType: string
pipelineId: string
registeredModelId: string
repoId: string
repoPath: string
servingEndpointId: string
sqlAlertId: string
sqlDashboardId: string
sqlEndpointId: string
sqlQueryId: string
workspaceFileId: string
workspaceFilePath: string
Permissions Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Permissions resource accepts the following input properties:
- AccessControls List<PermissionsAccessControl>
- Authorization string
- ClusterId string
- ClusterPolicyId string
- DashboardId string
- DirectoryId string
- DirectoryPath string
- ExperimentId string
- InstancePoolId string
- JobId string
- NotebookId string
- NotebookPath string
- ObjectType string - type of permissions.
- PipelineId string
- RegisteredModelId string
- RepoId string
- RepoPath string
- ServingEndpointId string
- SqlAlertId string
- SqlDashboardId string
- SqlEndpointId string
- SqlQueryId string
- WorkspaceFileId string
- WorkspaceFilePath string
- Access
Controls []PermissionsAccess Control Args - string
- Cluster
Id string - Cluster
Policy stringId - Dashboard
Id string - Directory
Id string - Directory
Path string - Experiment
Id string - Instance
Pool stringId - Job
Id string - Notebook
Id string - Notebook
Path string - Object
Type string - type of permissions.
- Pipeline
Id string - Registered
Model stringId - Repo
Id string - Repo
Path string - Serving
Endpoint stringId - Sql
Alert stringId - Sql
Dashboard stringId - Sql
Endpoint stringId - Sql
Query stringId - Workspace
File stringId - Workspace
File stringPath
- access
Controls List<PermissionsAccess Control> - String
- cluster
Id String - cluster
Policy StringId - dashboard
Id String - directory
Id String - directory
Path String - experiment
Id String - instance
Pool StringId - job
Id String - notebook
Id String - notebook
Path String - object
Type String - type of permissions.
- pipeline
Id String - registered
Model StringId - repo
Id String - repo
Path String - serving
Endpoint StringId - sql
Alert StringId - sql
Dashboard StringId - sql
Endpoint StringId - sql
Query StringId - workspace
File StringId - workspace
File StringPath
- access
Controls PermissionsAccess Control[] - string
- cluster
Id string - cluster
Policy stringId - dashboard
Id string - directory
Id string - directory
Path string - experiment
Id string - instance
Pool stringId - job
Id string - notebook
Id string - notebook
Path string - object
Type string - type of permissions.
- pipeline
Id string - registered
Model stringId - repo
Id string - repo
Path string - serving
Endpoint stringId - sql
Alert stringId - sql
Dashboard stringId - sql
Endpoint stringId - sql
Query stringId - workspace
File stringId - workspace
File stringPath
- access_controls Sequence[PermissionsAccessControlArgs]
- authorization str
- cluster_id str
- cluster_policy_id str
- dashboard_id str
- directory_id str
- directory_path str
- experiment_id str
- instance_pool_id str
- job_id str
- notebook_id str
- notebook_path str
- object_type str - type of permissions.
- pipeline_id str
- registered_model_id str
- repo_id str
- repo_path str
- serving_endpoint_id str
- sql_alert_id str
- sql_dashboard_id str
- sql_endpoint_id str
- sql_query_id str
- workspace_file_id str
- workspace_file_path str
- access
Controls List<Property Map> - String
- cluster
Id String - cluster
Policy StringId - dashboard
Id String - directory
Id String - directory
Path String - experiment
Id String - instance
Pool StringId - job
Id String - notebook
Id String - notebook
Path String - object
Type String - type of permissions.
- pipeline
Id String - registered
Model StringId - repo
Id String - repo
Path String - serving
Endpoint StringId - sql
Alert StringId - sql
Dashboard StringId - sql
Endpoint StringId - sql
Query StringId - workspace
File StringId - workspace
File StringPath
Outputs
All input properties are implicitly available as output properties. Additionally, the Permissions resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing Permissions Resource
Get an existing Permissions resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PermissionsState, opts?: CustomResourceOptions): Permissions
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
access_controls: Optional[Sequence[PermissionsAccessControlArgs]] = None,
authorization: Optional[str] = None,
cluster_id: Optional[str] = None,
cluster_policy_id: Optional[str] = None,
dashboard_id: Optional[str] = None,
directory_id: Optional[str] = None,
directory_path: Optional[str] = None,
experiment_id: Optional[str] = None,
instance_pool_id: Optional[str] = None,
job_id: Optional[str] = None,
notebook_id: Optional[str] = None,
notebook_path: Optional[str] = None,
object_type: Optional[str] = None,
pipeline_id: Optional[str] = None,
registered_model_id: Optional[str] = None,
repo_id: Optional[str] = None,
repo_path: Optional[str] = None,
serving_endpoint_id: Optional[str] = None,
sql_alert_id: Optional[str] = None,
sql_dashboard_id: Optional[str] = None,
sql_endpoint_id: Optional[str] = None,
sql_query_id: Optional[str] = None,
workspace_file_id: Optional[str] = None,
workspace_file_path: Optional[str] = None) -> Permissions
func GetPermissions(ctx *Context, name string, id IDInput, state *PermissionsState, opts ...ResourceOption) (*Permissions, error)
public static Permissions Get(string name, Input<string> id, PermissionsState? state, CustomResourceOptions? opts = null)
public static Permissions get(String name, Output<String> id, PermissionsState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Access
Controls List<PermissionsAccess Control> - string
- Cluster
Id string - Cluster
Policy stringId - Dashboard
Id string - Directory
Id string - Directory
Path string - Experiment
Id string - Instance
Pool stringId - Job
Id string - Notebook
Id string - Notebook
Path string - Object
Type string - type of permissions.
- Pipeline
Id string - Registered
Model stringId - Repo
Id string - Repo
Path string - Serving
Endpoint stringId - Sql
Alert stringId - Sql
Dashboard stringId - Sql
Endpoint stringId - Sql
Query stringId - Workspace
File stringId - Workspace
File stringPath
- Access
Controls []PermissionsAccess Control Args - string
- Cluster
Id string - Cluster
Policy stringId - Dashboard
Id string - Directory
Id string - Directory
Path string - Experiment
Id string - Instance
Pool stringId - Job
Id string - Notebook
Id string - Notebook
Path string - Object
Type string - type of permissions.
- Pipeline
Id string - Registered
Model stringId - Repo
Id string - Repo
Path string - Serving
Endpoint stringId - Sql
Alert stringId - Sql
Dashboard stringId - Sql
Endpoint stringId - Sql
Query stringId - Workspace
File stringId - Workspace
File stringPath
- access
Controls List<PermissionsAccess Control> - String
- cluster
Id String - cluster
Policy StringId - dashboard
Id String - directory
Id String - directory
Path String - experiment
Id String - instance
Pool StringId - job
Id String - notebook
Id String - notebook
Path String - object
Type String - type of permissions.
- pipeline
Id String - registered
Model StringId - repo
Id String - repo
Path String - serving
Endpoint StringId - sql
Alert StringId - sql
Dashboard StringId - sql
Endpoint StringId - sql
Query StringId - workspace
File StringId - workspace
File StringPath
- access
Controls PermissionsAccess Control[] - string
- cluster
Id string - cluster
Policy stringId - dashboard
Id string - directory
Id string - directory
Path string - experiment
Id string - instance
Pool stringId - job
Id string - notebook
Id string - notebook
Path string - object
Type string - type of permissions.
- pipeline
Id string - registered
Model stringId - repo
Id string - repo
Path string - serving
Endpoint stringId - sql
Alert stringId - sql
Dashboard stringId - sql
Endpoint stringId - sql
Query stringId - workspace
File stringId - workspace
File stringPath
- access_
controls Sequence[PermissionsAccess Control Args] - str
- cluster_
id str - cluster_
policy_ strid - dashboard_
id str - directory_
id str - directory_
path str - experiment_
id str - instance_
pool_ strid - job_
id str - notebook_
id str - notebook_
path str - object_
type str - type of permissions.
- pipeline_
id str - registered_
model_ strid - repo_
id str - repo_
path str - serving_
endpoint_ strid - sql_
alert_ strid - sql_
dashboard_ strid - sql_
endpoint_ strid - sql_
query_ strid - workspace_
file_ strid - workspace_
file_ strpath
- access
Controls List<Property Map> - String
- cluster
Id String - cluster
Policy StringId - dashboard
Id String - directory
Id String - directory
Path String - experiment
Id String - instance
Pool StringId - job
Id String - notebook
Id String - notebook
Path String - object
Type String - type of permissions.
- pipeline
Id String - registered
Model StringId - repo
Id String - repo
Path String - serving
Endpoint StringId - sql
Alert StringId - sql
Dashboard StringId - sql
Endpoint StringId - sql
Query StringId - workspace
File StringId - workspace
File StringPath
Supporting Types
PermissionsAccessControl, PermissionsAccessControlArgs
- GroupName string - name of the group. We recommend setting permissions on groups.
- PermissionLevel string - permission level according to the specific resource. See the examples above for reference.
Exactly one of the below arguments is required:
- ServicePrincipalName string - Application ID of the service_principal.
- UserName string - name of the user.
- Group
Name string - name of the group. We recommend setting permissions on groups.
- Permission
Level string permission level according to specific resource. See examples above for the reference.
Exactly one of the below arguments is required:
- Service
Principal stringName - Application ID of the service_principal.
- User
Name string - name of the user.
- group
Name String - name of the group. We recommend setting permissions on groups.
- permission
Level String permission level according to specific resource. See examples above for the reference.
Exactly one of the below arguments is required:
- service
Principal StringName - Application ID of the service_principal.
- user
Name String - name of the user.
- group
Name string - name of the group. We recommend setting permissions on groups.
- permission
Level string permission level according to specific resource. See examples above for the reference.
Exactly one of the below arguments is required:
- service
Principal stringName - Application ID of the service_principal.
- user
Name string - name of the user.
- group_
name str - name of the group. We recommend setting permissions on groups.
- permission_
level str permission level according to specific resource. See examples above for the reference.
Exactly one of the below arguments is required:
- service_
principal_ strname - Application ID of the service_principal.
- user_
name str - name of the user.
- group
Name String - name of the group. We recommend setting permissions on groups.
- permission
Level String permission level according to specific resource. See examples above for the reference.
Exactly one of the below arguments is required:
- service
Principal StringName - Application ID of the service_principal.
- user
Name String - name of the user.
Import
The resource permissions can be imported using the object id
$ pulumi import databricks:index/permissions:Permissions databricks_permissions <object type>/<object id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
databricks
Terraform Provider.