databricks.Library
Installs a library on a databricks.Cluster. Each type of library has a slightly different syntax, and only one type of library can be set within a single resource; otherwise, the plan will fail with an error.
The databricks.Library resource always starts the associated cluster if it is not running, so make sure to have auto-termination configured. It is not possible to atomically change the version of the same library without a cluster restart, and libraries are fully removed from the cluster only after a restart.
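For example, a minimal sketch of pairing a library with an auto-terminating cluster (the cluster name, node type, and Spark runtime version are illustrative placeholders):
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
// Hypothetical cluster with auto-termination configured, so that a library
// install that starts the cluster does not leave it running indefinitely.
const cluster = new databricks.Cluster("autoterminating", {
    clusterName: "shared-autoterminating", // placeholder name
    sparkVersion: "14.3.x-scala2.12", // placeholder DBR version
    nodeTypeId: "i3.xlarge", // placeholder node type
    numWorkers: 1,
    autoterminationMinutes: 20, // shut down when idle
});
// Exactly one library type (here: pypi) per databricks.Library resource.
const cli = new databricks.Library("cli", {
    clusterId: cluster.id,
    pypi: {
        "package": "databricks-cli",
    },
});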
Plugin Framework Migration
The library resource has been migrated from sdkv2 to the plugin framework. If you encounter any problem with this resource and suspect it is due to the migration, you can fall back to sdkv2 by setting the environment variable as follows: export USE_SDK_V2_RESOURCES="databricks.Library".
Installing library on all clusters
You can install libraries on all clusters with the help of the databricks.getClusters data source:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
export = async () => {
const all = await databricks.getClusters({});
const cli: databricks.Library[] = [];
for (const range of all.ids.map((v, k) => ({key: k, value: v}))) {
cli.push(new databricks.Library(`cli-${range.key}`, {
        clusterId: range.value,
pypi: {
"package": "databricks-cli",
},
}));
}
}
import pulumi
import pulumi_databricks as databricks
all = databricks.get_clusters()
cli = []
for range in [{"key": k, "value": v} for k, v in enumerate(all.ids)]:
    cli.append(databricks.Library(f"cli-{range['key']}",
        cluster_id=range["value"],
pypi={
"package": "databricks-cli",
}))
package main
import (
	"fmt"

	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
all, err := databricks.GetClusters(ctx, &databricks.GetClustersArgs{}, nil)
if err != nil {
return err
}
var cli []*databricks.Library
		for key0, val := range all.Ids {
			__res, err := databricks.NewLibrary(ctx, fmt.Sprintf("cli-%v", key0), &databricks.LibraryArgs{
				ClusterId: pulumi.String(val),
Pypi: &databricks.LibraryPypiArgs{
Package: pulumi.String("databricks-cli"),
},
})
if err != nil {
return err
}
cli = append(cli, __res)
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(async() =>
{
var all = await Databricks.GetClusters.InvokeAsync();
var cli = new List<Databricks.Library>();
    foreach (var range in all.Ids.Select((value, key) => new { Key = key, Value = value }))
{
cli.Add(new Databricks.Library($"cli-{range.Key}", new()
{
            ClusterId = range.Value,
Pypi = new Databricks.Inputs.LibraryPypiArgs
{
Package = "databricks-cli",
},
}));
}
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetClustersArgs;
import com.pulumi.databricks.Library;
import com.pulumi.databricks.LibraryArgs;
import com.pulumi.databricks.inputs.LibraryPypiArgs;
import com.pulumi.codegen.internal.KeyedValue;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var all = DatabricksFunctions.getClusters();
final var cli = all.applyValue(getClustersResult -> {
final var resources = new ArrayList<Library>();
            for (var range : KeyedValue.of(getClustersResult.ids())) {
                var resource = new Library("cli-" + range.key(), LibraryArgs.builder()
                    .clusterId(range.value())
.pypi(LibraryPypiArgs.builder()
.package_("databricks-cli")
.build())
.build());
resources.add(resource);
}
return resources;
});
}
}
resources:
cli:
type: databricks:Library
properties:
      clusterId: ${range.value}
pypi:
package: databricks-cli
options: {}
variables:
all:
fn::invoke:
Function: databricks:getClusters
Arguments: {}
Java/Scala Maven
Installing artifacts from a Maven repository. You can also optionally specify a repo parameter for a custom Maven-style repository, which should be accessible without any authentication. Maven libraries are resolved in the Databricks Control Plane, so the repo should be accessible from it. It can even be a properly configured Maven S3 wagon, AWS CodeArtifact, or Azure Artifacts.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const deequ = new databricks.Library("deequ", {
clusterId: _this.id,
maven: {
coordinates: "com.amazon.deequ:deequ:1.0.4",
exclusions: ["org.apache.avro:avro"],
},
});
import pulumi
import pulumi_databricks as databricks
deequ = databricks.Library("deequ",
cluster_id=this["id"],
maven={
"coordinates": "com.amazon.deequ:deequ:1.0.4",
"exclusions": ["org.apache.avro:avro"],
})
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databricks.NewLibrary(ctx, "deequ", &databricks.LibraryArgs{
ClusterId: pulumi.Any(this.Id),
Maven: &databricks.LibraryMavenArgs{
Coordinates: pulumi.String("com.amazon.deequ:deequ:1.0.4"),
Exclusions: pulumi.StringArray{
pulumi.String("org.apache.avro:avro"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var deequ = new Databricks.Library("deequ", new()
{
ClusterId = @this.Id,
Maven = new Databricks.Inputs.LibraryMavenArgs
{
Coordinates = "com.amazon.deequ:deequ:1.0.4",
Exclusions = new[]
{
"org.apache.avro:avro",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Library;
import com.pulumi.databricks.LibraryArgs;
import com.pulumi.databricks.inputs.LibraryMavenArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var deequ = new Library("deequ", LibraryArgs.builder()
.clusterId(this_.id())
.maven(LibraryMavenArgs.builder()
.coordinates("com.amazon.deequ:deequ:1.0.4")
.exclusions("org.apache.avro:avro")
.build())
.build());
}
}
resources:
deequ:
type: databricks:Library
properties:
clusterId: ${this.id}
maven:
coordinates: com.amazon.deequ:deequ:1.0.4
exclusions:
- org.apache.avro:avro
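When a custom repository is needed, the repo parameter of the maven block can be set. A minimal sketch (the repository URL is a hypothetical placeholder; as in the examples above, _this refers to an existing cluster resource):
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const deequMirrored = new databricks.Library("deequ-mirrored", {
    clusterId: _this.id, // existing cluster, as in the examples above
    maven: {
        coordinates: "com.amazon.deequ:deequ:1.0.4",
        repo: "https://artifacts.example.com/maven", // placeholder repository URL
    },
});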
Python PyPI
Installing Python PyPI artifacts. You can optionally also specify the repo parameter for a custom PyPI mirror, which should be accessible without any authentication for the network that the cluster runs in. The repo host should be accessible from the Internet by the Databricks control plane. If connectivity to custom PyPI repositories is required, modify the cluster node's /etc/pip.conf through databricks.GlobalInitScript.
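As a sketch of that approach (the mirror URL is a hypothetical placeholder), a global init script can write /etc/pip.conf on every cluster node:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
// Hypothetical init script pointing pip at an internal PyPI mirror;
// https://pypi.internal.example.com/simple is a placeholder index URL.
const pipConf = `#!/bin/bash
cat > /etc/pip.conf <<'EOF'
[global]
index-url = https://pypi.internal.example.com/simple
EOF
`;
const pipMirror = new databricks.GlobalInitScript("pip-mirror", {
    name: "pip-mirror",
    contentBase64: Buffer.from(pipConf).toString("base64"),
    enabled: true,
});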
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const fbprophet = new databricks.Library("fbprophet", {
clusterId: _this.id,
pypi: {
"package": "fbprophet==0.6",
},
});
import pulumi
import pulumi_databricks as databricks
fbprophet = databricks.Library("fbprophet",
cluster_id=this["id"],
pypi={
"package": "fbprophet==0.6",
})
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databricks.NewLibrary(ctx, "fbprophet", &databricks.LibraryArgs{
ClusterId: pulumi.Any(this.Id),
Pypi: &databricks.LibraryPypiArgs{
Package: pulumi.String("fbprophet==0.6"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var fbprophet = new Databricks.Library("fbprophet", new()
{
ClusterId = @this.Id,
Pypi = new Databricks.Inputs.LibraryPypiArgs
{
Package = "fbprophet==0.6",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Library;
import com.pulumi.databricks.LibraryArgs;
import com.pulumi.databricks.inputs.LibraryPypiArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fbprophet = new Library("fbprophet", LibraryArgs.builder()
.clusterId(this_.id())
.pypi(LibraryPypiArgs.builder()
.package_("fbprophet==0.6")
.build())
.build());
}
}
resources:
fbprophet:
type: databricks:Library
properties:
clusterId: ${this.id}
pypi:
package: fbprophet==0.6
Python requirements files
Installing Python libraries listed in the requirements.txt
file. Only Workspace paths and Unity Catalog Volumes paths are supported. Requires a cluster with DBR 15.0+.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const libraries = new databricks.Library("libraries", {
clusterId: _this.id,
requirements: "/Workspace/path/to/requirements.txt",
});
import pulumi
import pulumi_databricks as databricks
libraries = databricks.Library("libraries",
cluster_id=this["id"],
requirements="/Workspace/path/to/requirements.txt")
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databricks.NewLibrary(ctx, "libraries", &databricks.LibraryArgs{
ClusterId: pulumi.Any(this.Id),
Requirements: pulumi.String("/Workspace/path/to/requirements.txt"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var libraries = new Databricks.Library("libraries", new()
{
ClusterId = @this.Id,
Requirements = "/Workspace/path/to/requirements.txt",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Library;
import com.pulumi.databricks.LibraryArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var libraries = new Library("libraries", LibraryArgs.builder()
.clusterId(this_.id())
.requirements("/Workspace/path/to/requirements.txt")
.build());
}
}
resources:
libraries:
type: databricks:Library
properties:
clusterId: ${this.id}
requirements: /Workspace/path/to/requirements.txt
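A Unity Catalog Volumes path works the same way. A minimal sketch (the volume path is a hypothetical placeholder; as in the examples above, _this refers to an existing cluster resource):
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const volumeRequirements = new databricks.Library("volume-requirements", {
    clusterId: _this.id, // existing cluster, as in the examples above
    requirements: "/Volumes/main/default/libs/requirements.txt", // placeholder volume path
});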
R CRAN
Installing artifacts from CRAN. You can also optionally specify a repo parameter for a custom CRAN mirror.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const rkeops = new databricks.Library("rkeops", {
clusterId: _this.id,
cran: {
"package": "rkeops",
},
});
import pulumi
import pulumi_databricks as databricks
rkeops = databricks.Library("rkeops",
cluster_id=this["id"],
cran={
"package": "rkeops",
})
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databricks.NewLibrary(ctx, "rkeops", &databricks.LibraryArgs{
ClusterId: pulumi.Any(this.Id),
Cran: &databricks.LibraryCranArgs{
Package: pulumi.String("rkeops"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var rkeops = new Databricks.Library("rkeops", new()
{
ClusterId = @this.Id,
Cran = new Databricks.Inputs.LibraryCranArgs
{
Package = "rkeops",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Library;
import com.pulumi.databricks.LibraryArgs;
import com.pulumi.databricks.inputs.LibraryCranArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var rkeops = new Library("rkeops", LibraryArgs.builder()
.clusterId(this_.id())
.cran(LibraryCranArgs.builder()
.package_("rkeops")
.build())
.build());
}
}
resources:
rkeops:
type: databricks:Library
properties:
clusterId: ${this.id}
cran:
package: rkeops
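If a custom CRAN mirror is needed, the repo parameter of the cran block can be set. A minimal sketch (the mirror URL is a hypothetical placeholder; as in the examples above, _this refers to an existing cluster resource):
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const rkeopsMirrored = new databricks.Library("rkeops-mirrored", {
    clusterId: _this.id, // existing cluster, as in the examples above
    cran: {
        "package": "rkeops",
        repo: "https://cran.internal.example.com", // placeholder CRAN mirror URL
    },
});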
Related Resources
The following resources are often used in the same context:
- End to end workspace management guide.
- databricks.getClusters data to retrieve a list of databricks.Cluster ids.
- databricks.Cluster to create Databricks Clusters.
- databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules.
- databricks.getDbfsFile data source to get file content from Databricks File System (DBFS).
- databricks.getDbfsFilePaths data source to get the list of file names from Databricks File System (DBFS).
- databricks.DbfsFile to manage relatively small files on Databricks File System (DBFS).
- databricks.GlobalInitScript to manage global init scripts, which are run on all databricks.Cluster and databricks.Job.
- databricks.Job to manage Databricks Jobs to run non-interactive code in a databricks.Cluster.
- databricks.Mount to mount your cloud storage on dbfs:/mnt/name.
- databricks.Pipeline to deploy Delta Live Tables.
- databricks.Repo to manage Databricks Repos.
Create Library Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Library(name: string, args: LibraryArgs, opts?: CustomResourceOptions);
@overload
def Library(resource_name: str,
args: LibraryArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Library(resource_name: str,
opts: Optional[ResourceOptions] = None,
cluster_id: Optional[str] = None,
cran: Optional[LibraryCranArgs] = None,
egg: Optional[str] = None,
jar: Optional[str] = None,
library_id: Optional[str] = None,
maven: Optional[LibraryMavenArgs] = None,
pypi: Optional[LibraryPypiArgs] = None,
requirements: Optional[str] = None,
whl: Optional[str] = None)
func NewLibrary(ctx *Context, name string, args LibraryArgs, opts ...ResourceOption) (*Library, error)
public Library(string name, LibraryArgs args, CustomResourceOptions? opts = null)
public Library(String name, LibraryArgs args)
public Library(String name, LibraryArgs args, CustomResourceOptions options)
type: databricks:Library
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args LibraryArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args LibraryArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args LibraryArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args LibraryArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args LibraryArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var libraryResource = new Databricks.Library("libraryResource", new()
{
ClusterId = "string",
Cran = new Databricks.Inputs.LibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
LibraryId = "string",
Maven = new Databricks.Inputs.LibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.LibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
});
example, err := databricks.NewLibrary(ctx, "libraryResource", &databricks.LibraryArgs{
ClusterId: pulumi.String("string"),
Cran: &databricks.LibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
LibraryId: pulumi.String("string"),
Maven: &databricks.LibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.LibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
})
var libraryResource = new Library("libraryResource", LibraryArgs.builder()
.clusterId("string")
.cran(LibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.libraryId("string")
.maven(LibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(LibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build());
library_resource = databricks.Library("libraryResource",
cluster_id="string",
cran={
"package": "string",
"repo": "string",
},
egg="string",
jar="string",
library_id="string",
maven={
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
pypi={
"package": "string",
"repo": "string",
},
requirements="string",
whl="string")
const libraryResource = new databricks.Library("libraryResource", {
clusterId: "string",
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
libraryId: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
});
type: databricks:Library
properties:
clusterId: string
cran:
package: string
repo: string
egg: string
jar: string
libraryId: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
Library Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Library resource accepts the following input properties:
- ClusterId string
- Cran LibraryCran
- Egg string
- Jar string
- LibraryId string
- Maven LibraryMaven
- Pypi LibraryPypi
- Requirements string
- Whl string
- ClusterId string
- Cran LibraryCranArgs
- Egg string
- Jar string
- LibraryId string
- Maven LibraryMavenArgs
- Pypi LibraryPypiArgs
- Requirements string
- Whl string
- clusterId String
- cran LibraryCran
- egg String
- jar String
- libraryId String
- maven LibraryMaven
- pypi LibraryPypi
- requirements String
- whl String
- clusterId string
- cran LibraryCran
- egg string
- jar string
- libraryId string
- maven LibraryMaven
- pypi LibraryPypi
- requirements string
- whl string
- cluster_id str
- cran LibraryCranArgs
- egg str
- jar str
- library_id str
- maven LibraryMavenArgs
- pypi LibraryPypiArgs
- requirements str
- whl str
- clusterId String
- cran Property Map
- egg String
- jar String
- libraryId String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
Outputs
All input properties are implicitly available as output properties. Additionally, the Library resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing Library Resource
Get an existing Library resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: LibraryState, opts?: CustomResourceOptions): Library
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
cluster_id: Optional[str] = None,
cran: Optional[LibraryCranArgs] = None,
egg: Optional[str] = None,
jar: Optional[str] = None,
library_id: Optional[str] = None,
maven: Optional[LibraryMavenArgs] = None,
pypi: Optional[LibraryPypiArgs] = None,
requirements: Optional[str] = None,
whl: Optional[str] = None) -> Library
func GetLibrary(ctx *Context, name string, id IDInput, state *LibraryState, opts ...ResourceOption) (*Library, error)
public static Library Get(string name, Input<string> id, LibraryState? state, CustomResourceOptions? opts = null)
public static Library get(String name, Output<String> id, LibraryState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
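For example, a minimal lookup sketch in TypeScript, following the get signature above (the logical name and the ID value are hypothetical placeholders; the ID must be the provider-assigned ID of an existing library):
import * as databricks from "@pulumi/databricks";
// Hypothetical lookup of a library already known to the provider;
// replace the second argument with a real provider-assigned ID.
const existing = databricks.Library.get("existing", "<provider-assigned-library-id>");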
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- ClusterId string
- Cran LibraryCran
- Egg string
- Jar string
- LibraryId string
- Maven LibraryMaven
- Pypi LibraryPypi
- Requirements string
- Whl string
- ClusterId string
- Cran LibraryCranArgs
- Egg string
- Jar string
- LibraryId string
- Maven LibraryMavenArgs
- Pypi LibraryPypiArgs
- Requirements string
- Whl string
- clusterId String
- cran LibraryCran
- egg String
- jar String
- libraryId String
- maven LibraryMaven
- pypi LibraryPypi
- requirements String
- whl String
- clusterId string
- cran LibraryCran
- egg string
- jar string
- libraryId string
- maven LibraryMaven
- pypi LibraryPypi
- requirements string
- whl string
- cluster_id str
- cran LibraryCranArgs
- egg str
- jar str
- library_id str
- maven LibraryMavenArgs
- pypi LibraryPypiArgs
- requirements str
- whl str
- clusterId String
- cran Property Map
- egg String
- jar String
- libraryId String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
Supporting Types
LibraryCran, LibraryCranArgs
- Package string
- Repo string
- Package string
- Repo string
- package String
- repo String
- package string
- repo string
- package str
- repo str
- package String
- repo String
LibraryMaven, LibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
LibraryPypi, LibraryPypiArgs
- Package string
- Repo string
- Package string
- Repo string
- package String
- repo String
- package string
- repo string
- package str
- repo str
- package String
- repo String
Import
Importing this resource is not currently supported.
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
databricks
Terraform Provider.