databricks.Pipeline
Use databricks.Pipeline to deploy Delta Live Tables.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const dltDemo = new databricks.Notebook("dlt_demo", {});
const dltDemoRepo = new databricks.Repo("dlt_demo", {});
const _this = new databricks.Pipeline("this", {
name: "Pipeline Name",
storage: "/test/first-pipeline",
configuration: {
key1: "value1",
key2: "value2",
},
clusters: [
{
label: "default",
numWorkers: 2,
customTags: {
cluster_type: "default",
},
},
{
label: "maintenance",
numWorkers: 1,
customTags: {
cluster_type: "maintenance",
},
},
],
libraries: [
{
notebook: {
path: dltDemo.id,
},
},
{
file: {
path: pulumi.interpolate`${dltDemoRepo.path}/pipeline.sql`,
},
},
],
continuous: false,
notifications: [{
emailRecipients: [
"user@domain.com",
"user1@domain.com",
],
alerts: [
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure",
],
}],
});
import pulumi
import pulumi_databricks as databricks
dlt_demo = databricks.Notebook("dlt_demo")
dlt_demo_repo = databricks.Repo("dlt_demo")
this = databricks.Pipeline("this",
name="Pipeline Name",
storage="/test/first-pipeline",
configuration={
"key1": "value1",
"key2": "value2",
},
clusters=[
{
"label": "default",
"num_workers": 2,
"custom_tags": {
"cluster_type": "default",
},
},
{
"label": "maintenance",
"num_workers": 1,
"custom_tags": {
"cluster_type": "maintenance",
},
},
],
libraries=[
{
"notebook": {
"path": dlt_demo.id,
},
},
{
"file": {
"path": dlt_demo_repo.path.apply(lambda path: f"{path}/pipeline.sql"),
},
},
],
continuous=False,
notifications=[{
"email_recipients": [
"user@domain.com",
"user1@domain.com",
],
"alerts": [
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure",
],
}])
package main
import (
"fmt"
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
dltDemo, err := databricks.NewNotebook(ctx, "dlt_demo", nil)
if err != nil {
return err
}
dltDemoRepo, err := databricks.NewRepo(ctx, "dlt_demo", nil)
if err != nil {
return err
}
_, err = databricks.NewPipeline(ctx, "this", &databricks.PipelineArgs{
Name: pulumi.String("Pipeline Name"),
Storage: pulumi.String("/test/first-pipeline"),
Configuration: pulumi.StringMap{
"key1": pulumi.String("value1"),
"key2": pulumi.String("value2"),
},
Clusters: databricks.PipelineClusterArray{
&databricks.PipelineClusterArgs{
Label: pulumi.String("default"),
NumWorkers: pulumi.Int(2),
CustomTags: pulumi.StringMap{
"cluster_type": pulumi.String("default"),
},
},
&databricks.PipelineClusterArgs{
Label: pulumi.String("maintenance"),
NumWorkers: pulumi.Int(1),
CustomTags: pulumi.StringMap{
"cluster_type": pulumi.String("maintenance"),
},
},
},
Libraries: databricks.PipelineLibraryArray{
&databricks.PipelineLibraryArgs{
Notebook: &databricks.PipelineLibraryNotebookArgs{
Path: dltDemo.ID(),
},
},
&databricks.PipelineLibraryArgs{
File: &databricks.PipelineLibraryFileArgs{
Path: dltDemoRepo.Path.ApplyT(func(path string) (string, error) {
return fmt.Sprintf("%v/pipeline.sql", path), nil
}).(pulumi.StringOutput),
},
},
},
Continuous: pulumi.Bool(false),
Notifications: databricks.PipelineNotificationArray{
&databricks.PipelineNotificationArgs{
EmailRecipients: pulumi.StringArray{
pulumi.String("user@domain.com"),
pulumi.String("user1@domain.com"),
},
Alerts: pulumi.StringArray{
pulumi.String("on-update-failure"),
pulumi.String("on-update-fatal-failure"),
pulumi.String("on-update-success"),
pulumi.String("on-flow-failure"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var dltDemo = new Databricks.Notebook("dlt_demo");
var dltDemoRepo = new Databricks.Repo("dlt_demo");
var @this = new Databricks.Pipeline("this", new()
{
Name = "Pipeline Name",
Storage = "/test/first-pipeline",
Configuration =
{
{ "key1", "value1" },
{ "key2", "value2" },
},
Clusters = new[]
{
new Databricks.Inputs.PipelineClusterArgs
{
Label = "default",
NumWorkers = 2,
CustomTags =
{
{ "cluster_type", "default" },
},
},
new Databricks.Inputs.PipelineClusterArgs
{
Label = "maintenance",
NumWorkers = 1,
CustomTags =
{
{ "cluster_type", "maintenance" },
},
},
},
Libraries = new[]
{
new Databricks.Inputs.PipelineLibraryArgs
{
Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
{
Path = dltDemo.Id,
},
},
new Databricks.Inputs.PipelineLibraryArgs
{
File = new Databricks.Inputs.PipelineLibraryFileArgs
{
Path = dltDemoRepo.Path.Apply(path => $"{path}/pipeline.sql"),
},
},
},
Continuous = false,
Notifications = new[]
{
new Databricks.Inputs.PipelineNotificationArgs
{
EmailRecipients = new[]
{
"user@domain.com",
"user1@domain.com",
},
Alerts = new[]
{
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure",
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Notebook;
import com.pulumi.databricks.Repo;
import com.pulumi.databricks.Pipeline;
import com.pulumi.databricks.PipelineArgs;
import com.pulumi.databricks.inputs.PipelineClusterArgs;
import com.pulumi.databricks.inputs.PipelineLibraryArgs;
import com.pulumi.databricks.inputs.PipelineLibraryNotebookArgs;
import com.pulumi.databricks.inputs.PipelineLibraryFileArgs;
import com.pulumi.databricks.inputs.PipelineNotificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var dltDemo = new Notebook("dltDemo");
var dltDemoRepo = new Repo("dltDemoRepo");
var this_ = new Pipeline("this", PipelineArgs.builder()
.name("Pipeline Name")
.storage("/test/first-pipeline")
.configuration(Map.ofEntries(
Map.entry("key1", "value1"),
Map.entry("key2", "value2")
))
.clusters(
PipelineClusterArgs.builder()
.label("default")
.numWorkers(2)
.customTags(Map.of("cluster_type", "default"))
.build(),
PipelineClusterArgs.builder()
.label("maintenance")
.numWorkers(1)
.customTags(Map.of("cluster_type", "maintenance"))
.build())
.libraries(
PipelineLibraryArgs.builder()
.notebook(PipelineLibraryNotebookArgs.builder()
.path(dltDemo.id())
.build())
.build(),
PipelineLibraryArgs.builder()
.file(PipelineLibraryFileArgs.builder()
.path(dltDemoRepo.path().applyValue(path -> String.format("%s/pipeline.sql", path)))
.build())
.build())
.continuous(false)
.notifications(PipelineNotificationArgs.builder()
.emailRecipients(
"user@domain.com",
"user1@domain.com")
.alerts(
"on-update-failure",
"on-update-fatal-failure",
"on-update-success",
"on-flow-failure")
.build())
.build());
}
}
resources:
dltDemo:
type: databricks:Notebook
name: dlt_demo
dltDemoRepo:
type: databricks:Repo
name: dlt_demo
this:
type: databricks:Pipeline
properties:
name: Pipeline Name
storage: /test/first-pipeline
configuration:
key1: value1
key2: value2
clusters:
- label: default
numWorkers: 2
customTags:
cluster_type: default
- label: maintenance
numWorkers: 1
customTags:
cluster_type: maintenance
libraries:
- notebook:
path: ${dltDemo.id}
- file:
path: ${dltDemoRepo.path}/pipeline.sql
continuous: false
notifications:
- emailRecipients:
- user@domain.com
- user1@domain.com
alerts:
- on-update-failure
- on-update-fatal-failure
- on-update-success
- on-flow-failure
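Delta Live Tables clusters can also autoscale instead of using a fixed numWorkers; the autoscale block accepts a DLT-specific mode parameter (see the clusters input described below). A minimal Python sketch, with an illustrative notebook path rather than a real one:
import pulumi_databricks as databricks

# Sketch of a pipeline whose default cluster autoscales instead of using a fixed num_workers.
autoscaled = databricks.Pipeline("autoscaled",
    name="Autoscaled Pipeline",
    clusters=[{
        "label": "default",
        "autoscale": {
            "min_workers": 1,
            "max_workers": 5,
            "mode": "ENHANCED",  # or "LEGACY" for the old autoscaling algorithm
        },
    }],
    libraries=[{
        "notebook": {
            # Illustrative path; point this at an existing notebook in your workspace.
            "path": "/Shared/dlt/pipeline_notebook",
        },
    }],
    continuous=False)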
Related Resources
The following resources are often used in the same context:
- End-to-end workspace management guide.
- databricks.getPipelines to retrieve Delta Live Tables pipeline data.
- databricks.Cluster to create Databricks Clusters.
- databricks.Job to manage Databricks Jobs that run non-interactive code in a databricks_cluster.
- databricks.Notebook to manage Databricks Notebooks.
Create Pipeline Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Pipeline(name: string, args?: PipelineArgs, opts?: CustomResourceOptions);
@overload
def Pipeline(resource_name: str,
args: Optional[PipelineArgs] = None,
opts: Optional[ResourceOptions] = None)
@overload
def Pipeline(resource_name: str,
opts: Optional[ResourceOptions] = None,
allow_duplicate_names: Optional[bool] = None,
budget_policy_id: Optional[str] = None,
catalog: Optional[str] = None,
cause: Optional[str] = None,
channel: Optional[str] = None,
cluster_id: Optional[str] = None,
clusters: Optional[Sequence[PipelineClusterArgs]] = None,
configuration: Optional[Mapping[str, str]] = None,
continuous: Optional[bool] = None,
creator_user_name: Optional[str] = None,
deployment: Optional[PipelineDeploymentArgs] = None,
development: Optional[bool] = None,
edition: Optional[str] = None,
expected_last_modified: Optional[int] = None,
filters: Optional[PipelineFiltersArgs] = None,
gateway_definition: Optional[PipelineGatewayDefinitionArgs] = None,
health: Optional[str] = None,
ingestion_definition: Optional[PipelineIngestionDefinitionArgs] = None,
last_modified: Optional[int] = None,
latest_updates: Optional[Sequence[PipelineLatestUpdateArgs]] = None,
libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
name: Optional[str] = None,
notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
photon: Optional[bool] = None,
restart_window: Optional[PipelineRestartWindowArgs] = None,
run_as_user_name: Optional[str] = None,
schema: Optional[str] = None,
serverless: Optional[bool] = None,
state: Optional[str] = None,
storage: Optional[str] = None,
target: Optional[str] = None,
trigger: Optional[PipelineTriggerArgs] = None,
url: Optional[str] = None)
func NewPipeline(ctx *Context, name string, args *PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
public Pipeline(string name, PipelineArgs? args = null, CustomResourceOptions? opts = null)
public Pipeline(String name, PipelineArgs args)
public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
type: databricks:Pipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
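For example, the options bag can attach standard resource options such as explicit dependencies or deletion protection. A brief Python sketch (the dependency on the notebook is purely illustrative):
import pulumi
import pulumi_databricks as databricks

dlt_demo = databricks.Notebook("dlt_demo")

# The opts bag controls resource behavior rather than pipeline inputs.
guarded = databricks.Pipeline("guarded",
    name="Guarded Pipeline",
    opts=pulumi.ResourceOptions(
        depends_on=[dlt_demo],  # create the notebook before the pipeline
        protect=True,           # refuse deletion on `pulumi destroy`
    ))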
Constructor example
The following reference example uses placeholder values for all input properties.
var pipelineResource = new Databricks.Pipeline("pipelineResource", new()
{
AllowDuplicateNames = false,
BudgetPolicyId = "string",
Catalog = "string",
Cause = "string",
Channel = "string",
ClusterId = "string",
Clusters = new[]
{
new Databricks.Inputs.PipelineClusterArgs
{
ApplyPolicyDefaultValues = false,
Autoscale = new Databricks.Inputs.PipelineClusterAutoscaleArgs
{
MaxWorkers = 0,
MinWorkers = 0,
Mode = "string",
},
AwsAttributes = new Databricks.Inputs.PipelineClusterAwsAttributesArgs
{
Availability = "string",
EbsVolumeCount = 0,
EbsVolumeIops = 0,
EbsVolumeSize = 0,
EbsVolumeThroughput = 0,
EbsVolumeType = "string",
FirstOnDemand = 0,
InstanceProfileArn = "string",
SpotBidPricePercent = 0,
ZoneId = "string",
},
AzureAttributes = new Databricks.Inputs.PipelineClusterAzureAttributesArgs
{
Availability = "string",
FirstOnDemand = 0,
LogAnalyticsInfo = new Databricks.Inputs.PipelineClusterAzureAttributesLogAnalyticsInfoArgs
{
LogAnalyticsPrimaryKey = "string",
LogAnalyticsWorkspaceId = "string",
},
SpotBidMaxPrice = 0,
},
ClusterLogConf = new Databricks.Inputs.PipelineClusterClusterLogConfArgs
{
Dbfs = new Databricks.Inputs.PipelineClusterClusterLogConfDbfsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.PipelineClusterClusterLogConfS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
},
CustomTags =
{
{ "string", "string" },
},
DriverInstancePoolId = "string",
DriverNodeTypeId = "string",
EnableLocalDiskEncryption = false,
GcpAttributes = new Databricks.Inputs.PipelineClusterGcpAttributesArgs
{
Availability = "string",
GoogleServiceAccount = "string",
LocalSsdCount = 0,
ZoneId = "string",
},
InitScripts = new[]
{
new Databricks.Inputs.PipelineClusterInitScriptArgs
{
Abfss = new Databricks.Inputs.PipelineClusterInitScriptAbfssArgs
{
Destination = "string",
},
File = new Databricks.Inputs.PipelineClusterInitScriptFileArgs
{
Destination = "string",
},
Gcs = new Databricks.Inputs.PipelineClusterInitScriptGcsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.PipelineClusterInitScriptS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
Volumes = new Databricks.Inputs.PipelineClusterInitScriptVolumesArgs
{
Destination = "string",
},
Workspace = new Databricks.Inputs.PipelineClusterInitScriptWorkspaceArgs
{
Destination = "string",
},
},
},
InstancePoolId = "string",
Label = "string",
NodeTypeId = "string",
NumWorkers = 0,
PolicyId = "string",
SparkConf =
{
{ "string", "string" },
},
SparkEnvVars =
{
{ "string", "string" },
},
SshPublicKeys = new[]
{
"string",
},
},
},
Configuration =
{
{ "string", "string" },
},
Continuous = false,
CreatorUserName = "string",
Deployment = new Databricks.Inputs.PipelineDeploymentArgs
{
Kind = "string",
MetadataFilePath = "string",
},
Development = false,
Edition = "string",
ExpectedLastModified = 0,
Filters = new Databricks.Inputs.PipelineFiltersArgs
{
Excludes = new[]
{
"string",
},
Includes = new[]
{
"string",
},
},
GatewayDefinition = new Databricks.Inputs.PipelineGatewayDefinitionArgs
{
ConnectionId = "string",
ConnectionName = "string",
GatewayStorageCatalog = "string",
GatewayStorageName = "string",
GatewayStorageSchema = "string",
},
Health = "string",
IngestionDefinition = new Databricks.Inputs.PipelineIngestionDefinitionArgs
{
ConnectionName = "string",
IngestionGatewayId = "string",
Objects = new[]
{
new Databricks.Inputs.PipelineIngestionDefinitionObjectArgs
{
Report = new Databricks.Inputs.PipelineIngestionDefinitionObjectReportArgs
{
DestinationCatalog = "string",
DestinationSchema = "string",
DestinationTable = "string",
SourceUrl = "string",
TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionObjectReportTableConfigurationArgs
{
PrimaryKeys = new[]
{
"string",
},
SalesforceIncludeFormulaFields = false,
ScdType = "string",
SequenceBies = new[]
{
"string",
},
},
},
Schema = new Databricks.Inputs.PipelineIngestionDefinitionObjectSchemaArgs
{
DestinationCatalog = "string",
DestinationSchema = "string",
SourceCatalog = "string",
SourceSchema = "string",
TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs
{
PrimaryKeys = new[]
{
"string",
},
SalesforceIncludeFormulaFields = false,
ScdType = "string",
SequenceBies = new[]
{
"string",
},
},
},
Table = new Databricks.Inputs.PipelineIngestionDefinitionObjectTableArgs
{
DestinationCatalog = "string",
DestinationSchema = "string",
DestinationTable = "string",
SourceCatalog = "string",
SourceSchema = "string",
SourceTable = "string",
TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionObjectTableTableConfigurationArgs
{
PrimaryKeys = new[]
{
"string",
},
SalesforceIncludeFormulaFields = false,
ScdType = "string",
SequenceBies = new[]
{
"string",
},
},
},
},
},
TableConfiguration = new Databricks.Inputs.PipelineIngestionDefinitionTableConfigurationArgs
{
PrimaryKeys = new[]
{
"string",
},
SalesforceIncludeFormulaFields = false,
ScdType = "string",
SequenceBies = new[]
{
"string",
},
},
},
LastModified = 0,
LatestUpdates = new[]
{
new Databricks.Inputs.PipelineLatestUpdateArgs
{
CreationTime = "string",
State = "string",
UpdateId = "string",
},
},
Libraries = new[]
{
new Databricks.Inputs.PipelineLibraryArgs
{
File = new Databricks.Inputs.PipelineLibraryFileArgs
{
Path = "string",
},
Jar = "string",
Maven = new Databricks.Inputs.PipelineLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Notebook = new Databricks.Inputs.PipelineLibraryNotebookArgs
{
Path = "string",
},
},
},
Name = "string",
Notifications = new[]
{
new Databricks.Inputs.PipelineNotificationArgs
{
Alerts = new[]
{
"string",
},
EmailRecipients = new[]
{
"string",
},
},
},
Photon = false,
RestartWindow = new Databricks.Inputs.PipelineRestartWindowArgs
{
StartHour = 0,
DaysOfWeek = "string",
TimeZoneId = "string",
},
RunAsUserName = "string",
Schema = "string",
Serverless = false,
State = "string",
Storage = "string",
Target = "string",
Trigger = new Databricks.Inputs.PipelineTriggerArgs
{
Cron = new Databricks.Inputs.PipelineTriggerCronArgs
{
QuartzCronSchedule = "string",
TimezoneId = "string",
},
Manual = null,
},
Url = "string",
});
example, err := databricks.NewPipeline(ctx, "pipelineResource", &databricks.PipelineArgs{
AllowDuplicateNames: pulumi.Bool(false),
BudgetPolicyId: pulumi.String("string"),
Catalog: pulumi.String("string"),
Cause: pulumi.String("string"),
Channel: pulumi.String("string"),
ClusterId: pulumi.String("string"),
Clusters: databricks.PipelineClusterArray{
&databricks.PipelineClusterArgs{
ApplyPolicyDefaultValues: pulumi.Bool(false),
Autoscale: &databricks.PipelineClusterAutoscaleArgs{
MaxWorkers: pulumi.Int(0),
MinWorkers: pulumi.Int(0),
Mode: pulumi.String("string"),
},
AwsAttributes: &databricks.PipelineClusterAwsAttributesArgs{
Availability: pulumi.String("string"),
EbsVolumeCount: pulumi.Int(0),
EbsVolumeIops: pulumi.Int(0),
EbsVolumeSize: pulumi.Int(0),
EbsVolumeThroughput: pulumi.Int(0),
EbsVolumeType: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
InstanceProfileArn: pulumi.String("string"),
SpotBidPricePercent: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
AzureAttributes: &databricks.PipelineClusterAzureAttributesArgs{
Availability: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
LogAnalyticsInfo: &databricks.PipelineClusterAzureAttributesLogAnalyticsInfoArgs{
LogAnalyticsPrimaryKey: pulumi.String("string"),
LogAnalyticsWorkspaceId: pulumi.String("string"),
},
SpotBidMaxPrice: pulumi.Float64(0),
},
ClusterLogConf: &databricks.PipelineClusterClusterLogConfArgs{
Dbfs: &databricks.PipelineClusterClusterLogConfDbfsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.PipelineClusterClusterLogConfS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
},
CustomTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
DriverInstancePoolId: pulumi.String("string"),
DriverNodeTypeId: pulumi.String("string"),
EnableLocalDiskEncryption: pulumi.Bool(false),
GcpAttributes: &databricks.PipelineClusterGcpAttributesArgs{
Availability: pulumi.String("string"),
GoogleServiceAccount: pulumi.String("string"),
LocalSsdCount: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
InitScripts: databricks.PipelineClusterInitScriptArray{
&databricks.PipelineClusterInitScriptArgs{
Abfss: &databricks.PipelineClusterInitScriptAbfssArgs{
Destination: pulumi.String("string"),
},
File: &databricks.PipelineClusterInitScriptFileArgs{
Destination: pulumi.String("string"),
},
Gcs: &databricks.PipelineClusterInitScriptGcsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.PipelineClusterInitScriptS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
Volumes: &databricks.PipelineClusterInitScriptVolumesArgs{
Destination: pulumi.String("string"),
},
Workspace: &databricks.PipelineClusterInitScriptWorkspaceArgs{
Destination: pulumi.String("string"),
},
},
},
InstancePoolId: pulumi.String("string"),
Label: pulumi.String("string"),
NodeTypeId: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
PolicyId: pulumi.String("string"),
SparkConf: pulumi.StringMap{
"string": pulumi.String("string"),
},
SparkEnvVars: pulumi.StringMap{
"string": pulumi.String("string"),
},
SshPublicKeys: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Configuration: pulumi.StringMap{
"string": pulumi.String("string"),
},
Continuous: pulumi.Bool(false),
CreatorUserName: pulumi.String("string"),
Deployment: &databricks.PipelineDeploymentArgs{
Kind: pulumi.String("string"),
MetadataFilePath: pulumi.String("string"),
},
Development: pulumi.Bool(false),
Edition: pulumi.String("string"),
ExpectedLastModified: pulumi.Int(0),
Filters: &databricks.PipelineFiltersArgs{
Excludes: pulumi.StringArray{
pulumi.String("string"),
},
Includes: pulumi.StringArray{
pulumi.String("string"),
},
},
GatewayDefinition: &databricks.PipelineGatewayDefinitionArgs{
ConnectionId: pulumi.String("string"),
ConnectionName: pulumi.String("string"),
GatewayStorageCatalog: pulumi.String("string"),
GatewayStorageName: pulumi.String("string"),
GatewayStorageSchema: pulumi.String("string"),
},
Health: pulumi.String("string"),
IngestionDefinition: &databricks.PipelineIngestionDefinitionArgs{
ConnectionName: pulumi.String("string"),
IngestionGatewayId: pulumi.String("string"),
Objects: databricks.PipelineIngestionDefinitionObjectArray{
&databricks.PipelineIngestionDefinitionObjectArgs{
Report: &databricks.PipelineIngestionDefinitionObjectReportArgs{
DestinationCatalog: pulumi.String("string"),
DestinationSchema: pulumi.String("string"),
DestinationTable: pulumi.String("string"),
SourceUrl: pulumi.String("string"),
TableConfiguration: &databricks.PipelineIngestionDefinitionObjectReportTableConfigurationArgs{
PrimaryKeys: pulumi.StringArray{
pulumi.String("string"),
},
SalesforceIncludeFormulaFields: pulumi.Bool(false),
ScdType: pulumi.String("string"),
SequenceBies: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Schema: &databricks.PipelineIngestionDefinitionObjectSchemaArgs{
DestinationCatalog: pulumi.String("string"),
DestinationSchema: pulumi.String("string"),
SourceCatalog: pulumi.String("string"),
SourceSchema: pulumi.String("string"),
TableConfiguration: &databricks.PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs{
PrimaryKeys: pulumi.StringArray{
pulumi.String("string"),
},
SalesforceIncludeFormulaFields: pulumi.Bool(false),
ScdType: pulumi.String("string"),
SequenceBies: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Table: &databricks.PipelineIngestionDefinitionObjectTableArgs{
DestinationCatalog: pulumi.String("string"),
DestinationSchema: pulumi.String("string"),
DestinationTable: pulumi.String("string"),
SourceCatalog: pulumi.String("string"),
SourceSchema: pulumi.String("string"),
SourceTable: pulumi.String("string"),
TableConfiguration: &databricks.PipelineIngestionDefinitionObjectTableTableConfigurationArgs{
PrimaryKeys: pulumi.StringArray{
pulumi.String("string"),
},
SalesforceIncludeFormulaFields: pulumi.Bool(false),
ScdType: pulumi.String("string"),
SequenceBies: pulumi.StringArray{
pulumi.String("string"),
},
},
},
},
},
TableConfiguration: &databricks.PipelineIngestionDefinitionTableConfigurationArgs{
PrimaryKeys: pulumi.StringArray{
pulumi.String("string"),
},
SalesforceIncludeFormulaFields: pulumi.Bool(false),
ScdType: pulumi.String("string"),
SequenceBies: pulumi.StringArray{
pulumi.String("string"),
},
},
},
LastModified: pulumi.Int(0),
LatestUpdates: databricks.PipelineLatestUpdateArray{
&databricks.PipelineLatestUpdateArgs{
CreationTime: pulumi.String("string"),
State: pulumi.String("string"),
UpdateId: pulumi.String("string"),
},
},
Libraries: databricks.PipelineLibraryArray{
&databricks.PipelineLibraryArgs{
File: &databricks.PipelineLibraryFileArgs{
Path: pulumi.String("string"),
},
Jar: pulumi.String("string"),
Maven: &databricks.PipelineLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Notebook: &databricks.PipelineLibraryNotebookArgs{
Path: pulumi.String("string"),
},
},
},
Name: pulumi.String("string"),
Notifications: databricks.PipelineNotificationArray{
&databricks.PipelineNotificationArgs{
Alerts: pulumi.StringArray{
pulumi.String("string"),
},
EmailRecipients: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Photon: pulumi.Bool(false),
RestartWindow: &databricks.PipelineRestartWindowArgs{
StartHour: pulumi.Int(0),
DaysOfWeek: pulumi.String("string"),
TimeZoneId: pulumi.String("string"),
},
RunAsUserName: pulumi.String("string"),
Schema: pulumi.String("string"),
Serverless: pulumi.Bool(false),
State: pulumi.String("string"),
Storage: pulumi.String("string"),
Target: pulumi.String("string"),
Trigger: &databricks.PipelineTriggerArgs{
Cron: &databricks.PipelineTriggerCronArgs{
QuartzCronSchedule: pulumi.String("string"),
TimezoneId: pulumi.String("string"),
},
Manual: &databricks.PipelineTriggerManualArgs{},
},
Url: pulumi.String("string"),
})
var pipelineResource = new Pipeline("pipelineResource", PipelineArgs.builder()
.allowDuplicateNames(false)
.budgetPolicyId("string")
.catalog("string")
.cause("string")
.channel("string")
.clusterId("string")
.clusters(PipelineClusterArgs.builder()
.applyPolicyDefaultValues(false)
.autoscale(PipelineClusterAutoscaleArgs.builder()
.maxWorkers(0)
.minWorkers(0)
.mode("string")
.build())
.awsAttributes(PipelineClusterAwsAttributesArgs.builder()
.availability("string")
.ebsVolumeCount(0)
.ebsVolumeIops(0)
.ebsVolumeSize(0)
.ebsVolumeThroughput(0)
.ebsVolumeType("string")
.firstOnDemand(0)
.instanceProfileArn("string")
.spotBidPricePercent(0)
.zoneId("string")
.build())
.azureAttributes(PipelineClusterAzureAttributesArgs.builder()
.availability("string")
.firstOnDemand(0)
.logAnalyticsInfo(PipelineClusterAzureAttributesLogAnalyticsInfoArgs.builder()
.logAnalyticsPrimaryKey("string")
.logAnalyticsWorkspaceId("string")
.build())
.spotBidMaxPrice(0)
.build())
.clusterLogConf(PipelineClusterClusterLogConfArgs.builder()
.dbfs(PipelineClusterClusterLogConfDbfsArgs.builder()
.destination("string")
.build())
.s3(PipelineClusterClusterLogConfS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.build())
.customTags(Map.of("string", "string"))
.driverInstancePoolId("string")
.driverNodeTypeId("string")
.enableLocalDiskEncryption(false)
.gcpAttributes(PipelineClusterGcpAttributesArgs.builder()
.availability("string")
.googleServiceAccount("string")
.localSsdCount(0)
.zoneId("string")
.build())
.initScripts(PipelineClusterInitScriptArgs.builder()
.abfss(PipelineClusterInitScriptAbfssArgs.builder()
.destination("string")
.build())
.file(PipelineClusterInitScriptFileArgs.builder()
.destination("string")
.build())
.gcs(PipelineClusterInitScriptGcsArgs.builder()
.destination("string")
.build())
.s3(PipelineClusterInitScriptS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.volumes(PipelineClusterInitScriptVolumesArgs.builder()
.destination("string")
.build())
.workspace(PipelineClusterInitScriptWorkspaceArgs.builder()
.destination("string")
.build())
.build())
.instancePoolId("string")
.label("string")
.nodeTypeId("string")
.numWorkers(0)
.policyId("string")
.sparkConf(Map.of("string", "string"))
.sparkEnvVars(Map.of("string", "string"))
.sshPublicKeys("string")
.build())
.configuration(Map.of("string", "string"))
.continuous(false)
.creatorUserName("string")
.deployment(PipelineDeploymentArgs.builder()
.kind("string")
.metadataFilePath("string")
.build())
.development(false)
.edition("string")
.expectedLastModified(0)
.filters(PipelineFiltersArgs.builder()
.excludes("string")
.includes("string")
.build())
.gatewayDefinition(PipelineGatewayDefinitionArgs.builder()
.connectionId("string")
.connectionName("string")
.gatewayStorageCatalog("string")
.gatewayStorageName("string")
.gatewayStorageSchema("string")
.build())
.health("string")
.ingestionDefinition(PipelineIngestionDefinitionArgs.builder()
.connectionName("string")
.ingestionGatewayId("string")
.objects(PipelineIngestionDefinitionObjectArgs.builder()
.report(PipelineIngestionDefinitionObjectReportArgs.builder()
.destinationCatalog("string")
.destinationSchema("string")
.destinationTable("string")
.sourceUrl("string")
.tableConfiguration(PipelineIngestionDefinitionObjectReportTableConfigurationArgs.builder()
.primaryKeys("string")
.salesforceIncludeFormulaFields(false)
.scdType("string")
.sequenceBies("string")
.build())
.build())
.schema(PipelineIngestionDefinitionObjectSchemaArgs.builder()
.destinationCatalog("string")
.destinationSchema("string")
.sourceCatalog("string")
.sourceSchema("string")
.tableConfiguration(PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs.builder()
.primaryKeys("string")
.salesforceIncludeFormulaFields(false)
.scdType("string")
.sequenceBies("string")
.build())
.build())
.table(PipelineIngestionDefinitionObjectTableArgs.builder()
.destinationCatalog("string")
.destinationSchema("string")
.destinationTable("string")
.sourceCatalog("string")
.sourceSchema("string")
.sourceTable("string")
.tableConfiguration(PipelineIngestionDefinitionObjectTableTableConfigurationArgs.builder()
.primaryKeys("string")
.salesforceIncludeFormulaFields(false)
.scdType("string")
.sequenceBies("string")
.build())
.build())
.build())
.tableConfiguration(PipelineIngestionDefinitionTableConfigurationArgs.builder()
.primaryKeys("string")
.salesforceIncludeFormulaFields(false)
.scdType("string")
.sequenceBies("string")
.build())
.build())
.lastModified(0)
.latestUpdates(PipelineLatestUpdateArgs.builder()
.creationTime("string")
.state("string")
.updateId("string")
.build())
.libraries(PipelineLibraryArgs.builder()
.file(PipelineLibraryFileArgs.builder()
.path("string")
.build())
.jar("string")
.maven(PipelineLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.notebook(PipelineLibraryNotebookArgs.builder()
.path("string")
.build())
.build())
.name("string")
.notifications(PipelineNotificationArgs.builder()
.alerts("string")
.emailRecipients("string")
.build())
.photon(false)
.restartWindow(PipelineRestartWindowArgs.builder()
.startHour(0)
.daysOfWeek("string")
.timeZoneId("string")
.build())
.runAsUserName("string")
.schema("string")
.serverless(false)
.state("string")
.storage("string")
.target("string")
.trigger(PipelineTriggerArgs.builder()
.cron(PipelineTriggerCronArgs.builder()
.quartzCronSchedule("string")
.timezoneId("string")
.build())
.manual()
.build())
.url("string")
.build());
pipeline_resource = databricks.Pipeline("pipelineResource",
allow_duplicate_names=False,
budget_policy_id="string",
catalog="string",
cause="string",
channel="string",
cluster_id="string",
clusters=[{
"apply_policy_default_values": False,
"autoscale": {
"max_workers": 0,
"min_workers": 0,
"mode": "string",
},
"aws_attributes": {
"availability": "string",
"ebs_volume_count": 0,
"ebs_volume_iops": 0,
"ebs_volume_size": 0,
"ebs_volume_throughput": 0,
"ebs_volume_type": "string",
"first_on_demand": 0,
"instance_profile_arn": "string",
"spot_bid_price_percent": 0,
"zone_id": "string",
},
"azure_attributes": {
"availability": "string",
"first_on_demand": 0,
"log_analytics_info": {
"log_analytics_primary_key": "string",
"log_analytics_workspace_id": "string",
},
"spot_bid_max_price": 0,
},
"cluster_log_conf": {
"dbfs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
},
"custom_tags": {
"string": "string",
},
"driver_instance_pool_id": "string",
"driver_node_type_id": "string",
"enable_local_disk_encryption": False,
"gcp_attributes": {
"availability": "string",
"google_service_account": "string",
"local_ssd_count": 0,
"zone_id": "string",
},
"init_scripts": [{
"abfss": {
"destination": "string",
},
"file": {
"destination": "string",
},
"gcs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
"volumes": {
"destination": "string",
},
"workspace": {
"destination": "string",
},
}],
"instance_pool_id": "string",
"label": "string",
"node_type_id": "string",
"num_workers": 0,
"policy_id": "string",
"spark_conf": {
"string": "string",
},
"spark_env_vars": {
"string": "string",
},
"ssh_public_keys": ["string"],
}],
configuration={
"string": "string",
},
continuous=False,
creator_user_name="string",
deployment={
"kind": "string",
"metadata_file_path": "string",
},
development=False,
edition="string",
expected_last_modified=0,
filters={
"excludes": ["string"],
"includes": ["string"],
},
gateway_definition={
"connection_id": "string",
"connection_name": "string",
"gateway_storage_catalog": "string",
"gateway_storage_name": "string",
"gateway_storage_schema": "string",
},
health="string",
ingestion_definition={
"connection_name": "string",
"ingestion_gateway_id": "string",
"objects": [{
"report": {
"destination_catalog": "string",
"destination_schema": "string",
"destination_table": "string",
"source_url": "string",
"table_configuration": {
"primary_keys": ["string"],
"salesforce_include_formula_fields": False,
"scd_type": "string",
"sequence_bies": ["string"],
},
},
"schema": {
"destination_catalog": "string",
"destination_schema": "string",
"source_catalog": "string",
"source_schema": "string",
"table_configuration": {
"primary_keys": ["string"],
"salesforce_include_formula_fields": False,
"scd_type": "string",
"sequence_bies": ["string"],
},
},
"table": {
"destination_catalog": "string",
"destination_schema": "string",
"destination_table": "string",
"source_catalog": "string",
"source_schema": "string",
"source_table": "string",
"table_configuration": {
"primary_keys": ["string"],
"salesforce_include_formula_fields": False,
"scd_type": "string",
"sequence_bies": ["string"],
},
},
}],
"table_configuration": {
"primary_keys": ["string"],
"salesforce_include_formula_fields": False,
"scd_type": "string",
"sequence_bies": ["string"],
},
},
last_modified=0,
latest_updates=[{
"creation_time": "string",
"state": "string",
"update_id": "string",
}],
libraries=[{
"file": {
"path": "string",
},
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"notebook": {
"path": "string",
},
}],
name="string",
notifications=[{
"alerts": ["string"],
"email_recipients": ["string"],
}],
photon=False,
restart_window={
"start_hour": 0,
"days_of_week": "string",
"time_zone_id": "string",
},
run_as_user_name="string",
schema="string",
serverless=False,
state="string",
storage="string",
target="string",
trigger={
"cron": {
"quartz_cron_schedule": "string",
"timezone_id": "string",
},
"manual": {},
},
url="string")
const pipelineResource = new databricks.Pipeline("pipelineResource", {
allowDuplicateNames: false,
budgetPolicyId: "string",
catalog: "string",
cause: "string",
channel: "string",
clusterId: "string",
clusters: [{
applyPolicyDefaultValues: false,
autoscale: {
maxWorkers: 0,
minWorkers: 0,
mode: "string",
},
awsAttributes: {
availability: "string",
ebsVolumeCount: 0,
ebsVolumeIops: 0,
ebsVolumeSize: 0,
ebsVolumeThroughput: 0,
ebsVolumeType: "string",
firstOnDemand: 0,
instanceProfileArn: "string",
spotBidPricePercent: 0,
zoneId: "string",
},
azureAttributes: {
availability: "string",
firstOnDemand: 0,
logAnalyticsInfo: {
logAnalyticsPrimaryKey: "string",
logAnalyticsWorkspaceId: "string",
},
spotBidMaxPrice: 0,
},
clusterLogConf: {
dbfs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
},
customTags: {
string: "string",
},
driverInstancePoolId: "string",
driverNodeTypeId: "string",
enableLocalDiskEncryption: false,
gcpAttributes: {
availability: "string",
googleServiceAccount: "string",
localSsdCount: 0,
zoneId: "string",
},
initScripts: [{
abfss: {
destination: "string",
},
file: {
destination: "string",
},
gcs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
volumes: {
destination: "string",
},
workspace: {
destination: "string",
},
}],
instancePoolId: "string",
label: "string",
nodeTypeId: "string",
numWorkers: 0,
policyId: "string",
sparkConf: {
string: "string",
},
sparkEnvVars: {
string: "string",
},
sshPublicKeys: ["string"],
}],
configuration: {
string: "string",
},
continuous: false,
creatorUserName: "string",
deployment: {
kind: "string",
metadataFilePath: "string",
},
development: false,
edition: "string",
expectedLastModified: 0,
filters: {
excludes: ["string"],
includes: ["string"],
},
gatewayDefinition: {
connectionId: "string",
connectionName: "string",
gatewayStorageCatalog: "string",
gatewayStorageName: "string",
gatewayStorageSchema: "string",
},
health: "string",
ingestionDefinition: {
connectionName: "string",
ingestionGatewayId: "string",
objects: [{
report: {
destinationCatalog: "string",
destinationSchema: "string",
destinationTable: "string",
sourceUrl: "string",
tableConfiguration: {
primaryKeys: ["string"],
salesforceIncludeFormulaFields: false,
scdType: "string",
sequenceBies: ["string"],
},
},
schema: {
destinationCatalog: "string",
destinationSchema: "string",
sourceCatalog: "string",
sourceSchema: "string",
tableConfiguration: {
primaryKeys: ["string"],
salesforceIncludeFormulaFields: false,
scdType: "string",
sequenceBies: ["string"],
},
},
table: {
destinationCatalog: "string",
destinationSchema: "string",
destinationTable: "string",
sourceCatalog: "string",
sourceSchema: "string",
sourceTable: "string",
tableConfiguration: {
primaryKeys: ["string"],
salesforceIncludeFormulaFields: false,
scdType: "string",
sequenceBies: ["string"],
},
},
}],
tableConfiguration: {
primaryKeys: ["string"],
salesforceIncludeFormulaFields: false,
scdType: "string",
sequenceBies: ["string"],
},
},
lastModified: 0,
latestUpdates: [{
creationTime: "string",
state: "string",
updateId: "string",
}],
libraries: [{
file: {
path: "string",
},
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
notebook: {
path: "string",
},
}],
name: "string",
notifications: [{
alerts: ["string"],
emailRecipients: ["string"],
}],
photon: false,
restartWindow: {
startHour: 0,
daysOfWeek: "string",
timeZoneId: "string",
},
runAsUserName: "string",
schema: "string",
serverless: false,
state: "string",
storage: "string",
target: "string",
trigger: {
cron: {
quartzCronSchedule: "string",
timezoneId: "string",
},
manual: {},
},
url: "string",
});
type: databricks:Pipeline
properties:
allowDuplicateNames: false
budgetPolicyId: string
catalog: string
cause: string
channel: string
clusterId: string
clusters:
- applyPolicyDefaultValues: false
autoscale:
maxWorkers: 0
minWorkers: 0
mode: string
awsAttributes:
availability: string
ebsVolumeCount: 0
ebsVolumeIops: 0
ebsVolumeSize: 0
ebsVolumeThroughput: 0
ebsVolumeType: string
firstOnDemand: 0
instanceProfileArn: string
spotBidPricePercent: 0
zoneId: string
azureAttributes:
availability: string
firstOnDemand: 0
logAnalyticsInfo:
logAnalyticsPrimaryKey: string
logAnalyticsWorkspaceId: string
spotBidMaxPrice: 0
clusterLogConf:
dbfs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
customTags:
string: string
driverInstancePoolId: string
driverNodeTypeId: string
enableLocalDiskEncryption: false
gcpAttributes:
availability: string
googleServiceAccount: string
localSsdCount: 0
zoneId: string
initScripts:
- abfss:
destination: string
file:
destination: string
gcs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
volumes:
destination: string
workspace:
destination: string
instancePoolId: string
label: string
nodeTypeId: string
numWorkers: 0
policyId: string
sparkConf:
string: string
sparkEnvVars:
string: string
sshPublicKeys:
- string
configuration:
string: string
continuous: false
creatorUserName: string
deployment:
kind: string
metadataFilePath: string
development: false
edition: string
expectedLastModified: 0
filters:
excludes:
- string
includes:
- string
gatewayDefinition:
connectionId: string
connectionName: string
gatewayStorageCatalog: string
gatewayStorageName: string
gatewayStorageSchema: string
health: string
ingestionDefinition:
connectionName: string
ingestionGatewayId: string
objects:
- report:
destinationCatalog: string
destinationSchema: string
destinationTable: string
sourceUrl: string
tableConfiguration:
primaryKeys:
- string
salesforceIncludeFormulaFields: false
scdType: string
sequenceBies:
- string
schema:
destinationCatalog: string
destinationSchema: string
sourceCatalog: string
sourceSchema: string
tableConfiguration:
primaryKeys:
- string
salesforceIncludeFormulaFields: false
scdType: string
sequenceBies:
- string
table:
destinationCatalog: string
destinationSchema: string
destinationTable: string
sourceCatalog: string
sourceSchema: string
sourceTable: string
tableConfiguration:
primaryKeys:
- string
salesforceIncludeFormulaFields: false
scdType: string
sequenceBies:
- string
tableConfiguration:
primaryKeys:
- string
salesforceIncludeFormulaFields: false
scdType: string
sequenceBies:
- string
lastModified: 0
latestUpdates:
- creationTime: string
state: string
updateId: string
libraries:
- file:
path: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
notebook:
path: string
name: string
notifications:
- alerts:
- string
emailRecipients:
- string
photon: false
restartWindow:
daysOfWeek: string
startHour: 0
timeZoneId: string
runAsUserName: string
schema: string
serverless: false
state: string
storage: string
target: string
trigger:
cron:
quartzCronSchedule: string
timezoneId: string
manual: {}
url: string
Pipeline Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
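As a brief illustration, the notifications input can be supplied either way (recipient and pipeline names here are placeholders, and the example assumes the generated PipelineNotificationArgs class):
import pulumi_databricks as databricks

# Dictionary-literal form.
p1 = databricks.Pipeline("p1",
    name="Dict Form",
    notifications=[{
        "email_recipients": ["user@domain.com"],
        "alerts": ["on-update-failure"],
    }])

# Equivalent argument-class form.
p2 = databricks.Pipeline("p2",
    name="Args Form",
    notifications=[databricks.PipelineNotificationArgs(
        email_recipients=["user@domain.com"],
        alerts=["on-update-failure"],
    )])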
The Pipeline resource accepts the following input properties:
- AllowDuplicateNames bool - Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- BudgetPolicyId string - Optional string specifying the ID of the budget policy for this DLT pipeline.
- Catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- Cause string
- Channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- ClusterId string
- Clusters List<PipelineCluster> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation, and that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old one).
- Configuration Dictionary<string, string> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- CreatorUserName string
- Deployment PipelineDeployment - Deployment type of this pipeline. Supports the following attributes:
- Development bool - A flag indicating whether to run the pipeline in development mode. The default value is false.
- Edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default). Not required when serverless is set to true.
- ExpectedLastModified int
- Filters PipelineFilters - Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- GatewayDefinition PipelineGatewayDefinition - The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- Health string
- IngestionDefinition PipelineIngestionDefinition
- LastModified int
- LatestUpdates List<PipelineLatestUpdate>
- Libraries List<PipelineLibrary> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of the special notebook and file library types, which must have the path attribute. Right now only the notebook and file types are supported.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications List<PipelineNotification>
- Photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- RestartWindow PipelineRestartWindow
- RunAsUserName string
- Schema string - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool - An optional flag indicating whether serverless compute should be used for this DLT pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- State string
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- Target string - The name of a database (in either the Hive metastore or a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger PipelineTrigger
- Url string - URL of the DLT pipeline on the given workspace.
- AllowDuplicateNames bool - Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- BudgetPolicyId string - Optional string specifying the ID of the budget policy for this DLT pipeline.
- Catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- Cause string
- Channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- ClusterId string
- Clusters []PipelineClusterArgs - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation, and that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old one).
- Configuration map[string]string - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool - A flag indicating whether to run the pipeline continuously. The default value is false.
- CreatorUserName string
- Deployment PipelineDeploymentArgs - Deployment type of this pipeline. Supports the following attributes:
- Development bool - A flag indicating whether to run the pipeline in development mode. The default value is false.
- Edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default). Not required when serverless is set to true.
- ExpectedLastModified int
- Filters PipelineFiltersArgs - Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- GatewayDefinition PipelineGatewayDefinitionArgs - The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- Health string
- IngestionDefinition PipelineIngestionDefinitionArgs
- LastModified int
- LatestUpdates []PipelineLatestUpdateArgs
- Libraries []PipelineLibraryArgs - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of the special notebook and file library types, which must have the path attribute. Right now only the notebook and file types are supported.
- Name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications []PipelineNotificationArgs
- Photon bool - A flag indicating whether to use the Photon engine. The default value is false.
- RestartWindow PipelineRestartWindowArgs
- RunAsUserName string
- Schema string - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool - An optional flag indicating whether serverless compute should be used for this DLT pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- State string
- Storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- Target string - The name of a database (in either the Hive metastore or a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger PipelineTriggerArgs
- Url string - URL of the DLT pipeline on the given workspace.
- allowDuplicateNames Boolean - Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- budgetPolicyId String - Optional string specifying the ID of the budget policy for this DLT pipeline.
- catalog String - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- cause String
- channel String - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusterId String
- clusters List<PipelineCluster> - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation, and that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old one).
- configuration Map<String,String> - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName String
- deployment PipelineDeployment - Deployment type of this pipeline. Supports the following attributes:
- development Boolean - A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition String - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default). Not required when serverless is set to true.
- expectedLastModified Integer
- filters PipelineFilters - Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition PipelineGatewayDefinition - The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- health String
- ingestionDefinition PipelineIngestionDefinition
- lastModified Integer
- latestUpdates List<PipelineLatestUpdate>
- libraries List<PipelineLibrary> - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of the special notebook and file library types, which must have the path attribute. Right now only the notebook and file types are supported.
- name String - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<PipelineNotification>
- photon Boolean - A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow PipelineRestartWindow
- runAsUserName String
- schema String - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean - An optional flag indicating whether serverless compute should be used for this DLT pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- state String
- storage String - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target String - The name of a database (in either the Hive metastore or a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger PipelineTrigger
- url String - URL of the DLT pipeline on the given workspace.
- allowDuplicateNames boolean - Optional boolean flag. If false, deployment will fail if the name conflicts with that of another pipeline. The default is false.
- budgetPolicyId string - Optional string specifying the ID of the budget policy for this DLT pipeline.
- catalog string - The name of the catalog in Unity Catalog. Changing this parameter forces recreation of the pipeline. (Conflicts with storage.)
- cause string
- channel string - Optional name of the release channel for the Spark version used by the DLT pipeline. Supported values are CURRENT (default) and PREVIEW.
- clusterId string
- clusters PipelineCluster[] - Blocks describing the clusters to run the pipeline. If none is specified, a default cluster configuration is selected automatically. Note that DLT pipeline clusters support only a subset of cluster attributes, as described in the documentation, and that the autoscale block is extended with a mode parameter that controls the autoscaling algorithm (possible values are ENHANCED for the new, enhanced autoscaling algorithm, or LEGACY for the old one).
- configuration {[key: string]: string} - An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean - A flag indicating whether to run the pipeline continuously. The default value is false.
- creatorUserName string
- deployment PipelineDeployment - Deployment type of this pipeline. Supports the following attributes:
- development boolean - A flag indicating whether to run the pipeline in development mode. The default value is false.
- edition string - Optional name of the product edition. Supported values are CORE, PRO, and ADVANCED (default). Not required when serverless is set to true.
- expectedLastModified number
- filters PipelineFilters - Filters on which Pipeline packages to include in the deployed graph. This block consists of the following attributes:
- gatewayDefinition PipelineGatewayDefinition - The definition of a gateway pipeline to support change data capture (CDC). Consists of the following attributes:
- health string
- ingestionDefinition PipelineIngestionDefinition
- lastModified number
- latestUpdates PipelineLatestUpdate[]
- libraries PipelineLibrary[] - Blocks specifying pipeline code and required artifacts. The syntax resembles the library configuration block, with the addition of the special notebook and file library types, which must have the path attribute. Right now only the notebook and file types are supported.
- name string - A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications PipelineNotification[]
- photon boolean - A flag indicating whether to use the Photon engine. The default value is false.
- restartWindow PipelineRestartWindow
- runAsUserName string
- schema string - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless boolean - An optional flag indicating whether serverless compute should be used for this DLT pipeline. Requires catalog to be set, as it can only be used with Unity Catalog.
- state string
- storage string - A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Changing this parameter forces recreation of the pipeline. (Conflicts with catalog.)
- target string - The name of a database (in either the Hive metastore or a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger PipelineTrigger
- url string - URL of the DLT pipeline on the given workspace.
- allow_
duplicate_ boolnames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - budget_
policy_ strid - optional string specifying ID of the budget policy for this DLT pipeline.
- catalog str
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - cause str
- channel str
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - cluster_
id str - clusters
Sequence[Pipeline
Cluster Args] - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - configuration Mapping[str, str]
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - creator_
user_ strname - deployment
Pipeline
Deployment Args - Deployment type of this pipeline. Supports following attributes:
- development bool
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - edition str
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - expected_
last_ intmodified - filters
Pipeline
Filters Args - Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- gateway_
definition PipelineGateway Definition Args - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- health str
- ingestion_
definition PipelineIngestion Definition Args - last_
modified int - latest_
updates Sequence[PipelineLatest Update Args] - libraries
Sequence[Pipeline
Library Args] - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - name str
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications
Sequence[Pipeline
Notification Args] - photon bool
- A flag indicating whether to use Photon engine. The default value is
false
. - restart_
window PipelineRestart Window Args - run_
as_ struser_ name - schema str
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless bool
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - state str
- storage str
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - target str
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger
Pipeline
Trigger Args - url str
- URL of the DLT pipeline on the given workspace.
- allow
Duplicate BooleanNames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - budget
Policy StringId - optional string specifying ID of the budget policy for this DLT pipeline.
- catalog String
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - cause String
- channel String
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - cluster
Id String - clusters List<Property Map>
- blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - configuration Map<String>
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - creator
User StringName - deployment Property Map
- Deployment type of this pipeline. Supports following attributes:
- development Boolean
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - edition String
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - expected
Last NumberModified - filters Property Map
- Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- gateway
Definition Property Map - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- health String
- ingestion
Definition Property Map - last
Modified Number - latest
Updates List<Property Map> - libraries List<Property Map>
- blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - name String
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<Property Map>
- photon Boolean
- A flag indicating whether to use Photon engine. The default value is
false
. - restart
Window Property Map - run
As StringUser Name - schema String
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - state String
- storage String
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - target String
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger Property Map
- url String
- URL of the DLT pipeline on the given workspace.
Outputs
All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing Pipeline Resource
Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
allow_duplicate_names: Optional[bool] = None,
budget_policy_id: Optional[str] = None,
catalog: Optional[str] = None,
cause: Optional[str] = None,
channel: Optional[str] = None,
cluster_id: Optional[str] = None,
clusters: Optional[Sequence[PipelineClusterArgs]] = None,
configuration: Optional[Mapping[str, str]] = None,
continuous: Optional[bool] = None,
creator_user_name: Optional[str] = None,
deployment: Optional[PipelineDeploymentArgs] = None,
development: Optional[bool] = None,
edition: Optional[str] = None,
expected_last_modified: Optional[int] = None,
filters: Optional[PipelineFiltersArgs] = None,
gateway_definition: Optional[PipelineGatewayDefinitionArgs] = None,
health: Optional[str] = None,
ingestion_definition: Optional[PipelineIngestionDefinitionArgs] = None,
last_modified: Optional[int] = None,
latest_updates: Optional[Sequence[PipelineLatestUpdateArgs]] = None,
libraries: Optional[Sequence[PipelineLibraryArgs]] = None,
name: Optional[str] = None,
notifications: Optional[Sequence[PipelineNotificationArgs]] = None,
photon: Optional[bool] = None,
restart_window: Optional[PipelineRestartWindowArgs] = None,
run_as_user_name: Optional[str] = None,
schema: Optional[str] = None,
serverless: Optional[bool] = None,
state: Optional[str] = None,
storage: Optional[str] = None,
target: Optional[str] = None,
trigger: Optional[PipelineTriggerArgs] = None,
url: Optional[str] = None) -> Pipeline
func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Allow
Duplicate boolNames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - Budget
Policy stringId - optional string specifying ID of the budget policy for this DLT pipeline.
- Catalog string
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - Cause string
- Channel string
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - Cluster
Id string - Clusters
List<Pipeline
Cluster> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - Configuration Dictionary<string, string>
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - Creator
User stringName - Deployment
Pipeline
Deployment - Deployment type of this pipeline. Supports following attributes:
- Development bool
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - Edition string
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - Expected
Last intModified - Filters
Pipeline
Filters - Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- Gateway
Definition PipelineGateway Definition - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- Health string
- Ingestion
Definition PipelineIngestion Definition - Last
Modified int - Latest
Updates List<PipelineLatest Update> - Libraries
List<Pipeline
Library> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - Name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications
List<Pipeline
Notification> - Photon bool
- A flag indicating whether to use Photon engine. The default value is
false
. - Restart
Window PipelineRestart Window - Run
As stringUser Name - Schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - State string
- Storage string
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - Target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger
Pipeline
Trigger - Url string
- URL of the DLT pipeline on the given workspace.
- Allow
Duplicate boolNames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - Budget
Policy stringId - optional string specifying ID of the budget policy for this DLT pipeline.
- Catalog string
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - Cause string
- Channel string
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - Cluster
Id string - Clusters
[]Pipeline
Cluster Args - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - Configuration map[string]string
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- Continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - Creator
User stringName - Deployment
Pipeline
Deployment Args - Deployment type of this pipeline. Supports following attributes:
- Development bool
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - Edition string
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - Expected
Last intModified - Filters
Pipeline
Filters Args - Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- Gateway
Definition PipelineGateway Definition Args - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- Health string
- Ingestion
Definition PipelineIngestion Definition Args - Last
Modified int - Latest
Updates []PipelineLatest Update Args - Libraries
[]Pipeline
Library Args - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - Name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- Notifications
[]Pipeline
Notification Args - Photon bool
- A flag indicating whether to use Photon engine. The default value is
false
. - Restart
Window PipelineRestart Window Args - Run
As stringUser Name - Schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Serverless bool
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - State string
- Storage string
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - Target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- Trigger
Pipeline
Trigger Args - Url string
- URL of the DLT pipeline on the given workspace.
- allow
Duplicate BooleanNames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - budget
Policy StringId - optional string specifying ID of the budget policy for this DLT pipeline.
- catalog String
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - cause String
- channel String
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - cluster
Id String - clusters
List<Pipeline
Cluster> - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - configuration Map<String,String>
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - creator
User StringName - deployment
Pipeline
Deployment - Deployment type of this pipeline. Supports following attributes:
- development Boolean
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - edition String
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - expected
Last IntegerModified - filters
Pipeline
Filters - Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- gateway
Definition PipelineGateway Definition - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- health String
- ingestion
Definition PipelineIngestion Definition - last
Modified Integer - latest
Updates List<PipelineLatest Update> - libraries
List<Pipeline
Library> - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - name String
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications
List<Pipeline
Notification> - photon Boolean
- A flag indicating whether to use Photon engine. The default value is
false
. - restart
Window PipelineRestart Window - run
As StringUser Name - schema String
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - state String
- storage String
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - target String
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger
Pipeline
Trigger - url String
- URL of the DLT pipeline on the given workspace.
- allow
Duplicate booleanNames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - budget
Policy stringId - optional string specifying ID of the budget policy for this DLT pipeline.
- catalog string
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - cause string
- channel string
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - cluster
Id string - clusters
Pipeline
Cluster[] - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - configuration {[key: string]: string}
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous boolean
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - creator
User stringName - deployment
Pipeline
Deployment - Deployment type of this pipeline. Supports following attributes:
- development boolean
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - edition string
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - expected
Last numberModified - filters
Pipeline
Filters - Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- gateway
Definition PipelineGateway Definition - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- health string
- ingestion
Definition PipelineIngestion Definition - last
Modified number - latest
Updates PipelineLatest Update[] - libraries
Pipeline
Library[] - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - name string
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications
Pipeline
Notification[] - photon boolean
- A flag indicating whether to use Photon engine. The default value is
false
. - restart
Window PipelineRestart Window - run
As stringUser Name - schema string
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless boolean
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - state string
- storage string
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - target string
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger
Pipeline
Trigger - url string
- URL of the DLT pipeline on the given workspace.
- allow_
duplicate_ boolnames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - budget_
policy_ strid - optional string specifying ID of the budget policy for this DLT pipeline.
- catalog str
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - cause str
- channel str
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - cluster_
id str - clusters
Sequence[Pipeline
Cluster Args] - blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - configuration Mapping[str, str]
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous bool
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - creator_
user_ strname - deployment
Pipeline
Deployment Args - Deployment type of this pipeline. Supports following attributes:
- development bool
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - edition str
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - expected_
last_ intmodified - filters
Pipeline
Filters Args - Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- gateway_
definition PipelineGateway Definition Args - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- health str
- ingestion_
definition PipelineIngestion Definition Args - last_
modified int - latest_
updates Sequence[PipelineLatest Update Args] - libraries
Sequence[Pipeline
Library Args] - blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - name str
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications
Sequence[Pipeline
Notification Args] - photon bool
- A flag indicating whether to use Photon engine. The default value is
false
. - restart_
window PipelineRestart Window Args - run_
as_ struser_ name - schema str
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless bool
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - state str
- storage str
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - target str
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger
Pipeline
Trigger Args - url str
- URL of the DLT pipeline on the given workspace.
- allow
Duplicate BooleanNames - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is
false
. - budget
Policy StringId - optional string specifying ID of the budget policy for this DLT pipeline.
- catalog String
- The name of catalog in Unity Catalog. Change of this parameter forces recreation of the pipeline. (Conflicts with
storage
). - cause String
- channel String
- optional name of the release channel for Spark version used by DLT pipeline. Supported values are:
CURRENT
(default) andPREVIEW
. - cluster
Id String - clusters List<Property Map>
- blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. Please note that DLT pipeline clusters are supporting only subset of attributes as described in documentation. Also, note that
autoscale
block is extended with themode
parameter that controls the autoscaling algorithm (possible values areENHANCED
for new, enhanced autoscaling algorithm, orLEGACY
for old algorithm). - configuration Map<String>
- An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
- continuous Boolean
- A flag indicating whether to run the pipeline continuously. The default value is
false
. - creator
User StringName - deployment Property Map
- Deployment type of this pipeline. Supports following attributes:
- development Boolean
- A flag indicating whether to run the pipeline in development mode. The default value is
false
. - edition String
- optional name of the product edition. Supported values are:
CORE
,PRO
,ADVANCED
(default). Not required whenserverless
is set totrue
. - expected
Last NumberModified - filters Property Map
- Filters on which Pipeline packages to include in the deployed graph. This block consists of following attributes:
- gateway
Definition Property Map - The definition of a gateway pipeline to support CDC. Consists of following attributes:
- health String
- ingestion
Definition Property Map - last
Modified Number - latest
Updates List<Property Map> - libraries List<Property Map>
- blocks - Specifies pipeline code and required artifacts. Syntax resembles library configuration block with the addition of a special
notebook
&file
library types that should have thepath
attribute. Right now only thenotebook
&file
types are supported. - name String
- A user-friendly name for this pipeline. The name can be used to identify pipeline jobs in the UI.
- notifications List<Property Map>
- photon Boolean
- A flag indicating whether to use Photon engine. The default value is
false
. - restart
Window Property Map - run
As StringUser Name - schema String
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- serverless Boolean
- An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires
catalog
to be set, as it could be used only with Unity Catalog. - state String
- storage String
- A location on DBFS or cloud storage where output data and metadata required for pipeline execution are stored. By default, tables are stored in a subdirectory of this location. Change of this parameter forces recreation of the pipeline. (Conflicts with
catalog
). - target String
- The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI.
- trigger Property Map
- url String
- URL of the DLT pipeline on the given workspace.
Supporting Types
PipelineCluster, PipelineClusterArgs
- Apply
Policy boolDefault Values - Autoscale
Pipeline
Cluster Autoscale - Aws
Attributes PipelineCluster Aws Attributes - Azure
Attributes PipelineCluster Azure Attributes - Cluster
Log PipelineConf Cluster Cluster Log Conf - Dictionary<string, string>
- Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Local boolDisk Encryption - Gcp
Attributes PipelineCluster Gcp Attributes - Init
Scripts List<PipelineCluster Init Script> - Instance
Pool stringId - Label string
- Node
Type stringId - Num
Workers int - Policy
Id string - Spark
Conf Dictionary<string, string> - Spark
Env Dictionary<string, string>Vars - Ssh
Public List<string>Keys
- Apply
Policy boolDefault Values - Autoscale
Pipeline
Cluster Autoscale - Aws
Attributes PipelineCluster Aws Attributes - Azure
Attributes PipelineCluster Azure Attributes - Cluster
Log PipelineConf Cluster Cluster Log Conf - map[string]string
- Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Local boolDisk Encryption - Gcp
Attributes PipelineCluster Gcp Attributes - Init
Scripts []PipelineCluster Init Script - Instance
Pool stringId - Label string
- Node
Type stringId - Num
Workers int - Policy
Id string - Spark
Conf map[string]string - Spark
Env map[string]stringVars - Ssh
Public []stringKeys
- apply
Policy BooleanDefault Values - autoscale
Pipeline
Cluster Autoscale - aws
Attributes PipelineCluster Aws Attributes - azure
Attributes PipelineCluster Azure Attributes - cluster
Log PipelineConf Cluster Cluster Log Conf - Map<String,String>
- driver
Instance StringPool Id - driver
Node StringType Id - enable
Local BooleanDisk Encryption - gcp
Attributes PipelineCluster Gcp Attributes - init
Scripts List<PipelineCluster Init Script> - instance
Pool StringId - label String
- node
Type StringId - num
Workers Integer - policy
Id String - spark
Conf Map<String,String> - spark
Env Map<String,String>Vars - ssh
Public List<String>Keys
- apply
Policy booleanDefault Values - autoscale
Pipeline
Cluster Autoscale - aws
Attributes PipelineCluster Aws Attributes - azure
Attributes PipelineCluster Azure Attributes - cluster
Log PipelineConf Cluster Cluster Log Conf - {[key: string]: string}
- driver
Instance stringPool Id - driver
Node stringType Id - enable
Local booleanDisk Encryption - gcp
Attributes PipelineCluster Gcp Attributes - init
Scripts PipelineCluster Init Script[] - instance
Pool stringId - label string
- node
Type stringId - num
Workers number - policy
Id string - spark
Conf {[key: string]: string} - spark
Env {[key: string]: string}Vars - ssh
Public string[]Keys
- apply_
policy_ booldefault_ values - autoscale
Pipeline
Cluster Autoscale - aws_
attributes PipelineCluster Aws Attributes - azure_
attributes PipelineCluster Azure Attributes - cluster_
log_ Pipelineconf Cluster Cluster Log Conf - Mapping[str, str]
- driver_
instance_ strpool_ id - driver_
node_ strtype_ id - enable_
local_ booldisk_ encryption - gcp_
attributes PipelineCluster Gcp Attributes - init_
scripts Sequence[PipelineCluster Init Script] - instance_
pool_ strid - label str
- node_
type_ strid - num_
workers int - policy_
id str - spark_
conf Mapping[str, str] - spark_
env_ Mapping[str, str]vars - ssh_
public_ Sequence[str]keys
- apply
Policy BooleanDefault Values - autoscale Property Map
- aws
Attributes Property Map - azure
Attributes Property Map - cluster
Log Property MapConf - Map<String>
- driver
Instance StringPool Id - driver
Node StringType Id - enable
Local BooleanDisk Encryption - gcp
Attributes Property Map - init
Scripts List<Property Map> - instance
Pool StringId - label String
- node
Type StringId - num
Workers Number - policy
Id String - spark
Conf Map<String> - spark
Env Map<String>Vars - ssh
Public List<String>Keys
PipelineClusterAutoscale, PipelineClusterAutoscaleArgs
- Max
Workers int - Min
Workers int - Mode string
- Max
Workers int - Min
Workers int - Mode string
- max
Workers Integer - min
Workers Integer - mode String
- max
Workers number - min
Workers number - mode string
- max_
workers int - min_
workers int - mode str
- max
Workers Number - min
Workers Number - mode String
PipelineClusterAwsAttributes, PipelineClusterAwsAttributesArgs
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- availability String
- ebs
Volume IntegerCount - ebs
Volume IntegerIops - ebs
Volume IntegerSize - ebs
Volume IntegerThroughput - ebs
Volume StringType - first
On IntegerDemand - instance
Profile StringArn - spot
Bid IntegerPrice Percent - zone
Id String
- availability string
- ebs
Volume numberCount - ebs
Volume numberIops - ebs
Volume numberSize - ebs
Volume numberThroughput - ebs
Volume stringType - first
On numberDemand - instance
Profile stringArn - spot
Bid numberPrice Percent - zone
Id string
- availability str
- ebs_
volume_ intcount - ebs_
volume_ intiops - ebs_
volume_ intsize - ebs_
volume_ intthroughput - ebs_
volume_ strtype - first_
on_ intdemand - instance_
profile_ strarn - spot_
bid_ intprice_ percent - zone_
id str
- availability String
- ebs
Volume NumberCount - ebs
Volume NumberIops - ebs
Volume NumberSize - ebs
Volume NumberThroughput - ebs
Volume StringType - first
On NumberDemand - instance
Profile StringArn - spot
Bid NumberPrice Percent - zone
Id String
PipelineClusterAzureAttributes, PipelineClusterAzureAttributesArgs
- availability String
- first
On NumberDemand - log
Analytics Property MapInfo - spot
Bid NumberMax Price
PipelineClusterAzureAttributesLogAnalyticsInfo, PipelineClusterAzureAttributesLogAnalyticsInfoArgs
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
- log
Analytics stringPrimary Key - log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
PipelineClusterClusterLogConf, PipelineClusterClusterLogConfArgs
PipelineClusterClusterLogConfDbfs, PipelineClusterClusterLogConfDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterClusterLogConfS3, PipelineClusterClusterLogConfS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
PipelineClusterGcpAttributes, PipelineClusterGcpAttributesArgs
- Availability string
- Google
Service stringAccount - Local
Ssd intCount - Zone
Id string
- Availability string
- Google
Service stringAccount - Local
Ssd intCount - Zone
Id string
- availability String
- google
Service StringAccount - local
Ssd IntegerCount - zone
Id String
- availability string
- google
Service stringAccount - local
Ssd numberCount - zone
Id string
- availability str
- google_
service_ straccount - local_
ssd_ intcount - zone_
id str
- availability String
- google
Service StringAccount - local
Ssd NumberCount - zone
Id String
PipelineClusterInitScript, PipelineClusterInitScriptArgs
PipelineClusterInitScriptAbfss, PipelineClusterInitScriptAbfssArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptDbfs, PipelineClusterInitScriptDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptFile, PipelineClusterInitScriptFileArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptGcs, PipelineClusterInitScriptGcsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptS3, PipelineClusterInitScriptS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
PipelineClusterInitScriptVolumes, PipelineClusterInitScriptVolumesArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineClusterInitScriptWorkspace, PipelineClusterInitScriptWorkspaceArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
PipelineDeployment, PipelineDeploymentArgs
- Kind string
- The deployment method that manages the pipeline.
- Metadata
File stringPath - The path to the file containing metadata about the deployment.
- Kind string
- The deployment method that manages the pipeline.
- Metadata
File stringPath - The path to the file containing metadata about the deployment.
- kind String
- The deployment method that manages the pipeline.
- metadata
File StringPath - The path to the file containing metadata about the deployment.
- kind string
- The deployment method that manages the pipeline.
- metadata
File stringPath - The path to the file containing metadata about the deployment.
- kind str
- The deployment method that manages the pipeline.
- metadata_
file_ strpath - The path to the file containing metadata about the deployment.
- kind String
- The deployment method that manages the pipeline.
- metadata
File StringPath - The path to the file containing metadata about the deployment.
PipelineFilters, PipelineFiltersArgs
PipelineGatewayDefinition, PipelineGatewayDefinitionArgs
- Connection
Id string - Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- Connection
Name string - Gateway
Storage stringCatalog - Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- Gateway
Storage stringName - Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Delta Live Tables system will automatically create the storage location under the catalog and schema.
- Gateway
Storage stringSchema - Required, Immutable. The name of the schema for the gateway pipelines's storage location.
- Connection
Id string - Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- Connection
Name string - Gateway
Storage stringCatalog - Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- Gateway
Storage stringName - Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Delta Live Tables system will automatically create the storage location under the catalog and schema.
- Gateway
Storage stringSchema - Required, Immutable. The name of the schema for the gateway pipelines's storage location.
- connection
Id String - Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- connection
Name String - gateway
Storage StringCatalog - Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gateway
Storage StringName - Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Delta Live Tables system will automatically create the storage location under the catalog and schema.
- gateway
Storage StringSchema - Required, Immutable. The name of the schema for the gateway pipelines's storage location.
- connection
Id string - Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- connection
Name string - gateway
Storage stringCatalog - Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gateway
Storage stringName - Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Delta Live Tables system will automatically create the storage location under the catalog and schema.
- gateway
Storage stringSchema - Required, Immutable. The name of the schema for the gateway pipelines's storage location.
- connection_
id str - Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- connection_
name str - gateway_
storage_ strcatalog - Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gateway_
storage_ strname - Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Delta Live Tables system will automatically create the storage location under the catalog and schema.
- gateway_
storage_ strschema - Required, Immutable. The name of the schema for the gateway pipelines's storage location.
- connection
Id String - Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
- connection
Name String - gateway
Storage StringCatalog - Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
- gateway
Storage StringName - Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Delta Live Tables system will automatically create the storage location under the catalog and schema.
- gateway
Storage StringSchema - Required, Immutable. The name of the schema for the gateway pipelines's storage location.
PipelineIngestionDefinition, PipelineIngestionDefinitionArgs
PipelineIngestionDefinitionObject, PipelineIngestionDefinitionObjectArgs
- Report
Pipeline
Ingestion Definition Object Report - Schema
Pipeline
Ingestion Definition Object Schema - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Table
Pipeline
Ingestion Definition Object Table
- Report
Pipeline
Ingestion Definition Object Report - Schema
Pipeline
Ingestion Definition Object Schema - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- Table
Pipeline
Ingestion Definition Object Table
- report
Pipeline
Ingestion Definition Object Report - schema
Pipeline
Ingestion Definition Object Schema - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- table
Pipeline
Ingestion Definition Object Table
- report
Pipeline
Ingestion Definition Object Report - schema
Pipeline
Ingestion Definition Object Schema - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- table
Pipeline
Ingestion Definition Object Table
- report
Pipeline
Ingestion Definition Object Report - schema
Pipeline
Ingestion Definition Object Schema - The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- table
Pipeline
Ingestion Definition Object Table
- report Property Map
- schema Property Map
- The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode.
- table Property Map
PipelineIngestionDefinitionObjectReport, PipelineIngestionDefinitionObjectReportArgs
- destination
Catalog String - destination
Schema String - destination
Table String - source
Url String - table
Configuration Property Map
PipelineIngestionDefinitionObjectReportTableConfiguration, PipelineIngestionDefinitionObjectReportTableConfigurationArgs
- Primary
Keys List<string> - Salesforce
Include boolFormula Fields - Scd
Type string - Sequence
Bies List<string>
- Primary
Keys []string - Salesforce
Include boolFormula Fields - Scd
Type string - Sequence
Bies []string
- primary
Keys List<String> - salesforce
Include BooleanFormula Fields - scd
Type String - sequence
Bies List<String>
- primary
Keys string[] - salesforce
Include booleanFormula Fields - scd
Type string - sequence
Bies string[]
- primary_
keys Sequence[str] - salesforce_
include_ boolformula_ fields - scd_
type str - sequence_
bies Sequence[str]
- primary
Keys List<String> - salesforce
Include BooleanFormula Fields - scd
Type String - sequence
Bies List<String>
PipelineIngestionDefinitionObjectSchema, PipelineIngestionDefinitionObjectSchemaArgs
- destination
Catalog String - destination
Schema String - source
Catalog String - source
Schema String - table
Configuration Property Map
PipelineIngestionDefinitionObjectSchemaTableConfiguration, PipelineIngestionDefinitionObjectSchemaTableConfigurationArgs
- Primary
Keys List<string> - Salesforce
Include boolFormula Fields - Scd
Type string - Sequence
Bies List<string>
- Primary
Keys []string - Salesforce
Include boolFormula Fields - Scd
Type string - Sequence
Bies []string
- primary
Keys List<String> - salesforce
Include BooleanFormula Fields - scd
Type String - sequence
Bies List<String>
- primary
Keys string[] - salesforce
Include booleanFormula Fields - scd
Type string - sequence
Bies string[]
- primary_
keys Sequence[str] - salesforce_
include_ boolformula_ fields - scd_
type str - sequence_
bies Sequence[str]
- primary
Keys List<String> - salesforce
Include BooleanFormula Fields - scd
Type String - sequence
Bies List<String>
PipelineIngestionDefinitionObjectTable, PipelineIngestionDefinitionObjectTableArgs
- Destination
Catalog string - Destination
Schema string - Destination
Table string - Source
Catalog string - Source
Schema string - Source
Table string - Table
Configuration PipelineIngestion Definition Object Table Table Configuration
- Destination
Catalog string - Destination
Schema string - Destination
Table string - Source
Catalog string - Source
Schema string - Source
Table string - Table
Configuration PipelineIngestion Definition Object Table Table Configuration
- destination
Catalog String - destination
Schema String - destination
Table String - source
Catalog String - source
Schema String - source
Table String - table
Configuration PipelineIngestion Definition Object Table Table Configuration
- destination
Catalog string - destination
Schema string - destination
Table string - source
Catalog string - source
Schema string - source
Table string - table
Configuration PipelineIngestion Definition Object Table Table Configuration
- destination
Catalog String - destination
Schema String - destination
Table String - source
Catalog String - source
Schema String - source
Table String - table
Configuration Property Map
PipelineIngestionDefinitionObjectTableTableConfiguration, PipelineIngestionDefinitionObjectTableTableConfigurationArgs
- PrimaryKeys List<string>
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies List<string>
- PrimaryKeys []string
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies []string
- primaryKeys List<String>
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
- primaryKeys string[]
- salesforceIncludeFormulaFields boolean
- scdType string
- sequenceBies string[]
- primary_keys Sequence[str]
- salesforce_include_formula_fields bool
- scd_type str
- sequence_bies Sequence[str]
- primaryKeys List<String>
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
PipelineIngestionDefinitionTableConfiguration, PipelineIngestionDefinitionTableConfigurationArgs
- PrimaryKeys List<string>
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies List<string>
- PrimaryKeys []string
- SalesforceIncludeFormulaFields bool
- ScdType string
- SequenceBies []string
- primaryKeys List<String>
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
- primaryKeys string[]
- salesforceIncludeFormulaFields boolean
- scdType string
- sequenceBies string[]
- primary_keys Sequence[str]
- salesforce_include_formula_fields bool
- scd_type str
- sequence_bies Sequence[str]
- primaryKeys List<String>
- salesforceIncludeFormulaFields Boolean
- scdType String
- sequenceBies List<String>
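Taken together, the ingestion-definition types above describe which source objects a managed ingestion pipeline replicates and how each destination table handles change data. The TypeScript sketch below combines a schema object, a table object, and table configurations. The connection, catalog, schema, and table names are hypothetical placeholders, connectionName is an assumed field of the ingestion definition, and the SCD_TYPE_1/SCD_TYPE_2 values assume the Databricks scd_type enum.
import * as databricks from "@pulumi/databricks";

// A minimal sketch of a managed ingestion pipeline; all names are placeholders
// and `connectionName` is an assumed field of the ingestion definition.
const ingestion = new databricks.Pipeline("ingestion", {
    name: "salesforce-ingestion",
    ingestionDefinition: {
        connectionName: "my-salesforce-connection", // assumed existing connection
        objects: [
            {
                // Replicate every table in one source schema.
                schema: {
                    sourceCatalog: "salesforce",
                    sourceSchema: "objects",
                    destinationCatalog: "main",
                    destinationSchema: "salesforce_bronze",
                },
            },
            {
                // Replicate a single table and override its change handling.
                table: {
                    sourceCatalog: "salesforce",
                    sourceSchema: "objects",
                    sourceTable: "Account",
                    destinationCatalog: "main",
                    destinationSchema: "salesforce_bronze",
                    destinationTable: "account",
                    tableConfiguration: {
                        primaryKeys: ["Id"],
                        scdType: "SCD_TYPE_2", // assumed enum value
                        salesforceIncludeFormulaFields: false,
                    },
                },
            },
        ],
        // Defaults applied to objects that do not set their own configuration.
        tableConfiguration: {
            scdType: "SCD_TYPE_1", // assumed enum value
        },
    },
});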
PipelineLatestUpdate, PipelineLatestUpdateArgs
- CreationTime string
- State string
- UpdateId string
- CreationTime string
- State string
- UpdateId string
- creationTime String
- state String
- updateId String
- creationTime string
- state string
- updateId string
- creation_time str
- state str
- update_id str
- creationTime String
- state String
- updateId String
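PipelineLatestUpdate is an output-only shape. Below is a short sketch of surfacing the most recent update's state, assuming the resource exposes recent updates through a latestUpdates output; that output name is an assumption, not confirmed by this section.
import * as databricks from "@pulumi/databricks";

// Hypothetical: read back the state of the most recent pipeline update,
// assuming a `latestUpdates` output of PipelineLatestUpdate elements.
const statusDemo = new databricks.Pipeline("status_demo", { name: "Status Demo" });
export const lastUpdateState = statusDemo.latestUpdates.apply(
    updates => updates?.[0]?.state ?? "unknown");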
PipelineLibrary, PipelineLibraryArgs
- file Property Map
- jar String
- maven Property Map
- notebook Property Map
- whl String
PipelineLibraryFile, PipelineLibraryFileArgs
- Path string
- Path string
- path String
- path string
- path str
- path String
PipelineLibraryMaven, PipelineLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
PipelineLibraryNotebook, PipelineLibraryNotebookArgs
- Path string
- Path string
- path String
- path string
- path str
- path String
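A sketch of the Maven and wheel library kinds listed above; the coordinates, repository URL, and wheel path are hypothetical placeholders.
import * as databricks from "@pulumi/databricks";

// A sketch of attaching a Maven artifact and a Python wheel as pipeline libraries.
const libraryKinds = new databricks.Pipeline("library_kinds", {
    name: "Library Kinds",
    libraries: [
        {
            maven: {
                coordinates: "com.example:dlt-helpers:1.0.0",
                exclusions: ["org.slf4j:slf4j-log4j12"],
                repo: "https://repo1.maven.org/maven2/",
            },
        },
        {
            whl: "/Workspace/Shared/wheels/dlt_helpers-1.0.0-py3-none-any.whl",
        },
    ],
});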
PipelineNotification, PipelineNotificationArgs
- Alerts List<string> - non-empty list of alert types. Currently the following alert types are supported; consult the documentation for the up-to-date list:
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- EmailRecipients List<string> - non-empty list of emails to notify.
- Alerts []string - non-empty list of alert types. Currently the following alert types are supported; consult the documentation for the up-to-date list:
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- EmailRecipients []string - non-empty list of emails to notify.
- alerts List<String> - non-empty list of alert types. Currently the following alert types are supported; consult the documentation for the up-to-date list:
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- emailRecipients List<String> - non-empty list of emails to notify.
- alerts string[] - non-empty list of alert types. Currently the following alert types are supported; consult the documentation for the up-to-date list:
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- emailRecipients string[] - non-empty list of emails to notify.
- alerts Sequence[str] - non-empty list of alert types. Currently the following alert types are supported; consult the documentation for the up-to-date list:
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- email_recipients Sequence[str] - non-empty list of emails to notify.
- alerts List<String> - non-empty list of alert types. Currently the following alert types are supported; consult the documentation for the up-to-date list:
  - on-update-success - a pipeline update completes successfully.
  - on-update-failure - a pipeline update fails with a retryable error.
  - on-update-fatal-failure - a pipeline update fails with a non-retryable (fatal) error.
  - on-flow-failure - a single data flow fails.
- emailRecipients List<String> - non-empty list of emails to notify.
PipelineRestartWindow, PipelineRestartWindowArgs
- StartHour int
- DaysOfWeek string
- TimeZoneId string
- StartHour int
- DaysOfWeek string
- TimeZoneId string
- startHour Integer
- daysOfWeek String
- timeZoneId String
- startHour number
- daysOfWeek string
- timeZoneId string
- start_hour int
- days_of_week str
- time_zone_id str
- startHour Number
- daysOfWeek String
- timeZoneId String
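A restart window tells a continuous pipeline when it may be restarted for maintenance. A minimal sketch follows, assuming the block is set through a restartWindow property, that the day is given as an upper-case day-of-week name, and that timeZoneId takes an IANA time-zone id.
import * as databricks from "@pulumi/databricks";

// Hypothetical restart window: restarts may begin at 02:00 on Sundays, Berlin time.
const continuousDemo = new databricks.Pipeline("continuous_demo", {
    name: "Continuous With Restart Window",
    continuous: true,
    restartWindow: {
        startHour: 2,
        daysOfWeek: "SUNDAY", // assumed day-of-week value
        timeZoneId: "Europe/Berlin",
    },
});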
PipelineTrigger, PipelineTriggerArgs
PipelineTriggerCron, PipelineTriggerCronArgs
- QuartzCronSchedule string
- TimezoneId string
- QuartzCronSchedule string
- TimezoneId string
- quartzCronSchedule String
- timezoneId String
- quartzCronSchedule string
- timezoneId string
- quartz_cron_schedule str
- timezone_id str
- quartzCronSchedule String
- timezoneId String
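PipelineTriggerCron schedules updates for a non-continuous pipeline from a Quartz cron expression. A minimal sketch follows, assuming the block is attached through a trigger property with a nested cron block; the schedule and timezone are examples, not defaults.
import * as databricks from "@pulumi/databricks";

// Hypothetical schedule: trigger a pipeline update every day at 06:00 UTC.
const scheduledDemo = new databricks.Pipeline("scheduled_demo", {
    name: "Scheduled Pipeline",
    continuous: false,
    trigger: {
        cron: {
            quartzCronSchedule: "0 0 6 * * ?",
            timezoneId: "UTC",
        },
    },
});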
Import
The pipeline resource can be imported using the id of the pipeline:
bash
$ pulumi import databricks:index/pipeline:Pipeline this <pipeline-id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
databricks
Terraform Provider.