databricks.Job
The databricks.Job resource allows you to manage Databricks Jobs, which run non-interactive code on a databricks.Cluster.
Example Usage
In Pulumi configuration, it is recommended to define tasks in alphabetical order of their task_key arguments, so that you get a consistent and readable diff. Whenever tasks are added or removed, or a task_key is renamed, you'll observe a change in the majority of tasks. This is because the current version of the provider treats task blocks as an ordered list. Alternatively, the task block could have been an unordered set, though end users would then see the entire block replaced upon a change to a single property of a task.
It is possible to create a Databricks job using task blocks. A single task is defined with a task block containing one of the *_task blocks, a task_key, and additional arguments described below. The example below defines its tasks in alphabetical task_key order (a, b, c, d).
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const _this = new databricks.Job("this", {
name: "Job with multiple tasks",
description: "This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.",
jobClusters: [{
jobClusterKey: "j",
newCluster: {
numWorkers: 2,
sparkVersion: latest.id,
nodeTypeId: smallest.id,
},
}],
tasks: [
{
taskKey: "a",
newCluster: {
numWorkers: 1,
sparkVersion: latest.id,
nodeTypeId: smallest.id,
},
notebookTask: {
notebookPath: thisDatabricksNotebook.path,
},
},
{
taskKey: "b",
dependsOns: [{
taskKey: "a",
}],
existingClusterId: shared.id,
sparkJarTask: {
mainClassName: "com.acme.data.Main",
},
},
{
taskKey: "c",
jobClusterKey: "j",
notebookTask: {
notebookPath: thisDatabricksNotebook.path,
},
},
{
taskKey: "d",
pipelineTask: {
pipelineId: thisDatabricksPipeline.id,
},
},
],
});
import pulumi
import pulumi_databricks as databricks
this = databricks.Job("this",
name="Job with multiple tasks",
description="This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.",
job_clusters=[{
"job_cluster_key": "j",
"new_cluster": {
"num_workers": 2,
"spark_version": latest["id"],
"node_type_id": smallest["id"],
},
}],
tasks=[
{
"task_key": "a",
"new_cluster": {
"num_workers": 1,
"spark_version": latest["id"],
"node_type_id": smallest["id"],
},
"notebook_task": {
"notebook_path": this_databricks_notebook["path"],
},
},
{
"task_key": "b",
"depends_ons": [{
"task_key": "a",
}],
"existing_cluster_id": shared["id"],
"spark_jar_task": {
"main_class_name": "com.acme.data.Main",
},
},
{
"task_key": "c",
"job_cluster_key": "j",
"notebook_task": {
"notebook_path": this_databricks_notebook["path"],
},
},
{
"task_key": "d",
"pipeline_task": {
"pipeline_id": this_databricks_pipeline["id"],
},
},
])
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{
Name: pulumi.String("Job with multiple tasks"),
Description: pulumi.String("This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished."),
JobClusters: databricks.JobJobClusterArray{
&databricks.JobJobClusterArgs{
JobClusterKey: pulumi.String("j"),
NewCluster: &databricks.JobJobClusterNewClusterArgs{
NumWorkers: pulumi.Int(2),
SparkVersion: pulumi.Any(latest.Id),
NodeTypeId: pulumi.Any(smallest.Id),
},
},
},
Tasks: databricks.JobTaskArray{
&databricks.JobTaskArgs{
TaskKey: pulumi.String("a"),
NewCluster: &databricks.JobTaskNewClusterArgs{
NumWorkers: pulumi.Int(1),
SparkVersion: pulumi.Any(latest.Id),
NodeTypeId: pulumi.Any(smallest.Id),
},
NotebookTask: &databricks.JobTaskNotebookTaskArgs{
NotebookPath: pulumi.Any(thisDatabricksNotebook.Path),
},
},
&databricks.JobTaskArgs{
TaskKey: pulumi.String("b"),
DependsOns: databricks.JobTaskDependsOnArray{
&databricks.JobTaskDependsOnArgs{
TaskKey: pulumi.String("a"),
},
},
ExistingClusterId: pulumi.Any(shared.Id),
SparkJarTask: &databricks.JobTaskSparkJarTaskArgs{
MainClassName: pulumi.String("com.acme.data.Main"),
},
},
&databricks.JobTaskArgs{
TaskKey: pulumi.String("c"),
JobClusterKey: pulumi.String("j"),
NotebookTask: &databricks.JobTaskNotebookTaskArgs{
NotebookPath: pulumi.Any(thisDatabricksNotebook.Path),
},
},
&databricks.JobTaskArgs{
TaskKey: pulumi.String("d"),
PipelineTask: &databricks.JobTaskPipelineTaskArgs{
PipelineId: pulumi.Any(thisDatabricksPipeline.Id),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var @this = new Databricks.Job("this", new()
{
Name = "Job with multiple tasks",
Description = "This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.",
JobClusters = new[]
{
new Databricks.Inputs.JobJobClusterArgs
{
JobClusterKey = "j",
NewCluster = new Databricks.Inputs.JobJobClusterNewClusterArgs
{
NumWorkers = 2,
SparkVersion = latest.Id,
NodeTypeId = smallest.Id,
},
},
},
Tasks = new[]
{
new Databricks.Inputs.JobTaskArgs
{
TaskKey = "a",
NewCluster = new Databricks.Inputs.JobTaskNewClusterArgs
{
NumWorkers = 1,
SparkVersion = latest.Id,
NodeTypeId = smallest.Id,
},
NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
{
NotebookPath = thisDatabricksNotebook.Path,
},
},
new Databricks.Inputs.JobTaskArgs
{
TaskKey = "b",
DependsOns = new[]
{
new Databricks.Inputs.JobTaskDependsOnArgs
{
TaskKey = "a",
},
},
ExistingClusterId = shared.Id,
SparkJarTask = new Databricks.Inputs.JobTaskSparkJarTaskArgs
{
MainClassName = "com.acme.data.Main",
},
},
new Databricks.Inputs.JobTaskArgs
{
TaskKey = "c",
JobClusterKey = "j",
NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
{
NotebookPath = thisDatabricksNotebook.Path,
},
},
new Databricks.Inputs.JobTaskArgs
{
TaskKey = "d",
PipelineTask = new Databricks.Inputs.JobTaskPipelineTaskArgs
{
PipelineId = thisDatabricksPipeline.Id,
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobJobClusterArgs;
import com.pulumi.databricks.inputs.JobJobClusterNewClusterArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskNewClusterArgs;
import com.pulumi.databricks.inputs.JobTaskNotebookTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSparkJarTaskArgs;
import com.pulumi.databricks.inputs.JobTaskPipelineTaskArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var this_ = new Job("this", JobArgs.builder()
.name("Job with multiple tasks")
.description("This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.")
.jobClusters(JobJobClusterArgs.builder()
.jobClusterKey("j")
.newCluster(JobJobClusterNewClusterArgs.builder()
.numWorkers(2)
.sparkVersion(latest.id())
.nodeTypeId(smallest.id())
.build())
.build())
.tasks(
JobTaskArgs.builder()
.taskKey("a")
.newCluster(JobTaskNewClusterArgs.builder()
.numWorkers(1)
.sparkVersion(latest.id())
.nodeTypeId(smallest.id())
.build())
.notebookTask(JobTaskNotebookTaskArgs.builder()
.notebookPath(thisDatabricksNotebook.path())
.build())
.build(),
JobTaskArgs.builder()
.taskKey("b")
.dependsOns(JobTaskDependsOnArgs.builder()
.taskKey("a")
.build())
.existingClusterId(shared.id())
.sparkJarTask(JobTaskSparkJarTaskArgs.builder()
.mainClassName("com.acme.data.Main")
.build())
.build(),
JobTaskArgs.builder()
.taskKey("c")
.jobClusterKey("j")
.notebookTask(JobTaskNotebookTaskArgs.builder()
.notebookPath(thisDatabricksNotebook.path())
.build())
.build(),
JobTaskArgs.builder()
.taskKey("d")
.pipelineTask(JobTaskPipelineTaskArgs.builder()
.pipelineId(thisDatabricksPipeline.id())
.build())
.build())
.build());
}
}
resources:
this:
type: databricks:Job
properties:
name: Job with multiple tasks
description: This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.
jobClusters:
- jobClusterKey: j
newCluster:
numWorkers: 2
sparkVersion: ${latest.id}
nodeTypeId: ${smallest.id}
tasks:
- taskKey: a
newCluster:
numWorkers: 1
sparkVersion: ${latest.id}
nodeTypeId: ${smallest.id}
notebookTask:
notebookPath: ${thisDatabricksNotebook.path}
- taskKey: b
dependsOns:
- taskKey: a
existingClusterId: ${shared.id}
sparkJarTask:
mainClassName: com.acme.data.Main
- taskKey: c
jobClusterKey: j
notebookTask:
notebookPath: ${thisDatabricksNotebook.path}
- taskKey: d
pipelineTask:
pipelineId: ${thisDatabricksPipeline.id}
Access Control
By default, all users can create and modify jobs unless an administrator enables jobs access control. With jobs access control, individual permissions determine a user’s abilities.
- databricks.Permissions can control which groups or individual users are granted the Can View, Can Manage Run, and Can Manage permission levels (see the sketch after this list).
- databricks.ClusterPolicy can control which kinds of clusters users can create for jobs.
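For example, the following minimal sketch (TypeScript, matching the first example above) grants a group read-only access to the job created earlier. The group name data-engineers is a placeholder, and the arguments shown (jobId, accessControls, permissionLevel) reflect the databricks.Permissions resource as commonly used; verify them against the databricks.Permissions documentation for your provider version.
import * as databricks from "@pulumi/databricks";

// Minimal sketch: give the (hypothetical) "data-engineers" group view-only access
// to the job defined in the example above (`_this`).
const jobViewers = new databricks.Permissions("job-viewers", {
    jobId: _this.id,
    accessControls: [{
        groupName: "data-engineers",
        permissionLevel: "CAN_VIEW",
    }],
});
A typical split is to grant Can Manage Run to groups that only need to trigger runs and reserve Can Manage for the job owners.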
Create Job Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Job(name: string, args?: JobArgs, opts?: CustomResourceOptions);
@overload
def Job(resource_name: str,
args: Optional[JobArgs] = None,
opts: Optional[ResourceOptions] = None)
@overload
def Job(resource_name: str,
opts: Optional[ResourceOptions] = None,
always_running: Optional[bool] = None,
budget_policy_id: Optional[str] = None,
continuous: Optional[JobContinuousArgs] = None,
control_run_state: Optional[bool] = None,
dbt_task: Optional[JobDbtTaskArgs] = None,
deployment: Optional[JobDeploymentArgs] = None,
description: Optional[str] = None,
edit_mode: Optional[str] = None,
email_notifications: Optional[JobEmailNotificationsArgs] = None,
environments: Optional[Sequence[JobEnvironmentArgs]] = None,
existing_cluster_id: Optional[str] = None,
format: Optional[str] = None,
git_source: Optional[JobGitSourceArgs] = None,
health: Optional[JobHealthArgs] = None,
job_clusters: Optional[Sequence[JobJobClusterArgs]] = None,
libraries: Optional[Sequence[JobLibraryArgs]] = None,
max_concurrent_runs: Optional[int] = None,
max_retries: Optional[int] = None,
min_retry_interval_millis: Optional[int] = None,
name: Optional[str] = None,
new_cluster: Optional[JobNewClusterArgs] = None,
notebook_task: Optional[JobNotebookTaskArgs] = None,
notification_settings: Optional[JobNotificationSettingsArgs] = None,
parameters: Optional[Sequence[JobParameterArgs]] = None,
pipeline_task: Optional[JobPipelineTaskArgs] = None,
python_wheel_task: Optional[JobPythonWheelTaskArgs] = None,
queue: Optional[JobQueueArgs] = None,
retry_on_timeout: Optional[bool] = None,
run_as: Optional[JobRunAsArgs] = None,
run_job_task: Optional[JobRunJobTaskArgs] = None,
schedule: Optional[JobScheduleArgs] = None,
spark_jar_task: Optional[JobSparkJarTaskArgs] = None,
spark_python_task: Optional[JobSparkPythonTaskArgs] = None,
spark_submit_task: Optional[JobSparkSubmitTaskArgs] = None,
tags: Optional[Mapping[str, str]] = None,
tasks: Optional[Sequence[JobTaskArgs]] = None,
timeout_seconds: Optional[int] = None,
trigger: Optional[JobTriggerArgs] = None,
webhook_notifications: Optional[JobWebhookNotificationsArgs] = None)
func NewJob(ctx *Context, name string, args *JobArgs, opts ...ResourceOption) (*Job, error)
public Job(string name, JobArgs? args = null, CustomResourceOptions? opts = null)
type: databricks:Job
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var jobResource = new Databricks.Job("jobResource", new()
{
BudgetPolicyId = "string",
Continuous = new Databricks.Inputs.JobContinuousArgs
{
PauseStatus = "string",
},
ControlRunState = false,
Deployment = new Databricks.Inputs.JobDeploymentArgs
{
Kind = "string",
MetadataFilePath = "string",
},
Description = "string",
EditMode = "string",
EmailNotifications = new Databricks.Inputs.JobEmailNotificationsArgs
{
NoAlertForSkippedRuns = false,
OnDurationWarningThresholdExceededs = new[]
{
"string",
},
OnFailures = new[]
{
"string",
},
OnStarts = new[]
{
"string",
},
OnStreamingBacklogExceededs = new[]
{
"string",
},
OnSuccesses = new[]
{
"string",
},
},
Environments = new[]
{
new Databricks.Inputs.JobEnvironmentArgs
{
EnvironmentKey = "string",
Spec = new Databricks.Inputs.JobEnvironmentSpecArgs
{
Client = "string",
Dependencies = new[]
{
"string",
},
},
},
},
ExistingClusterId = "string",
Format = "string",
GitSource = new Databricks.Inputs.JobGitSourceArgs
{
Url = "string",
Branch = "string",
Commit = "string",
GitSnapshot = new Databricks.Inputs.JobGitSourceGitSnapshotArgs
{
UsedCommit = "string",
},
JobSource = new Databricks.Inputs.JobGitSourceJobSourceArgs
{
ImportFromGitBranch = "string",
JobConfigPath = "string",
DirtyState = "string",
},
Provider = "string",
Tag = "string",
},
Health = new Databricks.Inputs.JobHealthArgs
{
Rules = new[]
{
new Databricks.Inputs.JobHealthRuleArgs
{
Metric = "string",
Op = "string",
Value = 0,
},
},
},
JobClusters = new[]
{
new Databricks.Inputs.JobJobClusterArgs
{
JobClusterKey = "string",
NewCluster = new Databricks.Inputs.JobJobClusterNewClusterArgs
{
SparkVersion = "string",
EnableLocalDiskEncryption = false,
ClusterLogConf = new Databricks.Inputs.JobJobClusterNewClusterClusterLogConfArgs
{
Dbfs = new Databricks.Inputs.JobJobClusterNewClusterClusterLogConfDbfsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobJobClusterNewClusterClusterLogConfS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
},
GcpAttributes = new Databricks.Inputs.JobJobClusterNewClusterGcpAttributesArgs
{
Availability = "string",
BootDiskSize = 0,
GoogleServiceAccount = "string",
LocalSsdCount = 0,
UsePreemptibleExecutors = false,
ZoneId = "string",
},
ClusterId = "string",
IdempotencyToken = "string",
ClusterMountInfos = new[]
{
new Databricks.Inputs.JobJobClusterNewClusterClusterMountInfoArgs
{
LocalMountDirPath = "string",
NetworkFilesystemInfo = new Databricks.Inputs.JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs
{
ServerAddress = "string",
MountOptions = "string",
},
RemoteMountDirPath = "string",
},
},
ClusterName = "string",
CustomTags =
{
{ "string", "string" },
},
DataSecurityMode = "string",
DockerImage = new Databricks.Inputs.JobJobClusterNewClusterDockerImageArgs
{
Url = "string",
BasicAuth = new Databricks.Inputs.JobJobClusterNewClusterDockerImageBasicAuthArgs
{
Password = "string",
Username = "string",
},
},
DriverInstancePoolId = "string",
InitScripts = new[]
{
new Databricks.Inputs.JobJobClusterNewClusterInitScriptArgs
{
Abfss = new Databricks.Inputs.JobJobClusterNewClusterInitScriptAbfssArgs
{
Destination = "string",
},
File = new Databricks.Inputs.JobJobClusterNewClusterInitScriptFileArgs
{
Destination = "string",
},
Gcs = new Databricks.Inputs.JobJobClusterNewClusterInitScriptGcsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobJobClusterNewClusterInitScriptS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
Volumes = new Databricks.Inputs.JobJobClusterNewClusterInitScriptVolumesArgs
{
Destination = "string",
},
Workspace = new Databricks.Inputs.JobJobClusterNewClusterInitScriptWorkspaceArgs
{
Destination = "string",
},
},
},
EnableElasticDisk = false,
ApplyPolicyDefaultValues = false,
AzureAttributes = new Databricks.Inputs.JobJobClusterNewClusterAzureAttributesArgs
{
Availability = "string",
FirstOnDemand = 0,
LogAnalyticsInfo = new Databricks.Inputs.JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs
{
LogAnalyticsPrimaryKey = "string",
LogAnalyticsWorkspaceId = "string",
},
SpotBidMaxPrice = 0,
},
AwsAttributes = new Databricks.Inputs.JobJobClusterNewClusterAwsAttributesArgs
{
Availability = "string",
EbsVolumeCount = 0,
EbsVolumeIops = 0,
EbsVolumeSize = 0,
EbsVolumeThroughput = 0,
EbsVolumeType = "string",
FirstOnDemand = 0,
InstanceProfileArn = "string",
SpotBidPricePercent = 0,
ZoneId = "string",
},
DriverNodeTypeId = "string",
InstancePoolId = "string",
Libraries = new[]
{
new Databricks.Inputs.JobJobClusterNewClusterLibraryArgs
{
Cran = new Databricks.Inputs.JobJobClusterNewClusterLibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
Maven = new Databricks.Inputs.JobJobClusterNewClusterLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.JobJobClusterNewClusterLibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
},
},
NodeTypeId = "string",
NumWorkers = 0,
PolicyId = "string",
RuntimeEngine = "string",
SingleUserName = "string",
SparkConf =
{
{ "string", "string" },
},
SparkEnvVars =
{
{ "string", "string" },
},
Autoscale = new Databricks.Inputs.JobJobClusterNewClusterAutoscaleArgs
{
MaxWorkers = 0,
MinWorkers = 0,
},
SshPublicKeys = new[]
{
"string",
},
WorkloadType = new Databricks.Inputs.JobJobClusterNewClusterWorkloadTypeArgs
{
Clients = new Databricks.Inputs.JobJobClusterNewClusterWorkloadTypeClientsArgs
{
Jobs = false,
Notebooks = false,
},
},
},
},
},
Libraries = new[]
{
new Databricks.Inputs.JobLibraryArgs
{
Cran = new Databricks.Inputs.JobLibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
Maven = new Databricks.Inputs.JobLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.JobLibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
},
},
MaxConcurrentRuns = 0,
Name = "string",
NewCluster = new Databricks.Inputs.JobNewClusterArgs
{
SparkVersion = "string",
EnableLocalDiskEncryption = false,
ClusterLogConf = new Databricks.Inputs.JobNewClusterClusterLogConfArgs
{
Dbfs = new Databricks.Inputs.JobNewClusterClusterLogConfDbfsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobNewClusterClusterLogConfS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
},
GcpAttributes = new Databricks.Inputs.JobNewClusterGcpAttributesArgs
{
Availability = "string",
BootDiskSize = 0,
GoogleServiceAccount = "string",
LocalSsdCount = 0,
UsePreemptibleExecutors = false,
ZoneId = "string",
},
ClusterId = "string",
IdempotencyToken = "string",
ClusterMountInfos = new[]
{
new Databricks.Inputs.JobNewClusterClusterMountInfoArgs
{
LocalMountDirPath = "string",
NetworkFilesystemInfo = new Databricks.Inputs.JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs
{
ServerAddress = "string",
MountOptions = "string",
},
RemoteMountDirPath = "string",
},
},
ClusterName = "string",
CustomTags =
{
{ "string", "string" },
},
DataSecurityMode = "string",
DockerImage = new Databricks.Inputs.JobNewClusterDockerImageArgs
{
Url = "string",
BasicAuth = new Databricks.Inputs.JobNewClusterDockerImageBasicAuthArgs
{
Password = "string",
Username = "string",
},
},
DriverInstancePoolId = "string",
InitScripts = new[]
{
new Databricks.Inputs.JobNewClusterInitScriptArgs
{
Abfss = new Databricks.Inputs.JobNewClusterInitScriptAbfssArgs
{
Destination = "string",
},
File = new Databricks.Inputs.JobNewClusterInitScriptFileArgs
{
Destination = "string",
},
Gcs = new Databricks.Inputs.JobNewClusterInitScriptGcsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobNewClusterInitScriptS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
Volumes = new Databricks.Inputs.JobNewClusterInitScriptVolumesArgs
{
Destination = "string",
},
Workspace = new Databricks.Inputs.JobNewClusterInitScriptWorkspaceArgs
{
Destination = "string",
},
},
},
EnableElasticDisk = false,
ApplyPolicyDefaultValues = false,
AzureAttributes = new Databricks.Inputs.JobNewClusterAzureAttributesArgs
{
Availability = "string",
FirstOnDemand = 0,
LogAnalyticsInfo = new Databricks.Inputs.JobNewClusterAzureAttributesLogAnalyticsInfoArgs
{
LogAnalyticsPrimaryKey = "string",
LogAnalyticsWorkspaceId = "string",
},
SpotBidMaxPrice = 0,
},
AwsAttributes = new Databricks.Inputs.JobNewClusterAwsAttributesArgs
{
Availability = "string",
EbsVolumeCount = 0,
EbsVolumeIops = 0,
EbsVolumeSize = 0,
EbsVolumeThroughput = 0,
EbsVolumeType = "string",
FirstOnDemand = 0,
InstanceProfileArn = "string",
SpotBidPricePercent = 0,
ZoneId = "string",
},
DriverNodeTypeId = "string",
InstancePoolId = "string",
Libraries = new[]
{
new Databricks.Inputs.JobNewClusterLibraryArgs
{
Cran = new Databricks.Inputs.JobNewClusterLibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
Maven = new Databricks.Inputs.JobNewClusterLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.JobNewClusterLibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
},
},
NodeTypeId = "string",
NumWorkers = 0,
PolicyId = "string",
RuntimeEngine = "string",
SingleUserName = "string",
SparkConf =
{
{ "string", "string" },
},
SparkEnvVars =
{
{ "string", "string" },
},
Autoscale = new Databricks.Inputs.JobNewClusterAutoscaleArgs
{
MaxWorkers = 0,
MinWorkers = 0,
},
SshPublicKeys = new[]
{
"string",
},
WorkloadType = new Databricks.Inputs.JobNewClusterWorkloadTypeArgs
{
Clients = new Databricks.Inputs.JobNewClusterWorkloadTypeClientsArgs
{
Jobs = false,
Notebooks = false,
},
},
},
NotificationSettings = new Databricks.Inputs.JobNotificationSettingsArgs
{
NoAlertForCanceledRuns = false,
NoAlertForSkippedRuns = false,
},
Parameters = new[]
{
new Databricks.Inputs.JobParameterArgs
{
Default = "string",
Name = "string",
},
},
Queue = new Databricks.Inputs.JobQueueArgs
{
Enabled = false,
},
RunAs = new Databricks.Inputs.JobRunAsArgs
{
ServicePrincipalName = "string",
UserName = "string",
},
Schedule = new Databricks.Inputs.JobScheduleArgs
{
QuartzCronExpression = "string",
TimezoneId = "string",
PauseStatus = "string",
},
Tags =
{
{ "string", "string" },
},
Tasks = new[]
{
new Databricks.Inputs.JobTaskArgs
{
TaskKey = "string",
NewCluster = new Databricks.Inputs.JobTaskNewClusterArgs
{
SparkVersion = "string",
EnableLocalDiskEncryption = false,
ClusterLogConf = new Databricks.Inputs.JobTaskNewClusterClusterLogConfArgs
{
Dbfs = new Databricks.Inputs.JobTaskNewClusterClusterLogConfDbfsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobTaskNewClusterClusterLogConfS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
},
GcpAttributes = new Databricks.Inputs.JobTaskNewClusterGcpAttributesArgs
{
Availability = "string",
BootDiskSize = 0,
GoogleServiceAccount = "string",
LocalSsdCount = 0,
UsePreemptibleExecutors = false,
ZoneId = "string",
},
ClusterId = "string",
IdempotencyToken = "string",
ClusterMountInfos = new[]
{
new Databricks.Inputs.JobTaskNewClusterClusterMountInfoArgs
{
LocalMountDirPath = "string",
NetworkFilesystemInfo = new Databricks.Inputs.JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs
{
ServerAddress = "string",
MountOptions = "string",
},
RemoteMountDirPath = "string",
},
},
ClusterName = "string",
CustomTags =
{
{ "string", "string" },
},
DataSecurityMode = "string",
DockerImage = new Databricks.Inputs.JobTaskNewClusterDockerImageArgs
{
Url = "string",
BasicAuth = new Databricks.Inputs.JobTaskNewClusterDockerImageBasicAuthArgs
{
Password = "string",
Username = "string",
},
},
DriverInstancePoolId = "string",
InitScripts = new[]
{
new Databricks.Inputs.JobTaskNewClusterInitScriptArgs
{
Abfss = new Databricks.Inputs.JobTaskNewClusterInitScriptAbfssArgs
{
Destination = "string",
},
File = new Databricks.Inputs.JobTaskNewClusterInitScriptFileArgs
{
Destination = "string",
},
Gcs = new Databricks.Inputs.JobTaskNewClusterInitScriptGcsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobTaskNewClusterInitScriptS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
Volumes = new Databricks.Inputs.JobTaskNewClusterInitScriptVolumesArgs
{
Destination = "string",
},
Workspace = new Databricks.Inputs.JobTaskNewClusterInitScriptWorkspaceArgs
{
Destination = "string",
},
},
},
EnableElasticDisk = false,
ApplyPolicyDefaultValues = false,
AzureAttributes = new Databricks.Inputs.JobTaskNewClusterAzureAttributesArgs
{
Availability = "string",
FirstOnDemand = 0,
LogAnalyticsInfo = new Databricks.Inputs.JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs
{
LogAnalyticsPrimaryKey = "string",
LogAnalyticsWorkspaceId = "string",
},
SpotBidMaxPrice = 0,
},
AwsAttributes = new Databricks.Inputs.JobTaskNewClusterAwsAttributesArgs
{
Availability = "string",
EbsVolumeCount = 0,
EbsVolumeIops = 0,
EbsVolumeSize = 0,
EbsVolumeThroughput = 0,
EbsVolumeType = "string",
FirstOnDemand = 0,
InstanceProfileArn = "string",
SpotBidPricePercent = 0,
ZoneId = "string",
},
DriverNodeTypeId = "string",
InstancePoolId = "string",
Libraries = new[]
{
new Databricks.Inputs.JobTaskNewClusterLibraryArgs
{
Cran = new Databricks.Inputs.JobTaskNewClusterLibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
Maven = new Databricks.Inputs.JobTaskNewClusterLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.JobTaskNewClusterLibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
},
},
NodeTypeId = "string",
NumWorkers = 0,
PolicyId = "string",
RuntimeEngine = "string",
SingleUserName = "string",
SparkConf =
{
{ "string", "string" },
},
SparkEnvVars =
{
{ "string", "string" },
},
Autoscale = new Databricks.Inputs.JobTaskNewClusterAutoscaleArgs
{
MaxWorkers = 0,
MinWorkers = 0,
},
SshPublicKeys = new[]
{
"string",
},
WorkloadType = new Databricks.Inputs.JobTaskNewClusterWorkloadTypeArgs
{
Clients = new Databricks.Inputs.JobTaskNewClusterWorkloadTypeClientsArgs
{
Jobs = false,
Notebooks = false,
},
},
},
DbtTask = new Databricks.Inputs.JobTaskDbtTaskArgs
{
Commands = new[]
{
"string",
},
Catalog = "string",
ProfilesDirectory = "string",
ProjectDirectory = "string",
Schema = "string",
Source = "string",
WarehouseId = "string",
},
Description = "string",
DisableAutoOptimization = false,
EmailNotifications = new Databricks.Inputs.JobTaskEmailNotificationsArgs
{
NoAlertForSkippedRuns = false,
OnDurationWarningThresholdExceededs = new[]
{
"string",
},
OnFailures = new[]
{
"string",
},
OnStarts = new[]
{
"string",
},
OnStreamingBacklogExceededs = new[]
{
"string",
},
OnSuccesses = new[]
{
"string",
},
},
EnvironmentKey = "string",
ExistingClusterId = "string",
ForEachTask = new Databricks.Inputs.JobTaskForEachTaskArgs
{
Inputs = "string",
Task = new Databricks.Inputs.JobTaskForEachTaskTaskArgs
{
TaskKey = "string",
NotebookTask = new Databricks.Inputs.JobTaskForEachTaskTaskNotebookTaskArgs
{
NotebookPath = "string",
BaseParameters =
{
{ "string", "string" },
},
Source = "string",
WarehouseId = "string",
},
WebhookNotifications = new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsArgs
{
OnDurationWarningThresholdExceededs = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs
{
Id = "string",
},
},
OnFailures = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs
{
Id = "string",
},
},
OnStarts = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs
{
Id = "string",
},
},
OnStreamingBacklogExceededs = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs
{
Id = "string",
},
},
OnSuccesses = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs
{
Id = "string",
},
},
},
NewCluster = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterArgs
{
SparkVersion = "string",
EnableLocalDiskEncryption = false,
ClusterLogConf = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterLogConfArgs
{
Dbfs = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
},
GcpAttributes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterGcpAttributesArgs
{
Availability = "string",
BootDiskSize = 0,
GoogleServiceAccount = "string",
LocalSsdCount = 0,
UsePreemptibleExecutors = false,
ZoneId = "string",
},
ClusterId = "string",
IdempotencyToken = "string",
ClusterMountInfos = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs
{
LocalMountDirPath = "string",
NetworkFilesystemInfo = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs
{
ServerAddress = "string",
MountOptions = "string",
},
RemoteMountDirPath = "string",
},
},
ClusterName = "string",
CustomTags =
{
{ "string", "string" },
},
DataSecurityMode = "string",
DockerImage = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterDockerImageArgs
{
Url = "string",
BasicAuth = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs
{
Password = "string",
Username = "string",
},
},
DriverInstancePoolId = "string",
InitScripts = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptArgs
{
Abfss = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs
{
Destination = "string",
},
File = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptFileArgs
{
Destination = "string",
},
Gcs = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs
{
Destination = "string",
},
S3 = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptS3Args
{
Destination = "string",
CannedAcl = "string",
EnableEncryption = false,
EncryptionType = "string",
Endpoint = "string",
KmsKey = "string",
Region = "string",
},
Volumes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs
{
Destination = "string",
},
Workspace = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs
{
Destination = "string",
},
},
},
EnableElasticDisk = false,
ApplyPolicyDefaultValues = false,
AzureAttributes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAzureAttributesArgs
{
Availability = "string",
FirstOnDemand = 0,
LogAnalyticsInfo = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs
{
LogAnalyticsPrimaryKey = "string",
LogAnalyticsWorkspaceId = "string",
},
SpotBidMaxPrice = 0,
},
AwsAttributes = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAwsAttributesArgs
{
Availability = "string",
EbsVolumeCount = 0,
EbsVolumeIops = 0,
EbsVolumeSize = 0,
EbsVolumeThroughput = 0,
EbsVolumeType = "string",
FirstOnDemand = 0,
InstanceProfileArn = "string",
SpotBidPricePercent = 0,
ZoneId = "string",
},
DriverNodeTypeId = "string",
InstancePoolId = "string",
Libraries = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryArgs
{
Cran = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
Maven = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterLibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
},
},
NodeTypeId = "string",
NumWorkers = 0,
PolicyId = "string",
RuntimeEngine = "string",
SingleUserName = "string",
SparkConf =
{
{ "string", "string" },
},
SparkEnvVars =
{
{ "string", "string" },
},
Autoscale = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterAutoscaleArgs
{
MaxWorkers = 0,
MinWorkers = 0,
},
SshPublicKeys = new[]
{
"string",
},
WorkloadType = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs
{
Clients = new Databricks.Inputs.JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs
{
Jobs = false,
Notebooks = false,
},
},
},
DisableAutoOptimization = false,
EmailNotifications = new Databricks.Inputs.JobTaskForEachTaskTaskEmailNotificationsArgs
{
NoAlertForSkippedRuns = false,
OnDurationWarningThresholdExceededs = new[]
{
"string",
},
OnFailures = new[]
{
"string",
},
OnStarts = new[]
{
"string",
},
OnStreamingBacklogExceededs = new[]
{
"string",
},
OnSuccesses = new[]
{
"string",
},
},
EnvironmentKey = "string",
ExistingClusterId = "string",
Health = new Databricks.Inputs.JobTaskForEachTaskTaskHealthArgs
{
Rules = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskHealthRuleArgs
{
Metric = "string",
Op = "string",
Value = 0,
},
},
},
JobClusterKey = "string",
Libraries = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskLibraryArgs
{
Cran = new Databricks.Inputs.JobTaskForEachTaskTaskLibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
Maven = new Databricks.Inputs.JobTaskForEachTaskTaskLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.JobTaskForEachTaskTaskLibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
},
},
MaxRetries = 0,
MinRetryIntervalMillis = 0,
Description = "string",
DependsOns = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskDependsOnArgs
{
TaskKey = "string",
Outcome = "string",
},
},
SparkPythonTask = new Databricks.Inputs.JobTaskForEachTaskTaskSparkPythonTaskArgs
{
PythonFile = "string",
Parameters = new[]
{
"string",
},
Source = "string",
},
PipelineTask = new Databricks.Inputs.JobTaskForEachTaskTaskPipelineTaskArgs
{
PipelineId = "string",
FullRefresh = false,
},
PythonWheelTask = new Databricks.Inputs.JobTaskForEachTaskTaskPythonWheelTaskArgs
{
EntryPoint = "string",
NamedParameters =
{
{ "string", "string" },
},
PackageName = "string",
Parameters = new[]
{
"string",
},
},
RetryOnTimeout = false,
RunIf = "string",
RunJobTask = new Databricks.Inputs.JobTaskForEachTaskTaskRunJobTaskArgs
{
JobId = 0,
DbtCommands = new[]
{
"string",
},
JarParams = new[]
{
"string",
},
JobParameters =
{
{ "string", "string" },
},
NotebookParams =
{
{ "string", "string" },
},
PipelineParams = new Databricks.Inputs.JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs
{
FullRefresh = false,
},
PythonNamedParams =
{
{ "string", "string" },
},
PythonParams = new[]
{
"string",
},
SparkSubmitParams = new[]
{
"string",
},
SqlParams =
{
{ "string", "string" },
},
},
SparkJarTask = new Databricks.Inputs.JobTaskForEachTaskTaskSparkJarTaskArgs
{
JarUri = "string",
MainClassName = "string",
Parameters = new[]
{
"string",
},
},
NotificationSettings = new Databricks.Inputs.JobTaskForEachTaskTaskNotificationSettingsArgs
{
AlertOnLastAttempt = false,
NoAlertForCanceledRuns = false,
NoAlertForSkippedRuns = false,
},
SparkSubmitTask = new Databricks.Inputs.JobTaskForEachTaskTaskSparkSubmitTaskArgs
{
Parameters = new[]
{
"string",
},
},
SqlTask = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskArgs
{
WarehouseId = "string",
Alert = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskAlertArgs
{
AlertId = "string",
PauseSubscriptions = false,
Subscriptions = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs
{
DestinationId = "string",
UserName = "string",
},
},
},
Dashboard = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskDashboardArgs
{
DashboardId = "string",
CustomSubject = "string",
PauseSubscriptions = false,
Subscriptions = new[]
{
new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs
{
DestinationId = "string",
UserName = "string",
},
},
},
File = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskFileArgs
{
Path = "string",
Source = "string",
},
Parameters =
{
{ "string", "string" },
},
Query = new Databricks.Inputs.JobTaskForEachTaskTaskSqlTaskQueryArgs
{
QueryId = "string",
},
},
DbtTask = new Databricks.Inputs.JobTaskForEachTaskTaskDbtTaskArgs
{
Commands = new[]
{
"string",
},
Catalog = "string",
ProfilesDirectory = "string",
ProjectDirectory = "string",
Schema = "string",
Source = "string",
WarehouseId = "string",
},
TimeoutSeconds = 0,
ConditionTask = new Databricks.Inputs.JobTaskForEachTaskTaskConditionTaskArgs
{
Left = "string",
Op = "string",
Right = "string",
},
},
Concurrency = 0,
},
Health = new Databricks.Inputs.JobTaskHealthArgs
{
Rules = new[]
{
new Databricks.Inputs.JobTaskHealthRuleArgs
{
Metric = "string",
Op = "string",
Value = 0,
},
},
},
JobClusterKey = "string",
Libraries = new[]
{
new Databricks.Inputs.JobTaskLibraryArgs
{
Cran = new Databricks.Inputs.JobTaskLibraryCranArgs
{
Package = "string",
Repo = "string",
},
Egg = "string",
Jar = "string",
Maven = new Databricks.Inputs.JobTaskLibraryMavenArgs
{
Coordinates = "string",
Exclusions = new[]
{
"string",
},
Repo = "string",
},
Pypi = new Databricks.Inputs.JobTaskLibraryPypiArgs
{
Package = "string",
Repo = "string",
},
Requirements = "string",
Whl = "string",
},
},
MaxRetries = 0,
WebhookNotifications = new Databricks.Inputs.JobTaskWebhookNotificationsArgs
{
OnDurationWarningThresholdExceededs = new[]
{
new Databricks.Inputs.JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs
{
Id = "string",
},
},
OnFailures = new[]
{
new Databricks.Inputs.JobTaskWebhookNotificationsOnFailureArgs
{
Id = "string",
},
},
OnStarts = new[]
{
new Databricks.Inputs.JobTaskWebhookNotificationsOnStartArgs
{
Id = "string",
},
},
OnStreamingBacklogExceededs = new[]
{
new Databricks.Inputs.JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs
{
Id = "string",
},
},
OnSuccesses = new[]
{
new Databricks.Inputs.JobTaskWebhookNotificationsOnSuccessArgs
{
Id = "string",
},
},
},
DependsOns = new[]
{
new Databricks.Inputs.JobTaskDependsOnArgs
{
TaskKey = "string",
Outcome = "string",
},
},
RetryOnTimeout = false,
NotificationSettings = new Databricks.Inputs.JobTaskNotificationSettingsArgs
{
AlertOnLastAttempt = false,
NoAlertForCanceledRuns = false,
NoAlertForSkippedRuns = false,
},
PipelineTask = new Databricks.Inputs.JobTaskPipelineTaskArgs
{
PipelineId = "string",
FullRefresh = false,
},
PythonWheelTask = new Databricks.Inputs.JobTaskPythonWheelTaskArgs
{
EntryPoint = "string",
NamedParameters =
{
{ "string", "string" },
},
PackageName = "string",
Parameters = new[]
{
"string",
},
},
NotebookTask = new Databricks.Inputs.JobTaskNotebookTaskArgs
{
NotebookPath = "string",
BaseParameters =
{
{ "string", "string" },
},
Source = "string",
WarehouseId = "string",
},
RunIf = "string",
RunJobTask = new Databricks.Inputs.JobTaskRunJobTaskArgs
{
JobId = 0,
DbtCommands = new[]
{
"string",
},
JarParams = new[]
{
"string",
},
JobParameters =
{
{ "string", "string" },
},
NotebookParams =
{
{ "string", "string" },
},
PipelineParams = new Databricks.Inputs.JobTaskRunJobTaskPipelineParamsArgs
{
FullRefresh = false,
},
PythonNamedParams =
{
{ "string", "string" },
},
PythonParams = new[]
{
"string",
},
SparkSubmitParams = new[]
{
"string",
},
SqlParams =
{
{ "string", "string" },
},
},
SparkJarTask = new Databricks.Inputs.JobTaskSparkJarTaskArgs
{
JarUri = "string",
MainClassName = "string",
Parameters = new[]
{
"string",
},
},
SparkPythonTask = new Databricks.Inputs.JobTaskSparkPythonTaskArgs
{
PythonFile = "string",
Parameters = new[]
{
"string",
},
Source = "string",
},
SparkSubmitTask = new Databricks.Inputs.JobTaskSparkSubmitTaskArgs
{
Parameters = new[]
{
"string",
},
},
SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
{
WarehouseId = "string",
Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs
{
AlertId = "string",
PauseSubscriptions = false,
Subscriptions = new[]
{
new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs
{
DestinationId = "string",
UserName = "string",
},
},
},
Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs
{
DashboardId = "string",
CustomSubject = "string",
PauseSubscriptions = false,
Subscriptions = new[]
{
new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs
{
DestinationId = "string",
UserName = "string",
},
},
},
File = new Databricks.Inputs.JobTaskSqlTaskFileArgs
{
Path = "string",
Source = "string",
},
Parameters =
{
{ "string", "string" },
},
Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs
{
QueryId = "string",
},
},
ConditionTask = new Databricks.Inputs.JobTaskConditionTaskArgs
{
Left = "string",
Op = "string",
Right = "string",
},
TimeoutSeconds = 0,
MinRetryIntervalMillis = 0,
},
},
TimeoutSeconds = 0,
Trigger = new Databricks.Inputs.JobTriggerArgs
{
FileArrival = new Databricks.Inputs.JobTriggerFileArrivalArgs
{
Url = "string",
MinTimeBetweenTriggersSeconds = 0,
WaitAfterLastChangeSeconds = 0,
},
PauseStatus = "string",
Periodic = new Databricks.Inputs.JobTriggerPeriodicArgs
{
Interval = 0,
Unit = "string",
},
Table = new Databricks.Inputs.JobTriggerTableArgs
{
Condition = "string",
MinTimeBetweenTriggersSeconds = 0,
TableNames = new[]
{
"string",
},
WaitAfterLastChangeSeconds = 0,
},
TableUpdate = new Databricks.Inputs.JobTriggerTableUpdateArgs
{
TableNames = new[]
{
"string",
},
Condition = "string",
MinTimeBetweenTriggersSeconds = 0,
WaitAfterLastChangeSeconds = 0,
},
},
WebhookNotifications = new Databricks.Inputs.JobWebhookNotificationsArgs
{
OnDurationWarningThresholdExceededs = new[]
{
new Databricks.Inputs.JobWebhookNotificationsOnDurationWarningThresholdExceededArgs
{
Id = "string",
},
},
OnFailures = new[]
{
new Databricks.Inputs.JobWebhookNotificationsOnFailureArgs
{
Id = "string",
},
},
OnStarts = new[]
{
new Databricks.Inputs.JobWebhookNotificationsOnStartArgs
{
Id = "string",
},
},
OnStreamingBacklogExceededs = new[]
{
new Databricks.Inputs.JobWebhookNotificationsOnStreamingBacklogExceededArgs
{
Id = "string",
},
},
OnSuccesses = new[]
{
new Databricks.Inputs.JobWebhookNotificationsOnSuccessArgs
{
Id = "string",
},
},
},
});
example, err := databricks.NewJob(ctx, "jobResource", &databricks.JobArgs{
BudgetPolicyId: pulumi.String("string"),
Continuous: &databricks.JobContinuousArgs{
PauseStatus: pulumi.String("string"),
},
ControlRunState: pulumi.Bool(false),
Deployment: &databricks.JobDeploymentArgs{
Kind: pulumi.String("string"),
MetadataFilePath: pulumi.String("string"),
},
Description: pulumi.String("string"),
EditMode: pulumi.String("string"),
EmailNotifications: &databricks.JobEmailNotificationsArgs{
NoAlertForSkippedRuns: pulumi.Bool(false),
OnDurationWarningThresholdExceededs: pulumi.StringArray{
pulumi.String("string"),
},
OnFailures: pulumi.StringArray{
pulumi.String("string"),
},
OnStarts: pulumi.StringArray{
pulumi.String("string"),
},
OnStreamingBacklogExceededs: pulumi.StringArray{
pulumi.String("string"),
},
OnSuccesses: pulumi.StringArray{
pulumi.String("string"),
},
},
Environments: databricks.JobEnvironmentArray{
&databricks.JobEnvironmentArgs{
EnvironmentKey: pulumi.String("string"),
Spec: &databricks.JobEnvironmentSpecArgs{
Client: pulumi.String("string"),
Dependencies: pulumi.StringArray{
pulumi.String("string"),
},
},
},
},
ExistingClusterId: pulumi.String("string"),
Format: pulumi.String("string"),
GitSource: &databricks.JobGitSourceArgs{
Url: pulumi.String("string"),
Branch: pulumi.String("string"),
Commit: pulumi.String("string"),
GitSnapshot: &databricks.JobGitSourceGitSnapshotArgs{
UsedCommit: pulumi.String("string"),
},
JobSource: &databricks.JobGitSourceJobSourceArgs{
ImportFromGitBranch: pulumi.String("string"),
JobConfigPath: pulumi.String("string"),
DirtyState: pulumi.String("string"),
},
Provider: pulumi.String("string"),
Tag: pulumi.String("string"),
},
Health: &databricks.JobHealthArgs{
Rules: databricks.JobHealthRuleArray{
&databricks.JobHealthRuleArgs{
Metric: pulumi.String("string"),
Op: pulumi.String("string"),
Value: pulumi.Int(0),
},
},
},
JobClusters: databricks.JobJobClusterArray{
&databricks.JobJobClusterArgs{
JobClusterKey: pulumi.String("string"),
NewCluster: &databricks.JobJobClusterNewClusterArgs{
SparkVersion: pulumi.String("string"),
EnableLocalDiskEncryption: pulumi.Bool(false),
ClusterLogConf: &databricks.JobJobClusterNewClusterClusterLogConfArgs{
Dbfs: &databricks.JobJobClusterNewClusterClusterLogConfDbfsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobJobClusterNewClusterClusterLogConfS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
},
GcpAttributes: &databricks.JobJobClusterNewClusterGcpAttributesArgs{
Availability: pulumi.String("string"),
BootDiskSize: pulumi.Int(0),
GoogleServiceAccount: pulumi.String("string"),
LocalSsdCount: pulumi.Int(0),
UsePreemptibleExecutors: pulumi.Bool(false),
ZoneId: pulumi.String("string"),
},
ClusterId: pulumi.String("string"),
IdempotencyToken: pulumi.String("string"),
ClusterMountInfos: databricks.JobJobClusterNewClusterClusterMountInfoArray{
&databricks.JobJobClusterNewClusterClusterMountInfoArgs{
LocalMountDirPath: pulumi.String("string"),
NetworkFilesystemInfo: &databricks.JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
ServerAddress: pulumi.String("string"),
MountOptions: pulumi.String("string"),
},
RemoteMountDirPath: pulumi.String("string"),
},
},
ClusterName: pulumi.String("string"),
CustomTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
DataSecurityMode: pulumi.String("string"),
DockerImage: &databricks.JobJobClusterNewClusterDockerImageArgs{
Url: pulumi.String("string"),
BasicAuth: &databricks.JobJobClusterNewClusterDockerImageBasicAuthArgs{
Password: pulumi.String("string"),
Username: pulumi.String("string"),
},
},
DriverInstancePoolId: pulumi.String("string"),
InitScripts: databricks.JobJobClusterNewClusterInitScriptArray{
&databricks.JobJobClusterNewClusterInitScriptArgs{
Abfss: &databricks.JobJobClusterNewClusterInitScriptAbfssArgs{
Destination: pulumi.String("string"),
},
File: &databricks.JobJobClusterNewClusterInitScriptFileArgs{
Destination: pulumi.String("string"),
},
Gcs: &databricks.JobJobClusterNewClusterInitScriptGcsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobJobClusterNewClusterInitScriptS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
Volumes: &databricks.JobJobClusterNewClusterInitScriptVolumesArgs{
Destination: pulumi.String("string"),
},
Workspace: &databricks.JobJobClusterNewClusterInitScriptWorkspaceArgs{
Destination: pulumi.String("string"),
},
},
},
EnableElasticDisk: pulumi.Bool(false),
ApplyPolicyDefaultValues: pulumi.Bool(false),
AzureAttributes: &databricks.JobJobClusterNewClusterAzureAttributesArgs{
Availability: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
LogAnalyticsInfo: &databricks.JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs{
LogAnalyticsPrimaryKey: pulumi.String("string"),
LogAnalyticsWorkspaceId: pulumi.String("string"),
},
SpotBidMaxPrice: pulumi.Float64(0),
},
AwsAttributes: &databricks.JobJobClusterNewClusterAwsAttributesArgs{
Availability: pulumi.String("string"),
EbsVolumeCount: pulumi.Int(0),
EbsVolumeIops: pulumi.Int(0),
EbsVolumeSize: pulumi.Int(0),
EbsVolumeThroughput: pulumi.Int(0),
EbsVolumeType: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
InstanceProfileArn: pulumi.String("string"),
SpotBidPricePercent: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
DriverNodeTypeId: pulumi.String("string"),
InstancePoolId: pulumi.String("string"),
Libraries: databricks.JobJobClusterNewClusterLibraryArray{
&databricks.JobJobClusterNewClusterLibraryArgs{
Cran: &databricks.JobJobClusterNewClusterLibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
Maven: &databricks.JobJobClusterNewClusterLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.JobJobClusterNewClusterLibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
},
},
NodeTypeId: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
PolicyId: pulumi.String("string"),
RuntimeEngine: pulumi.String("string"),
SingleUserName: pulumi.String("string"),
SparkConf: pulumi.StringMap{
"string": pulumi.String("string"),
},
SparkEnvVars: pulumi.StringMap{
"string": pulumi.String("string"),
},
Autoscale: &databricks.JobJobClusterNewClusterAutoscaleArgs{
MaxWorkers: pulumi.Int(0),
MinWorkers: pulumi.Int(0),
},
SshPublicKeys: pulumi.StringArray{
pulumi.String("string"),
},
WorkloadType: &databricks.JobJobClusterNewClusterWorkloadTypeArgs{
Clients: &databricks.JobJobClusterNewClusterWorkloadTypeClientsArgs{
Jobs: pulumi.Bool(false),
Notebooks: pulumi.Bool(false),
},
},
},
},
},
Libraries: databricks.JobLibraryArray{
&databricks.JobLibraryArgs{
Cran: &databricks.JobLibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
Maven: &databricks.JobLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.JobLibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
},
},
MaxConcurrentRuns: pulumi.Int(0),
Name: pulumi.String("string"),
NewCluster: &databricks.JobNewClusterArgs{
SparkVersion: pulumi.String("string"),
EnableLocalDiskEncryption: pulumi.Bool(false),
ClusterLogConf: &databricks.JobNewClusterClusterLogConfArgs{
Dbfs: &databricks.JobNewClusterClusterLogConfDbfsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobNewClusterClusterLogConfS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
},
GcpAttributes: &databricks.JobNewClusterGcpAttributesArgs{
Availability: pulumi.String("string"),
BootDiskSize: pulumi.Int(0),
GoogleServiceAccount: pulumi.String("string"),
LocalSsdCount: pulumi.Int(0),
UsePreemptibleExecutors: pulumi.Bool(false),
ZoneId: pulumi.String("string"),
},
ClusterId: pulumi.String("string"),
IdempotencyToken: pulumi.String("string"),
ClusterMountInfos: databricks.JobNewClusterClusterMountInfoArray{
&databricks.JobNewClusterClusterMountInfoArgs{
LocalMountDirPath: pulumi.String("string"),
NetworkFilesystemInfo: &databricks.JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
ServerAddress: pulumi.String("string"),
MountOptions: pulumi.String("string"),
},
RemoteMountDirPath: pulumi.String("string"),
},
},
ClusterName: pulumi.String("string"),
CustomTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
DataSecurityMode: pulumi.String("string"),
DockerImage: &databricks.JobNewClusterDockerImageArgs{
Url: pulumi.String("string"),
BasicAuth: &databricks.JobNewClusterDockerImageBasicAuthArgs{
Password: pulumi.String("string"),
Username: pulumi.String("string"),
},
},
DriverInstancePoolId: pulumi.String("string"),
InitScripts: databricks.JobNewClusterInitScriptArray{
&databricks.JobNewClusterInitScriptArgs{
Abfss: &databricks.JobNewClusterInitScriptAbfssArgs{
Destination: pulumi.String("string"),
},
File: &databricks.JobNewClusterInitScriptFileArgs{
Destination: pulumi.String("string"),
},
Gcs: &databricks.JobNewClusterInitScriptGcsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobNewClusterInitScriptS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
Volumes: &databricks.JobNewClusterInitScriptVolumesArgs{
Destination: pulumi.String("string"),
},
Workspace: &databricks.JobNewClusterInitScriptWorkspaceArgs{
Destination: pulumi.String("string"),
},
},
},
EnableElasticDisk: pulumi.Bool(false),
ApplyPolicyDefaultValues: pulumi.Bool(false),
AzureAttributes: &databricks.JobNewClusterAzureAttributesArgs{
Availability: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
LogAnalyticsInfo: &databricks.JobNewClusterAzureAttributesLogAnalyticsInfoArgs{
LogAnalyticsPrimaryKey: pulumi.String("string"),
LogAnalyticsWorkspaceId: pulumi.String("string"),
},
SpotBidMaxPrice: pulumi.Float64(0),
},
AwsAttributes: &databricks.JobNewClusterAwsAttributesArgs{
Availability: pulumi.String("string"),
EbsVolumeCount: pulumi.Int(0),
EbsVolumeIops: pulumi.Int(0),
EbsVolumeSize: pulumi.Int(0),
EbsVolumeThroughput: pulumi.Int(0),
EbsVolumeType: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
InstanceProfileArn: pulumi.String("string"),
SpotBidPricePercent: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
DriverNodeTypeId: pulumi.String("string"),
InstancePoolId: pulumi.String("string"),
Libraries: databricks.JobNewClusterLibraryArray{
&databricks.JobNewClusterLibraryArgs{
Cran: &databricks.JobNewClusterLibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
Maven: &databricks.JobNewClusterLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.JobNewClusterLibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
},
},
NodeTypeId: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
PolicyId: pulumi.String("string"),
RuntimeEngine: pulumi.String("string"),
SingleUserName: pulumi.String("string"),
SparkConf: pulumi.StringMap{
"string": pulumi.String("string"),
},
SparkEnvVars: pulumi.StringMap{
"string": pulumi.String("string"),
},
Autoscale: &databricks.JobNewClusterAutoscaleArgs{
MaxWorkers: pulumi.Int(0),
MinWorkers: pulumi.Int(0),
},
SshPublicKeys: pulumi.StringArray{
pulumi.String("string"),
},
WorkloadType: &databricks.JobNewClusterWorkloadTypeArgs{
Clients: &databricks.JobNewClusterWorkloadTypeClientsArgs{
Jobs: pulumi.Bool(false),
Notebooks: pulumi.Bool(false),
},
},
},
NotificationSettings: &databricks.JobNotificationSettingsArgs{
NoAlertForCanceledRuns: pulumi.Bool(false),
NoAlertForSkippedRuns: pulumi.Bool(false),
},
Parameters: databricks.JobParameterArray{
&databricks.JobParameterArgs{
Default: pulumi.String("string"),
Name: pulumi.String("string"),
},
},
Queue: &databricks.JobQueueArgs{
Enabled: pulumi.Bool(false),
},
RunAs: &databricks.JobRunAsArgs{
ServicePrincipalName: pulumi.String("string"),
UserName: pulumi.String("string"),
},
Schedule: &databricks.JobScheduleArgs{
QuartzCronExpression: pulumi.String("string"),
TimezoneId: pulumi.String("string"),
PauseStatus: pulumi.String("string"),
},
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
Tasks: databricks.JobTaskArray{
&databricks.JobTaskArgs{
TaskKey: pulumi.String("string"),
NewCluster: &databricks.JobTaskNewClusterArgs{
SparkVersion: pulumi.String("string"),
EnableLocalDiskEncryption: pulumi.Bool(false),
ClusterLogConf: &databricks.JobTaskNewClusterClusterLogConfArgs{
Dbfs: &databricks.JobTaskNewClusterClusterLogConfDbfsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobTaskNewClusterClusterLogConfS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
},
GcpAttributes: &databricks.JobTaskNewClusterGcpAttributesArgs{
Availability: pulumi.String("string"),
BootDiskSize: pulumi.Int(0),
GoogleServiceAccount: pulumi.String("string"),
LocalSsdCount: pulumi.Int(0),
UsePreemptibleExecutors: pulumi.Bool(false),
ZoneId: pulumi.String("string"),
},
ClusterId: pulumi.String("string"),
IdempotencyToken: pulumi.String("string"),
ClusterMountInfos: databricks.JobTaskNewClusterClusterMountInfoArray{
&databricks.JobTaskNewClusterClusterMountInfoArgs{
LocalMountDirPath: pulumi.String("string"),
NetworkFilesystemInfo: &databricks.JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
ServerAddress: pulumi.String("string"),
MountOptions: pulumi.String("string"),
},
RemoteMountDirPath: pulumi.String("string"),
},
},
ClusterName: pulumi.String("string"),
CustomTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
DataSecurityMode: pulumi.String("string"),
DockerImage: &databricks.JobTaskNewClusterDockerImageArgs{
Url: pulumi.String("string"),
BasicAuth: &databricks.JobTaskNewClusterDockerImageBasicAuthArgs{
Password: pulumi.String("string"),
Username: pulumi.String("string"),
},
},
DriverInstancePoolId: pulumi.String("string"),
InitScripts: databricks.JobTaskNewClusterInitScriptArray{
&databricks.JobTaskNewClusterInitScriptArgs{
Abfss: &databricks.JobTaskNewClusterInitScriptAbfssArgs{
Destination: pulumi.String("string"),
},
File: &databricks.JobTaskNewClusterInitScriptFileArgs{
Destination: pulumi.String("string"),
},
Gcs: &databricks.JobTaskNewClusterInitScriptGcsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobTaskNewClusterInitScriptS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
Volumes: &databricks.JobTaskNewClusterInitScriptVolumesArgs{
Destination: pulumi.String("string"),
},
Workspace: &databricks.JobTaskNewClusterInitScriptWorkspaceArgs{
Destination: pulumi.String("string"),
},
},
},
EnableElasticDisk: pulumi.Bool(false),
ApplyPolicyDefaultValues: pulumi.Bool(false),
AzureAttributes: &databricks.JobTaskNewClusterAzureAttributesArgs{
Availability: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
LogAnalyticsInfo: &databricks.JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs{
LogAnalyticsPrimaryKey: pulumi.String("string"),
LogAnalyticsWorkspaceId: pulumi.String("string"),
},
SpotBidMaxPrice: pulumi.Float64(0),
},
AwsAttributes: &databricks.JobTaskNewClusterAwsAttributesArgs{
Availability: pulumi.String("string"),
EbsVolumeCount: pulumi.Int(0),
EbsVolumeIops: pulumi.Int(0),
EbsVolumeSize: pulumi.Int(0),
EbsVolumeThroughput: pulumi.Int(0),
EbsVolumeType: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
InstanceProfileArn: pulumi.String("string"),
SpotBidPricePercent: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
DriverNodeTypeId: pulumi.String("string"),
InstancePoolId: pulumi.String("string"),
Libraries: databricks.JobTaskNewClusterLibraryArray{
&databricks.JobTaskNewClusterLibraryArgs{
Cran: &databricks.JobTaskNewClusterLibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
Maven: &databricks.JobTaskNewClusterLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.JobTaskNewClusterLibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
},
},
NodeTypeId: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
PolicyId: pulumi.String("string"),
RuntimeEngine: pulumi.String("string"),
SingleUserName: pulumi.String("string"),
SparkConf: pulumi.StringMap{
"string": pulumi.String("string"),
},
SparkEnvVars: pulumi.StringMap{
"string": pulumi.String("string"),
},
Autoscale: &databricks.JobTaskNewClusterAutoscaleArgs{
MaxWorkers: pulumi.Int(0),
MinWorkers: pulumi.Int(0),
},
SshPublicKeys: pulumi.StringArray{
pulumi.String("string"),
},
WorkloadType: &databricks.JobTaskNewClusterWorkloadTypeArgs{
Clients: &databricks.JobTaskNewClusterWorkloadTypeClientsArgs{
Jobs: pulumi.Bool(false),
Notebooks: pulumi.Bool(false),
},
},
},
DbtTask: &databricks.JobTaskDbtTaskArgs{
Commands: pulumi.StringArray{
pulumi.String("string"),
},
Catalog: pulumi.String("string"),
ProfilesDirectory: pulumi.String("string"),
ProjectDirectory: pulumi.String("string"),
Schema: pulumi.String("string"),
Source: pulumi.String("string"),
WarehouseId: pulumi.String("string"),
},
Description: pulumi.String("string"),
DisableAutoOptimization: pulumi.Bool(false),
EmailNotifications: &databricks.JobTaskEmailNotificationsArgs{
NoAlertForSkippedRuns: pulumi.Bool(false),
OnDurationWarningThresholdExceededs: pulumi.StringArray{
pulumi.String("string"),
},
OnFailures: pulumi.StringArray{
pulumi.String("string"),
},
OnStarts: pulumi.StringArray{
pulumi.String("string"),
},
OnStreamingBacklogExceededs: pulumi.StringArray{
pulumi.String("string"),
},
OnSuccesses: pulumi.StringArray{
pulumi.String("string"),
},
},
EnvironmentKey: pulumi.String("string"),
ExistingClusterId: pulumi.String("string"),
ForEachTask: &databricks.JobTaskForEachTaskArgs{
Inputs: pulumi.String("string"),
Task: &databricks.JobTaskForEachTaskTaskArgs{
TaskKey: pulumi.String("string"),
NotebookTask: &databricks.JobTaskForEachTaskTaskNotebookTaskArgs{
NotebookPath: pulumi.String("string"),
BaseParameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
Source: pulumi.String("string"),
WarehouseId: pulumi.String("string"),
},
WebhookNotifications: &databricks.JobTaskForEachTaskTaskWebhookNotificationsArgs{
OnDurationWarningThresholdExceededs: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArray{
&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs{
Id: pulumi.String("string"),
},
},
OnFailures: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnFailureArray{
&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs{
Id: pulumi.String("string"),
},
},
OnStarts: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStartArray{
&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs{
Id: pulumi.String("string"),
},
},
OnStreamingBacklogExceededs: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray{
&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{
Id: pulumi.String("string"),
},
},
OnSuccesses: databricks.JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArray{
&databricks.JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs{
Id: pulumi.String("string"),
},
},
},
NewCluster: &databricks.JobTaskForEachTaskTaskNewClusterArgs{
SparkVersion: pulumi.String("string"),
EnableLocalDiskEncryption: pulumi.Bool(false),
ClusterLogConf: &databricks.JobTaskForEachTaskTaskNewClusterClusterLogConfArgs{
Dbfs: &databricks.JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
},
GcpAttributes: &databricks.JobTaskForEachTaskTaskNewClusterGcpAttributesArgs{
Availability: pulumi.String("string"),
BootDiskSize: pulumi.Int(0),
GoogleServiceAccount: pulumi.String("string"),
LocalSsdCount: pulumi.Int(0),
UsePreemptibleExecutors: pulumi.Bool(false),
ZoneId: pulumi.String("string"),
},
ClusterId: pulumi.String("string"),
IdempotencyToken: pulumi.String("string"),
ClusterMountInfos: databricks.JobTaskForEachTaskTaskNewClusterClusterMountInfoArray{
&databricks.JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs{
LocalMountDirPath: pulumi.String("string"),
NetworkFilesystemInfo: &databricks.JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs{
ServerAddress: pulumi.String("string"),
MountOptions: pulumi.String("string"),
},
RemoteMountDirPath: pulumi.String("string"),
},
},
ClusterName: pulumi.String("string"),
CustomTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
DataSecurityMode: pulumi.String("string"),
DockerImage: &databricks.JobTaskForEachTaskTaskNewClusterDockerImageArgs{
Url: pulumi.String("string"),
BasicAuth: &databricks.JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs{
Password: pulumi.String("string"),
Username: pulumi.String("string"),
},
},
DriverInstancePoolId: pulumi.String("string"),
InitScripts: databricks.JobTaskForEachTaskTaskNewClusterInitScriptArray{
&databricks.JobTaskForEachTaskTaskNewClusterInitScriptArgs{
Abfss: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs{
Destination: pulumi.String("string"),
},
File: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptFileArgs{
Destination: pulumi.String("string"),
},
Gcs: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs{
Destination: pulumi.String("string"),
},
S3: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptS3Args{
Destination: pulumi.String("string"),
CannedAcl: pulumi.String("string"),
EnableEncryption: pulumi.Bool(false),
EncryptionType: pulumi.String("string"),
Endpoint: pulumi.String("string"),
KmsKey: pulumi.String("string"),
Region: pulumi.String("string"),
},
Volumes: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs{
Destination: pulumi.String("string"),
},
Workspace: &databricks.JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs{
Destination: pulumi.String("string"),
},
},
},
EnableElasticDisk: pulumi.Bool(false),
ApplyPolicyDefaultValues: pulumi.Bool(false),
AzureAttributes: &databricks.JobTaskForEachTaskTaskNewClusterAzureAttributesArgs{
Availability: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
LogAnalyticsInfo: &databricks.JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs{
LogAnalyticsPrimaryKey: pulumi.String("string"),
LogAnalyticsWorkspaceId: pulumi.String("string"),
},
SpotBidMaxPrice: pulumi.Float64(0),
},
AwsAttributes: &databricks.JobTaskForEachTaskTaskNewClusterAwsAttributesArgs{
Availability: pulumi.String("string"),
EbsVolumeCount: pulumi.Int(0),
EbsVolumeIops: pulumi.Int(0),
EbsVolumeSize: pulumi.Int(0),
EbsVolumeThroughput: pulumi.Int(0),
EbsVolumeType: pulumi.String("string"),
FirstOnDemand: pulumi.Int(0),
InstanceProfileArn: pulumi.String("string"),
SpotBidPricePercent: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
DriverNodeTypeId: pulumi.String("string"),
InstancePoolId: pulumi.String("string"),
Libraries: databricks.JobTaskForEachTaskTaskNewClusterLibraryArray{
&databricks.JobTaskForEachTaskTaskNewClusterLibraryArgs{
Cran: &databricks.JobTaskForEachTaskTaskNewClusterLibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
Maven: &databricks.JobTaskForEachTaskTaskNewClusterLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.JobTaskForEachTaskTaskNewClusterLibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
},
},
NodeTypeId: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
PolicyId: pulumi.String("string"),
RuntimeEngine: pulumi.String("string"),
SingleUserName: pulumi.String("string"),
SparkConf: pulumi.StringMap{
"string": pulumi.String("string"),
},
SparkEnvVars: pulumi.StringMap{
"string": pulumi.String("string"),
},
Autoscale: &databricks.JobTaskForEachTaskTaskNewClusterAutoscaleArgs{
MaxWorkers: pulumi.Int(0),
MinWorkers: pulumi.Int(0),
},
SshPublicKeys: pulumi.StringArray{
pulumi.String("string"),
},
WorkloadType: &databricks.JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs{
Clients: &databricks.JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs{
Jobs: pulumi.Bool(false),
Notebooks: pulumi.Bool(false),
},
},
},
DisableAutoOptimization: pulumi.Bool(false),
EmailNotifications: &databricks.JobTaskForEachTaskTaskEmailNotificationsArgs{
NoAlertForSkippedRuns: pulumi.Bool(false),
OnDurationWarningThresholdExceededs: pulumi.StringArray{
pulumi.String("string"),
},
OnFailures: pulumi.StringArray{
pulumi.String("string"),
},
OnStarts: pulumi.StringArray{
pulumi.String("string"),
},
OnStreamingBacklogExceededs: pulumi.StringArray{
pulumi.String("string"),
},
OnSuccesses: pulumi.StringArray{
pulumi.String("string"),
},
},
EnvironmentKey: pulumi.String("string"),
ExistingClusterId: pulumi.String("string"),
Health: &databricks.JobTaskForEachTaskTaskHealthArgs{
Rules: databricks.JobTaskForEachTaskTaskHealthRuleArray{
&databricks.JobTaskForEachTaskTaskHealthRuleArgs{
Metric: pulumi.String("string"),
Op: pulumi.String("string"),
Value: pulumi.Int(0),
},
},
},
JobClusterKey: pulumi.String("string"),
Libraries: databricks.JobTaskForEachTaskTaskLibraryArray{
&databricks.JobTaskForEachTaskTaskLibraryArgs{
Cran: &databricks.JobTaskForEachTaskTaskLibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
Maven: &databricks.JobTaskForEachTaskTaskLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.JobTaskForEachTaskTaskLibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
},
},
MaxRetries: pulumi.Int(0),
MinRetryIntervalMillis: pulumi.Int(0),
Description: pulumi.String("string"),
DependsOns: databricks.JobTaskForEachTaskTaskDependsOnArray{
&databricks.JobTaskForEachTaskTaskDependsOnArgs{
TaskKey: pulumi.String("string"),
Outcome: pulumi.String("string"),
},
},
SparkPythonTask: &databricks.JobTaskForEachTaskTaskSparkPythonTaskArgs{
PythonFile: pulumi.String("string"),
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
Source: pulumi.String("string"),
},
PipelineTask: &databricks.JobTaskForEachTaskTaskPipelineTaskArgs{
PipelineId: pulumi.String("string"),
FullRefresh: pulumi.Bool(false),
},
PythonWheelTask: &databricks.JobTaskForEachTaskTaskPythonWheelTaskArgs{
EntryPoint: pulumi.String("string"),
NamedParameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
PackageName: pulumi.String("string"),
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
},
RetryOnTimeout: pulumi.Bool(false),
RunIf: pulumi.String("string"),
RunJobTask: &databricks.JobTaskForEachTaskTaskRunJobTaskArgs{
JobId: pulumi.Int(0),
DbtCommands: pulumi.StringArray{
pulumi.String("string"),
},
JarParams: pulumi.StringArray{
pulumi.String("string"),
},
JobParameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
NotebookParams: pulumi.StringMap{
"string": pulumi.String("string"),
},
PipelineParams: &databricks.JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs{
FullRefresh: pulumi.Bool(false),
},
PythonNamedParams: pulumi.StringMap{
"string": pulumi.String("string"),
},
PythonParams: pulumi.StringArray{
pulumi.String("string"),
},
SparkSubmitParams: pulumi.StringArray{
pulumi.String("string"),
},
SqlParams: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
SparkJarTask: &databricks.JobTaskForEachTaskTaskSparkJarTaskArgs{
JarUri: pulumi.String("string"),
MainClassName: pulumi.String("string"),
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
},
NotificationSettings: &databricks.JobTaskForEachTaskTaskNotificationSettingsArgs{
AlertOnLastAttempt: pulumi.Bool(false),
NoAlertForCanceledRuns: pulumi.Bool(false),
NoAlertForSkippedRuns: pulumi.Bool(false),
},
SparkSubmitTask: &databricks.JobTaskForEachTaskTaskSparkSubmitTaskArgs{
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
},
SqlTask: &databricks.JobTaskForEachTaskTaskSqlTaskArgs{
WarehouseId: pulumi.String("string"),
Alert: &databricks.JobTaskForEachTaskTaskSqlTaskAlertArgs{
AlertId: pulumi.String("string"),
PauseSubscriptions: pulumi.Bool(false),
Subscriptions: databricks.JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArray{
&databricks.JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs{
DestinationId: pulumi.String("string"),
UserName: pulumi.String("string"),
},
},
},
Dashboard: &databricks.JobTaskForEachTaskTaskSqlTaskDashboardArgs{
DashboardId: pulumi.String("string"),
CustomSubject: pulumi.String("string"),
PauseSubscriptions: pulumi.Bool(false),
Subscriptions: databricks.JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArray{
&databricks.JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs{
DestinationId: pulumi.String("string"),
UserName: pulumi.String("string"),
},
},
},
File: &databricks.JobTaskForEachTaskTaskSqlTaskFileArgs{
Path: pulumi.String("string"),
Source: pulumi.String("string"),
},
Parameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
Query: &databricks.JobTaskForEachTaskTaskSqlTaskQueryArgs{
QueryId: pulumi.String("string"),
},
},
DbtTask: &databricks.JobTaskForEachTaskTaskDbtTaskArgs{
Commands: pulumi.StringArray{
pulumi.String("string"),
},
Catalog: pulumi.String("string"),
ProfilesDirectory: pulumi.String("string"),
ProjectDirectory: pulumi.String("string"),
Schema: pulumi.String("string"),
Source: pulumi.String("string"),
WarehouseId: pulumi.String("string"),
},
TimeoutSeconds: pulumi.Int(0),
ConditionTask: &databricks.JobTaskForEachTaskTaskConditionTaskArgs{
Left: pulumi.String("string"),
Op: pulumi.String("string"),
Right: pulumi.String("string"),
},
},
Concurrency: pulumi.Int(0),
},
Health: &databricks.JobTaskHealthArgs{
Rules: databricks.JobTaskHealthRuleArray{
&databricks.JobTaskHealthRuleArgs{
Metric: pulumi.String("string"),
Op: pulumi.String("string"),
Value: pulumi.Int(0),
},
},
},
JobClusterKey: pulumi.String("string"),
Libraries: databricks.JobTaskLibraryArray{
&databricks.JobTaskLibraryArgs{
Cran: &databricks.JobTaskLibraryCranArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Egg: pulumi.String("string"),
Jar: pulumi.String("string"),
Maven: &databricks.JobTaskLibraryMavenArgs{
Coordinates: pulumi.String("string"),
Exclusions: pulumi.StringArray{
pulumi.String("string"),
},
Repo: pulumi.String("string"),
},
Pypi: &databricks.JobTaskLibraryPypiArgs{
Package: pulumi.String("string"),
Repo: pulumi.String("string"),
},
Requirements: pulumi.String("string"),
Whl: pulumi.String("string"),
},
},
MaxRetries: pulumi.Int(0),
WebhookNotifications: &databricks.JobTaskWebhookNotificationsArgs{
OnDurationWarningThresholdExceededs: databricks.JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArray{
&databricks.JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs{
Id: pulumi.String("string"),
},
},
OnFailures: databricks.JobTaskWebhookNotificationsOnFailureArray{
&databricks.JobTaskWebhookNotificationsOnFailureArgs{
Id: pulumi.String("string"),
},
},
OnStarts: databricks.JobTaskWebhookNotificationsOnStartArray{
&databricks.JobTaskWebhookNotificationsOnStartArgs{
Id: pulumi.String("string"),
},
},
OnStreamingBacklogExceededs: databricks.JobTaskWebhookNotificationsOnStreamingBacklogExceededArray{
&databricks.JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs{
Id: pulumi.String("string"),
},
},
OnSuccesses: databricks.JobTaskWebhookNotificationsOnSuccessArray{
&databricks.JobTaskWebhookNotificationsOnSuccessArgs{
Id: pulumi.String("string"),
},
},
},
DependsOns: databricks.JobTaskDependsOnArray{
&databricks.JobTaskDependsOnArgs{
TaskKey: pulumi.String("string"),
Outcome: pulumi.String("string"),
},
},
RetryOnTimeout: pulumi.Bool(false),
NotificationSettings: &databricks.JobTaskNotificationSettingsArgs{
AlertOnLastAttempt: pulumi.Bool(false),
NoAlertForCanceledRuns: pulumi.Bool(false),
NoAlertForSkippedRuns: pulumi.Bool(false),
},
PipelineTask: &databricks.JobTaskPipelineTaskArgs{
PipelineId: pulumi.String("string"),
FullRefresh: pulumi.Bool(false),
},
PythonWheelTask: &databricks.JobTaskPythonWheelTaskArgs{
EntryPoint: pulumi.String("string"),
NamedParameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
PackageName: pulumi.String("string"),
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
},
NotebookTask: &databricks.JobTaskNotebookTaskArgs{
NotebookPath: pulumi.String("string"),
BaseParameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
Source: pulumi.String("string"),
WarehouseId: pulumi.String("string"),
},
RunIf: pulumi.String("string"),
RunJobTask: &databricks.JobTaskRunJobTaskArgs{
JobId: pulumi.Int(0),
DbtCommands: pulumi.StringArray{
pulumi.String("string"),
},
JarParams: pulumi.StringArray{
pulumi.String("string"),
},
JobParameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
NotebookParams: pulumi.StringMap{
"string": pulumi.String("string"),
},
PipelineParams: &databricks.JobTaskRunJobTaskPipelineParamsArgs{
FullRefresh: pulumi.Bool(false),
},
PythonNamedParams: pulumi.StringMap{
"string": pulumi.String("string"),
},
PythonParams: pulumi.StringArray{
pulumi.String("string"),
},
SparkSubmitParams: pulumi.StringArray{
pulumi.String("string"),
},
SqlParams: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
SparkJarTask: &databricks.JobTaskSparkJarTaskArgs{
JarUri: pulumi.String("string"),
MainClassName: pulumi.String("string"),
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
},
SparkPythonTask: &databricks.JobTaskSparkPythonTaskArgs{
PythonFile: pulumi.String("string"),
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
Source: pulumi.String("string"),
},
SparkSubmitTask: &databricks.JobTaskSparkSubmitTaskArgs{
Parameters: pulumi.StringArray{
pulumi.String("string"),
},
},
SqlTask: &databricks.JobTaskSqlTaskArgs{
WarehouseId: pulumi.String("string"),
Alert: &databricks.JobTaskSqlTaskAlertArgs{
AlertId: pulumi.String("string"),
PauseSubscriptions: pulumi.Bool(false),
Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
&databricks.JobTaskSqlTaskAlertSubscriptionArgs{
DestinationId: pulumi.String("string"),
UserName: pulumi.String("string"),
},
},
},
Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
DashboardId: pulumi.String("string"),
CustomSubject: pulumi.String("string"),
PauseSubscriptions: pulumi.Bool(false),
Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
&databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
DestinationId: pulumi.String("string"),
UserName: pulumi.String("string"),
},
},
},
File: &databricks.JobTaskSqlTaskFileArgs{
Path: pulumi.String("string"),
Source: pulumi.String("string"),
},
Parameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
Query: &databricks.JobTaskSqlTaskQueryArgs{
QueryId: pulumi.String("string"),
},
},
ConditionTask: &databricks.JobTaskConditionTaskArgs{
Left: pulumi.String("string"),
Op: pulumi.String("string"),
Right: pulumi.String("string"),
},
TimeoutSeconds: pulumi.Int(0),
MinRetryIntervalMillis: pulumi.Int(0),
},
},
TimeoutSeconds: pulumi.Int(0),
Trigger: &databricks.JobTriggerArgs{
FileArrival: &databricks.JobTriggerFileArrivalArgs{
Url: pulumi.String("string"),
MinTimeBetweenTriggersSeconds: pulumi.Int(0),
WaitAfterLastChangeSeconds: pulumi.Int(0),
},
PauseStatus: pulumi.String("string"),
Periodic: &databricks.JobTriggerPeriodicArgs{
Interval: pulumi.Int(0),
Unit: pulumi.String("string"),
},
Table: &databricks.JobTriggerTableArgs{
Condition: pulumi.String("string"),
MinTimeBetweenTriggersSeconds: pulumi.Int(0),
TableNames: pulumi.StringArray{
pulumi.String("string"),
},
WaitAfterLastChangeSeconds: pulumi.Int(0),
},
TableUpdate: &databricks.JobTriggerTableUpdateArgs{
TableNames: pulumi.StringArray{
pulumi.String("string"),
},
Condition: pulumi.String("string"),
MinTimeBetweenTriggersSeconds: pulumi.Int(0),
WaitAfterLastChangeSeconds: pulumi.Int(0),
},
},
WebhookNotifications: &databricks.JobWebhookNotificationsArgs{
OnDurationWarningThresholdExceededs: databricks.JobWebhookNotificationsOnDurationWarningThresholdExceededArray{
&databricks.JobWebhookNotificationsOnDurationWarningThresholdExceededArgs{
Id: pulumi.String("string"),
},
},
OnFailures: databricks.JobWebhookNotificationsOnFailureArray{
&databricks.JobWebhookNotificationsOnFailureArgs{
Id: pulumi.String("string"),
},
},
OnStarts: databricks.JobWebhookNotificationsOnStartArray{
&databricks.JobWebhookNotificationsOnStartArgs{
Id: pulumi.String("string"),
},
},
OnStreamingBacklogExceededs: databricks.JobWebhookNotificationsOnStreamingBacklogExceededArray{
&databricks.JobWebhookNotificationsOnStreamingBacklogExceededArgs{
Id: pulumi.String("string"),
},
},
OnSuccesses: databricks.JobWebhookNotificationsOnSuccessArray{
&databricks.JobWebhookNotificationsOnSuccessArgs{
Id: pulumi.String("string"),
},
},
},
})
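The exhaustive form above enumerates every supported argument with placeholder values; real jobs set only a handful of them. The following Go sketch wires a single scheduled notebook task with queueing and failure e-mails. The notebook path, existing cluster ID, recipient address, and cron expression are hypothetical placeholders, not values from this reference.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Nightly notebook job. The notebook path, cluster ID, and e-mail
		// address below are hypothetical placeholders.
		_, err := databricks.NewJob(ctx, "nightly", &databricks.JobArgs{
			Name:              pulumi.String("Nightly ETL"),
			MaxConcurrentRuns: pulumi.Int(1),
			Queue: &databricks.JobQueueArgs{
				Enabled: pulumi.Bool(true),
			},
			Schedule: &databricks.JobScheduleArgs{
				// Quartz cron: run at 02:00 every day.
				QuartzCronExpression: pulumi.String("0 0 2 * * ?"),
				TimezoneId:           pulumi.String("UTC"),
			},
			EmailNotifications: &databricks.JobEmailNotificationsArgs{
				OnFailures: pulumi.StringArray{
					pulumi.String("ops@example.com"),
				},
			},
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey:           pulumi.String("etl"),
					ExistingClusterId: pulumi.String("0923-164208-meows279"), // hypothetical cluster ID
					NotebookTask: &databricks.JobTaskNotebookTaskArgs{
						NotebookPath: pulumi.String("/Shared/nightly-etl"),
					},
					TimeoutSeconds: pulumi.Int(3600),
					MaxRetries:     pulumi.Int(1),
				},
			},
		})
		return err
	})
}
The equivalent Java constructor form follows.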
var jobResource = new Job("jobResource", JobArgs.builder()
.budgetPolicyId("string")
.continuous(JobContinuousArgs.builder()
.pauseStatus("string")
.build())
.controlRunState(false)
.deployment(JobDeploymentArgs.builder()
.kind("string")
.metadataFilePath("string")
.build())
.description("string")
.editMode("string")
.emailNotifications(JobEmailNotificationsArgs.builder()
.noAlertForSkippedRuns(false)
.onDurationWarningThresholdExceededs("string")
.onFailures("string")
.onStarts("string")
.onStreamingBacklogExceededs("string")
.onSuccesses("string")
.build())
.environments(JobEnvironmentArgs.builder()
.environmentKey("string")
.spec(JobEnvironmentSpecArgs.builder()
.client("string")
.dependencies("string")
.build())
.build())
.existingClusterId("string")
.format("string")
.gitSource(JobGitSourceArgs.builder()
.url("string")
.branch("string")
.commit("string")
.gitSnapshot(JobGitSourceGitSnapshotArgs.builder()
.usedCommit("string")
.build())
.jobSource(JobGitSourceJobSourceArgs.builder()
.importFromGitBranch("string")
.jobConfigPath("string")
.dirtyState("string")
.build())
.provider("string")
.tag("string")
.build())
.health(JobHealthArgs.builder()
.rules(JobHealthRuleArgs.builder()
.metric("string")
.op("string")
.value(0)
.build())
.build())
.jobClusters(JobJobClusterArgs.builder()
.jobClusterKey("string")
.newCluster(JobJobClusterNewClusterArgs.builder()
.sparkVersion("string")
.enableLocalDiskEncryption(false)
.clusterLogConf(JobJobClusterNewClusterClusterLogConfArgs.builder()
.dbfs(JobJobClusterNewClusterClusterLogConfDbfsArgs.builder()
.destination("string")
.build())
.s3(JobJobClusterNewClusterClusterLogConfS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.build())
.gcpAttributes(JobJobClusterNewClusterGcpAttributesArgs.builder()
.availability("string")
.bootDiskSize(0)
.googleServiceAccount("string")
.localSsdCount(0)
.usePreemptibleExecutors(false)
.zoneId("string")
.build())
.clusterId("string")
.idempotencyToken("string")
.clusterMountInfos(JobJobClusterNewClusterClusterMountInfoArgs.builder()
.localMountDirPath("string")
.networkFilesystemInfo(JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
.serverAddress("string")
.mountOptions("string")
.build())
.remoteMountDirPath("string")
.build())
.clusterName("string")
.customTags(Map.of("string", "string"))
.dataSecurityMode("string")
.dockerImage(JobJobClusterNewClusterDockerImageArgs.builder()
.url("string")
.basicAuth(JobJobClusterNewClusterDockerImageBasicAuthArgs.builder()
.password("string")
.username("string")
.build())
.build())
.driverInstancePoolId("string")
.initScripts(JobJobClusterNewClusterInitScriptArgs.builder()
.abfss(JobJobClusterNewClusterInitScriptAbfssArgs.builder()
.destination("string")
.build())
.file(JobJobClusterNewClusterInitScriptFileArgs.builder()
.destination("string")
.build())
.gcs(JobJobClusterNewClusterInitScriptGcsArgs.builder()
.destination("string")
.build())
.s3(JobJobClusterNewClusterInitScriptS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.volumes(JobJobClusterNewClusterInitScriptVolumesArgs.builder()
.destination("string")
.build())
.workspace(JobJobClusterNewClusterInitScriptWorkspaceArgs.builder()
.destination("string")
.build())
.build())
.enableElasticDisk(false)
.applyPolicyDefaultValues(false)
.azureAttributes(JobJobClusterNewClusterAzureAttributesArgs.builder()
.availability("string")
.firstOnDemand(0)
.logAnalyticsInfo(JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
.logAnalyticsPrimaryKey("string")
.logAnalyticsWorkspaceId("string")
.build())
.spotBidMaxPrice(0)
.build())
.awsAttributes(JobJobClusterNewClusterAwsAttributesArgs.builder()
.availability("string")
.ebsVolumeCount(0)
.ebsVolumeIops(0)
.ebsVolumeSize(0)
.ebsVolumeThroughput(0)
.ebsVolumeType("string")
.firstOnDemand(0)
.instanceProfileArn("string")
.spotBidPricePercent(0)
.zoneId("string")
.build())
.driverNodeTypeId("string")
.instancePoolId("string")
.libraries(JobJobClusterNewClusterLibraryArgs.builder()
.cran(JobJobClusterNewClusterLibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.maven(JobJobClusterNewClusterLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(JobJobClusterNewClusterLibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build())
.nodeTypeId("string")
.numWorkers(0)
.policyId("string")
.runtimeEngine("string")
.singleUserName("string")
.sparkConf(Map.of("string", "string"))
.sparkEnvVars(Map.of("string", "string"))
.autoscale(JobJobClusterNewClusterAutoscaleArgs.builder()
.maxWorkers(0)
.minWorkers(0)
.build())
.sshPublicKeys("string")
.workloadType(JobJobClusterNewClusterWorkloadTypeArgs.builder()
.clients(JobJobClusterNewClusterWorkloadTypeClientsArgs.builder()
.jobs(false)
.notebooks(false)
.build())
.build())
.build())
.build())
.libraries(JobLibraryArgs.builder()
.cran(JobLibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.maven(JobLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(JobLibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build())
.maxConcurrentRuns(0)
.name("string")
.newCluster(JobNewClusterArgs.builder()
.sparkVersion("string")
.enableLocalDiskEncryption(false)
.clusterLogConf(JobNewClusterClusterLogConfArgs.builder()
.dbfs(JobNewClusterClusterLogConfDbfsArgs.builder()
.destination("string")
.build())
.s3(JobNewClusterClusterLogConfS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.build())
.gcpAttributes(JobNewClusterGcpAttributesArgs.builder()
.availability("string")
.bootDiskSize(0)
.googleServiceAccount("string")
.localSsdCount(0)
.usePreemptibleExecutors(false)
.zoneId("string")
.build())
.clusterId("string")
.idempotencyToken("string")
.clusterMountInfos(JobNewClusterClusterMountInfoArgs.builder()
.localMountDirPath("string")
.networkFilesystemInfo(JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
.serverAddress("string")
.mountOptions("string")
.build())
.remoteMountDirPath("string")
.build())
.clusterName("string")
.customTags(Map.of("string", "string"))
.dataSecurityMode("string")
.dockerImage(JobNewClusterDockerImageArgs.builder()
.url("string")
.basicAuth(JobNewClusterDockerImageBasicAuthArgs.builder()
.password("string")
.username("string")
.build())
.build())
.driverInstancePoolId("string")
.initScripts(JobNewClusterInitScriptArgs.builder()
.abfss(JobNewClusterInitScriptAbfssArgs.builder()
.destination("string")
.build())
.file(JobNewClusterInitScriptFileArgs.builder()
.destination("string")
.build())
.gcs(JobNewClusterInitScriptGcsArgs.builder()
.destination("string")
.build())
.s3(JobNewClusterInitScriptS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.volumes(JobNewClusterInitScriptVolumesArgs.builder()
.destination("string")
.build())
.workspace(JobNewClusterInitScriptWorkspaceArgs.builder()
.destination("string")
.build())
.build())
.enableElasticDisk(false)
.applyPolicyDefaultValues(false)
.azureAttributes(JobNewClusterAzureAttributesArgs.builder()
.availability("string")
.firstOnDemand(0)
.logAnalyticsInfo(JobNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
.logAnalyticsPrimaryKey("string")
.logAnalyticsWorkspaceId("string")
.build())
.spotBidMaxPrice(0)
.build())
.awsAttributes(JobNewClusterAwsAttributesArgs.builder()
.availability("string")
.ebsVolumeCount(0)
.ebsVolumeIops(0)
.ebsVolumeSize(0)
.ebsVolumeThroughput(0)
.ebsVolumeType("string")
.firstOnDemand(0)
.instanceProfileArn("string")
.spotBidPricePercent(0)
.zoneId("string")
.build())
.driverNodeTypeId("string")
.instancePoolId("string")
.libraries(JobNewClusterLibraryArgs.builder()
.cran(JobNewClusterLibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.maven(JobNewClusterLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(JobNewClusterLibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build())
.nodeTypeId("string")
.numWorkers(0)
.policyId("string")
.runtimeEngine("string")
.singleUserName("string")
.sparkConf(Map.of("string", "string"))
.sparkEnvVars(Map.of("string", "string"))
.autoscale(JobNewClusterAutoscaleArgs.builder()
.maxWorkers(0)
.minWorkers(0)
.build())
.sshPublicKeys("string")
.workloadType(JobNewClusterWorkloadTypeArgs.builder()
.clients(JobNewClusterWorkloadTypeClientsArgs.builder()
.jobs(false)
.notebooks(false)
.build())
.build())
.build())
.notificationSettings(JobNotificationSettingsArgs.builder()
.noAlertForCanceledRuns(false)
.noAlertForSkippedRuns(false)
.build())
.parameters(JobParameterArgs.builder()
.default_("string")
.name("string")
.build())
.queue(JobQueueArgs.builder()
.enabled(false)
.build())
.runAs(JobRunAsArgs.builder()
.servicePrincipalName("string")
.userName("string")
.build())
.schedule(JobScheduleArgs.builder()
.quartzCronExpression("string")
.timezoneId("string")
.pauseStatus("string")
.build())
.tags(Map.of("string", "string"))
.tasks(JobTaskArgs.builder()
.taskKey("string")
.newCluster(JobTaskNewClusterArgs.builder()
.sparkVersion("string")
.enableLocalDiskEncryption(false)
.clusterLogConf(JobTaskNewClusterClusterLogConfArgs.builder()
.dbfs(JobTaskNewClusterClusterLogConfDbfsArgs.builder()
.destination("string")
.build())
.s3(JobTaskNewClusterClusterLogConfS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.build())
.gcpAttributes(JobTaskNewClusterGcpAttributesArgs.builder()
.availability("string")
.bootDiskSize(0)
.googleServiceAccount("string")
.localSsdCount(0)
.usePreemptibleExecutors(false)
.zoneId("string")
.build())
.clusterId("string")
.idempotencyToken("string")
.clusterMountInfos(JobTaskNewClusterClusterMountInfoArgs.builder()
.localMountDirPath("string")
.networkFilesystemInfo(JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
.serverAddress("string")
.mountOptions("string")
.build())
.remoteMountDirPath("string")
.build())
.clusterName("string")
.customTags(Map.of("string", "string"))
.dataSecurityMode("string")
.dockerImage(JobTaskNewClusterDockerImageArgs.builder()
.url("string")
.basicAuth(JobTaskNewClusterDockerImageBasicAuthArgs.builder()
.password("string")
.username("string")
.build())
.build())
.driverInstancePoolId("string")
.initScripts(JobTaskNewClusterInitScriptArgs.builder()
.abfss(JobTaskNewClusterInitScriptAbfssArgs.builder()
.destination("string")
.build())
.file(JobTaskNewClusterInitScriptFileArgs.builder()
.destination("string")
.build())
.gcs(JobTaskNewClusterInitScriptGcsArgs.builder()
.destination("string")
.build())
.s3(JobTaskNewClusterInitScriptS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.volumes(JobTaskNewClusterInitScriptVolumesArgs.builder()
.destination("string")
.build())
.workspace(JobTaskNewClusterInitScriptWorkspaceArgs.builder()
.destination("string")
.build())
.build())
.enableElasticDisk(false)
.applyPolicyDefaultValues(false)
.azureAttributes(JobTaskNewClusterAzureAttributesArgs.builder()
.availability("string")
.firstOnDemand(0)
.logAnalyticsInfo(JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
.logAnalyticsPrimaryKey("string")
.logAnalyticsWorkspaceId("string")
.build())
.spotBidMaxPrice(0)
.build())
.awsAttributes(JobTaskNewClusterAwsAttributesArgs.builder()
.availability("string")
.ebsVolumeCount(0)
.ebsVolumeIops(0)
.ebsVolumeSize(0)
.ebsVolumeThroughput(0)
.ebsVolumeType("string")
.firstOnDemand(0)
.instanceProfileArn("string")
.spotBidPricePercent(0)
.zoneId("string")
.build())
.driverNodeTypeId("string")
.instancePoolId("string")
.libraries(JobTaskNewClusterLibraryArgs.builder()
.cran(JobTaskNewClusterLibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.maven(JobTaskNewClusterLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(JobTaskNewClusterLibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build())
.nodeTypeId("string")
.numWorkers(0)
.policyId("string")
.runtimeEngine("string")
.singleUserName("string")
.sparkConf(Map.of("string", "string"))
.sparkEnvVars(Map.of("string", "string"))
.autoscale(JobTaskNewClusterAutoscaleArgs.builder()
.maxWorkers(0)
.minWorkers(0)
.build())
.sshPublicKeys("string")
.workloadType(JobTaskNewClusterWorkloadTypeArgs.builder()
.clients(JobTaskNewClusterWorkloadTypeClientsArgs.builder()
.jobs(false)
.notebooks(false)
.build())
.build())
.build())
.dbtTask(JobTaskDbtTaskArgs.builder()
.commands("string")
.catalog("string")
.profilesDirectory("string")
.projectDirectory("string")
.schema("string")
.source("string")
.warehouseId("string")
.build())
.description("string")
.disableAutoOptimization(false)
.emailNotifications(JobTaskEmailNotificationsArgs.builder()
.noAlertForSkippedRuns(false)
.onDurationWarningThresholdExceededs("string")
.onFailures("string")
.onStarts("string")
.onStreamingBacklogExceededs("string")
.onSuccesses("string")
.build())
.environmentKey("string")
.existingClusterId("string")
.forEachTask(JobTaskForEachTaskArgs.builder()
.inputs("string")
.task(JobTaskForEachTaskTaskArgs.builder()
.taskKey("string")
.notebookTask(JobTaskForEachTaskTaskNotebookTaskArgs.builder()
.notebookPath("string")
.baseParameters(Map.of("string", "string"))
.source("string")
.warehouseId("string")
.build())
.webhookNotifications(JobTaskForEachTaskTaskWebhookNotificationsArgs.builder()
.onDurationWarningThresholdExceededs(JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs.builder()
.id("string")
.build())
.onFailures(JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs.builder()
.id("string")
.build())
.onStarts(JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs.builder()
.id("string")
.build())
.onStreamingBacklogExceededs(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.builder()
.id("string")
.build())
.onSuccesses(JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs.builder()
.id("string")
.build())
.build())
.newCluster(JobTaskForEachTaskTaskNewClusterArgs.builder()
.sparkVersion("string")
.enableLocalDiskEncryption(false)
.clusterLogConf(JobTaskForEachTaskTaskNewClusterClusterLogConfArgs.builder()
.dbfs(JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs.builder()
.destination("string")
.build())
.s3(JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.build())
.gcpAttributes(JobTaskForEachTaskTaskNewClusterGcpAttributesArgs.builder()
.availability("string")
.bootDiskSize(0)
.googleServiceAccount("string")
.localSsdCount(0)
.usePreemptibleExecutors(false)
.zoneId("string")
.build())
.clusterId("string")
.idempotencyToken("string")
.clusterMountInfos(JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs.builder()
.localMountDirPath("string")
.networkFilesystemInfo(JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs.builder()
.serverAddress("string")
.mountOptions("string")
.build())
.remoteMountDirPath("string")
.build())
.clusterName("string")
.customTags(Map.of("string", "string"))
.dataSecurityMode("string")
.dockerImage(JobTaskForEachTaskTaskNewClusterDockerImageArgs.builder()
.url("string")
.basicAuth(JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs.builder()
.password("string")
.username("string")
.build())
.build())
.driverInstancePoolId("string")
.initScripts(JobTaskForEachTaskTaskNewClusterInitScriptArgs.builder()
.abfss(JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs.builder()
.destination("string")
.build())
.file(JobTaskForEachTaskTaskNewClusterInitScriptFileArgs.builder()
.destination("string")
.build())
.gcs(JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs.builder()
.destination("string")
.build())
.s3(JobTaskForEachTaskTaskNewClusterInitScriptS3Args.builder()
.destination("string")
.cannedAcl("string")
.enableEncryption(false)
.encryptionType("string")
.endpoint("string")
.kmsKey("string")
.region("string")
.build())
.volumes(JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs.builder()
.destination("string")
.build())
.workspace(JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs.builder()
.destination("string")
.build())
.build())
.enableElasticDisk(false)
.applyPolicyDefaultValues(false)
.azureAttributes(JobTaskForEachTaskTaskNewClusterAzureAttributesArgs.builder()
.availability("string")
.firstOnDemand(0)
.logAnalyticsInfo(JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs.builder()
.logAnalyticsPrimaryKey("string")
.logAnalyticsWorkspaceId("string")
.build())
.spotBidMaxPrice(0)
.build())
.awsAttributes(JobTaskForEachTaskTaskNewClusterAwsAttributesArgs.builder()
.availability("string")
.ebsVolumeCount(0)
.ebsVolumeIops(0)
.ebsVolumeSize(0)
.ebsVolumeThroughput(0)
.ebsVolumeType("string")
.firstOnDemand(0)
.instanceProfileArn("string")
.spotBidPricePercent(0)
.zoneId("string")
.build())
.driverNodeTypeId("string")
.instancePoolId("string")
.libraries(JobTaskForEachTaskTaskNewClusterLibraryArgs.builder()
.cran(JobTaskForEachTaskTaskNewClusterLibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.maven(JobTaskForEachTaskTaskNewClusterLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(JobTaskForEachTaskTaskNewClusterLibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build())
.nodeTypeId("string")
.numWorkers(0)
.policyId("string")
.runtimeEngine("string")
.singleUserName("string")
.sparkConf(Map.of("string", "string"))
.sparkEnvVars(Map.of("string", "string"))
.autoscale(JobTaskForEachTaskTaskNewClusterAutoscaleArgs.builder()
.maxWorkers(0)
.minWorkers(0)
.build())
.sshPublicKeys("string")
.workloadType(JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs.builder()
.clients(JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs.builder()
.jobs(false)
.notebooks(false)
.build())
.build())
.build())
.disableAutoOptimization(false)
.emailNotifications(JobTaskForEachTaskTaskEmailNotificationsArgs.builder()
.noAlertForSkippedRuns(false)
.onDurationWarningThresholdExceededs("string")
.onFailures("string")
.onStarts("string")
.onStreamingBacklogExceededs("string")
.onSuccesses("string")
.build())
.environmentKey("string")
.existingClusterId("string")
.health(JobTaskForEachTaskTaskHealthArgs.builder()
.rules(JobTaskForEachTaskTaskHealthRuleArgs.builder()
.metric("string")
.op("string")
.value(0)
.build())
.build())
.jobClusterKey("string")
.libraries(JobTaskForEachTaskTaskLibraryArgs.builder()
.cran(JobTaskForEachTaskTaskLibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.maven(JobTaskForEachTaskTaskLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(JobTaskForEachTaskTaskLibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build())
.maxRetries(0)
.minRetryIntervalMillis(0)
.description("string")
.dependsOns(JobTaskForEachTaskTaskDependsOnArgs.builder()
.taskKey("string")
.outcome("string")
.build())
.sparkPythonTask(JobTaskForEachTaskTaskSparkPythonTaskArgs.builder()
.pythonFile("string")
.parameters("string")
.source("string")
.build())
.pipelineTask(JobTaskForEachTaskTaskPipelineTaskArgs.builder()
.pipelineId("string")
.fullRefresh(false)
.build())
.pythonWheelTask(JobTaskForEachTaskTaskPythonWheelTaskArgs.builder()
.entryPoint("string")
.namedParameters(Map.of("string", "string"))
.packageName("string")
.parameters("string")
.build())
.retryOnTimeout(false)
.runIf("string")
.runJobTask(JobTaskForEachTaskTaskRunJobTaskArgs.builder()
.jobId(0)
.dbtCommands("string")
.jarParams("string")
.jobParameters(Map.of("string", "string"))
.notebookParams(Map.of("string", "string"))
.pipelineParams(JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs.builder()
.fullRefresh(false)
.build())
.pythonNamedParams(Map.of("string", "string"))
.pythonParams("string")
.sparkSubmitParams("string")
.sqlParams(Map.of("string", "string"))
.build())
.sparkJarTask(JobTaskForEachTaskTaskSparkJarTaskArgs.builder()
.jarUri("string")
.mainClassName("string")
.parameters("string")
.build())
.notificationSettings(JobTaskForEachTaskTaskNotificationSettingsArgs.builder()
.alertOnLastAttempt(false)
.noAlertForCanceledRuns(false)
.noAlertForSkippedRuns(false)
.build())
.sparkSubmitTask(JobTaskForEachTaskTaskSparkSubmitTaskArgs.builder()
.parameters("string")
.build())
.sqlTask(JobTaskForEachTaskTaskSqlTaskArgs.builder()
.warehouseId("string")
.alert(JobTaskForEachTaskTaskSqlTaskAlertArgs.builder()
.alertId("string")
.pauseSubscriptions(false)
.subscriptions(JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs.builder()
.destinationId("string")
.userName("string")
.build())
.build())
.dashboard(JobTaskForEachTaskTaskSqlTaskDashboardArgs.builder()
.dashboardId("string")
.customSubject("string")
.pauseSubscriptions(false)
.subscriptions(JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs.builder()
.destinationId("string")
.userName("string")
.build())
.build())
.file(JobTaskForEachTaskTaskSqlTaskFileArgs.builder()
.path("string")
.source("string")
.build())
.parameters(Map.of("string", "string"))
.query(JobTaskForEachTaskTaskSqlTaskQueryArgs.builder()
.queryId("string")
.build())
.build())
.dbtTask(JobTaskForEachTaskTaskDbtTaskArgs.builder()
.commands("string")
.catalog("string")
.profilesDirectory("string")
.projectDirectory("string")
.schema("string")
.source("string")
.warehouseId("string")
.build())
.timeoutSeconds(0)
.conditionTask(JobTaskForEachTaskTaskConditionTaskArgs.builder()
.left("string")
.op("string")
.right("string")
.build())
.build())
.concurrency(0)
.build())
.health(JobTaskHealthArgs.builder()
.rules(JobTaskHealthRuleArgs.builder()
.metric("string")
.op("string")
.value(0)
.build())
.build())
.jobClusterKey("string")
.libraries(JobTaskLibraryArgs.builder()
.cran(JobTaskLibraryCranArgs.builder()
.package_("string")
.repo("string")
.build())
.egg("string")
.jar("string")
.maven(JobTaskLibraryMavenArgs.builder()
.coordinates("string")
.exclusions("string")
.repo("string")
.build())
.pypi(JobTaskLibraryPypiArgs.builder()
.package_("string")
.repo("string")
.build())
.requirements("string")
.whl("string")
.build())
.maxRetries(0)
.webhookNotifications(JobTaskWebhookNotificationsArgs.builder()
.onDurationWarningThresholdExceededs(JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs.builder()
.id("string")
.build())
.onFailures(JobTaskWebhookNotificationsOnFailureArgs.builder()
.id("string")
.build())
.onStarts(JobTaskWebhookNotificationsOnStartArgs.builder()
.id("string")
.build())
.onStreamingBacklogExceededs(JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.builder()
.id("string")
.build())
.onSuccesses(JobTaskWebhookNotificationsOnSuccessArgs.builder()
.id("string")
.build())
.build())
.dependsOns(JobTaskDependsOnArgs.builder()
.taskKey("string")
.outcome("string")
.build())
.retryOnTimeout(false)
.notificationSettings(JobTaskNotificationSettingsArgs.builder()
.alertOnLastAttempt(false)
.noAlertForCanceledRuns(false)
.noAlertForSkippedRuns(false)
.build())
.pipelineTask(JobTaskPipelineTaskArgs.builder()
.pipelineId("string")
.fullRefresh(false)
.build())
.pythonWheelTask(JobTaskPythonWheelTaskArgs.builder()
.entryPoint("string")
.namedParameters(Map.of("string", "string"))
.packageName("string")
.parameters("string")
.build())
.notebookTask(JobTaskNotebookTaskArgs.builder()
.notebookPath("string")
.baseParameters(Map.of("string", "string"))
.source("string")
.warehouseId("string")
.build())
.runIf("string")
.runJobTask(JobTaskRunJobTaskArgs.builder()
.jobId(0)
.dbtCommands("string")
.jarParams("string")
.jobParameters(Map.of("string", "string"))
.notebookParams(Map.of("string", "string"))
.pipelineParams(JobTaskRunJobTaskPipelineParamsArgs.builder()
.fullRefresh(false)
.build())
.pythonNamedParams(Map.of("string", "string"))
.pythonParams("string")
.sparkSubmitParams("string")
.sqlParams(Map.of("string", "string"))
.build())
.sparkJarTask(JobTaskSparkJarTaskArgs.builder()
.jarUri("string")
.mainClassName("string")
.parameters("string")
.build())
.sparkPythonTask(JobTaskSparkPythonTaskArgs.builder()
.pythonFile("string")
.parameters("string")
.source("string")
.build())
.sparkSubmitTask(JobTaskSparkSubmitTaskArgs.builder()
.parameters("string")
.build())
.sqlTask(JobTaskSqlTaskArgs.builder()
.warehouseId("string")
.alert(JobTaskSqlTaskAlertArgs.builder()
.alertId("string")
.pauseSubscriptions(false)
.subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder()
.destinationId("string")
.userName("string")
.build())
.build())
.dashboard(JobTaskSqlTaskDashboardArgs.builder()
.dashboardId("string")
.customSubject("string")
.pauseSubscriptions(false)
.subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder()
.destinationId("string")
.userName("string")
.build())
.build())
.file(JobTaskSqlTaskFileArgs.builder()
.path("string")
.source("string")
.build())
.parameters(Map.of("string", "string"))
.query(JobTaskSqlTaskQueryArgs.builder()
.queryId("string")
.build())
.build())
.conditionTask(JobTaskConditionTaskArgs.builder()
.left("string")
.op("string")
.right("string")
.build())
.timeoutSeconds(0)
.minRetryIntervalMillis(0)
.build())
.timeoutSeconds(0)
.trigger(JobTriggerArgs.builder()
.fileArrival(JobTriggerFileArrivalArgs.builder()
.url("string")
.minTimeBetweenTriggersSeconds(0)
.waitAfterLastChangeSeconds(0)
.build())
.pauseStatus("string")
.periodic(JobTriggerPeriodicArgs.builder()
.interval(0)
.unit("string")
.build())
.table(JobTriggerTableArgs.builder()
.condition("string")
.minTimeBetweenTriggersSeconds(0)
.tableNames("string")
.waitAfterLastChangeSeconds(0)
.build())
.tableUpdate(JobTriggerTableUpdateArgs.builder()
.tableNames("string")
.condition("string")
.minTimeBetweenTriggersSeconds(0)
.waitAfterLastChangeSeconds(0)
.build())
.build())
.webhookNotifications(JobWebhookNotificationsArgs.builder()
.onDurationWarningThresholdExceededs(JobWebhookNotificationsOnDurationWarningThresholdExceededArgs.builder()
.id("string")
.build())
.onFailures(JobWebhookNotificationsOnFailureArgs.builder()
.id("string")
.build())
.onStarts(JobWebhookNotificationsOnStartArgs.builder()
.id("string")
.build())
.onStreamingBacklogExceededs(JobWebhookNotificationsOnStreamingBacklogExceededArgs.builder()
.id("string")
.build())
.onSuccesses(JobWebhookNotificationsOnSuccessArgs.builder()
.id("string")
.build())
.build())
.build());
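The trigger and webhookNotifications blocks shown in the reference can replace a cron schedule so the job runs when new files land in cloud storage. A hedged Go sketch of that combination; the storage URL, webhook destination ID, Spark version, and node type are hypothetical placeholders.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// File-arrival-triggered job. The storage URL, webhook destination ID,
		// Spark version, and node type are hypothetical placeholders.
		_, err := databricks.NewJob(ctx, "onArrival", &databricks.JobArgs{
			Name: pulumi.String("Ingest on file arrival"),
			Trigger: &databricks.JobTriggerArgs{
				FileArrival: &databricks.JobTriggerFileArrivalArgs{
					Url:                           pulumi.String("s3://example-bucket/landing/"),
					MinTimeBetweenTriggersSeconds: pulumi.Int(60),
					WaitAfterLastChangeSeconds:    pulumi.Int(30),
				},
			},
			WebhookNotifications: &databricks.JobWebhookNotificationsArgs{
				OnFailures: databricks.JobWebhookNotificationsOnFailureArray{
					&databricks.JobWebhookNotificationsOnFailureArgs{
						// ID of a previously configured notification destination.
						Id: pulumi.String("00000000-0000-0000-0000-000000000000"),
					},
				},
			},
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("ingest"),
					NewCluster: &databricks.JobTaskNewClusterArgs{
						SparkVersion: pulumi.String("15.4.x-scala2.12"),
						NodeTypeId:   pulumi.String("i3.xlarge"),
						NumWorkers:   pulumi.Int(1),
					},
					NotebookTask: &databricks.JobTaskNotebookTaskArgs{
						NotebookPath: pulumi.String("/Shared/ingest"),
					},
				},
			},
		})
		return err
	})
}
The equivalent Python constructor form follows.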
job_resource = databricks.Job("jobResource",
budget_policy_id="string",
continuous={
"pause_status": "string",
},
control_run_state=False,
deployment={
"kind": "string",
"metadata_file_path": "string",
},
description="string",
edit_mode="string",
email_notifications={
"no_alert_for_skipped_runs": False,
"on_duration_warning_threshold_exceededs": ["string"],
"on_failures": ["string"],
"on_starts": ["string"],
"on_streaming_backlog_exceededs": ["string"],
"on_successes": ["string"],
},
environments=[{
"environment_key": "string",
"spec": {
"client": "string",
"dependencies": ["string"],
},
}],
existing_cluster_id="string",
format="string",
git_source={
"url": "string",
"branch": "string",
"commit": "string",
"git_snapshot": {
"used_commit": "string",
},
"job_source": {
"import_from_git_branch": "string",
"job_config_path": "string",
"dirty_state": "string",
},
"provider": "string",
"tag": "string",
},
health={
"rules": [{
"metric": "string",
"op": "string",
"value": 0,
}],
},
job_clusters=[{
"job_cluster_key": "string",
"new_cluster": {
"spark_version": "string",
"enable_local_disk_encryption": False,
"cluster_log_conf": {
"dbfs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
},
"gcp_attributes": {
"availability": "string",
"boot_disk_size": 0,
"google_service_account": "string",
"local_ssd_count": 0,
"use_preemptible_executors": False,
"zone_id": "string",
},
"cluster_id": "string",
"idempotency_token": "string",
"cluster_mount_infos": [{
"local_mount_dir_path": "string",
"network_filesystem_info": {
"server_address": "string",
"mount_options": "string",
},
"remote_mount_dir_path": "string",
}],
"cluster_name": "string",
"custom_tags": {
"string": "string",
},
"data_security_mode": "string",
"docker_image": {
"url": "string",
"basic_auth": {
"password": "string",
"username": "string",
},
},
"driver_instance_pool_id": "string",
"init_scripts": [{
"abfss": {
"destination": "string",
},
"file": {
"destination": "string",
},
"gcs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
"volumes": {
"destination": "string",
},
"workspace": {
"destination": "string",
},
}],
"enable_elastic_disk": False,
"apply_policy_default_values": False,
"azure_attributes": {
"availability": "string",
"first_on_demand": 0,
"log_analytics_info": {
"log_analytics_primary_key": "string",
"log_analytics_workspace_id": "string",
},
"spot_bid_max_price": 0,
},
"aws_attributes": {
"availability": "string",
"ebs_volume_count": 0,
"ebs_volume_iops": 0,
"ebs_volume_size": 0,
"ebs_volume_throughput": 0,
"ebs_volume_type": "string",
"first_on_demand": 0,
"instance_profile_arn": "string",
"spot_bid_price_percent": 0,
"zone_id": "string",
},
"driver_node_type_id": "string",
"instance_pool_id": "string",
"libraries": [{
"cran": {
"package": "string",
"repo": "string",
},
"egg": "string",
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"pypi": {
"package": "string",
"repo": "string",
},
"requirements": "string",
"whl": "string",
}],
"node_type_id": "string",
"num_workers": 0,
"policy_id": "string",
"runtime_engine": "string",
"single_user_name": "string",
"spark_conf": {
"string": "string",
},
"spark_env_vars": {
"string": "string",
},
"autoscale": {
"max_workers": 0,
"min_workers": 0,
},
"ssh_public_keys": ["string"],
"workload_type": {
"clients": {
"jobs": False,
"notebooks": False,
},
},
},
}],
libraries=[{
"cran": {
"package": "string",
"repo": "string",
},
"egg": "string",
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"pypi": {
"package": "string",
"repo": "string",
},
"requirements": "string",
"whl": "string",
}],
max_concurrent_runs=0,
name="string",
new_cluster={
"spark_version": "string",
"enable_local_disk_encryption": False,
"cluster_log_conf": {
"dbfs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
},
"gcp_attributes": {
"availability": "string",
"boot_disk_size": 0,
"google_service_account": "string",
"local_ssd_count": 0,
"use_preemptible_executors": False,
"zone_id": "string",
},
"cluster_id": "string",
"idempotency_token": "string",
"cluster_mount_infos": [{
"local_mount_dir_path": "string",
"network_filesystem_info": {
"server_address": "string",
"mount_options": "string",
},
"remote_mount_dir_path": "string",
}],
"cluster_name": "string",
"custom_tags": {
"string": "string",
},
"data_security_mode": "string",
"docker_image": {
"url": "string",
"basic_auth": {
"password": "string",
"username": "string",
},
},
"driver_instance_pool_id": "string",
"init_scripts": [{
"abfss": {
"destination": "string",
},
"file": {
"destination": "string",
},
"gcs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
"volumes": {
"destination": "string",
},
"workspace": {
"destination": "string",
},
}],
"enable_elastic_disk": False,
"apply_policy_default_values": False,
"azure_attributes": {
"availability": "string",
"first_on_demand": 0,
"log_analytics_info": {
"log_analytics_primary_key": "string",
"log_analytics_workspace_id": "string",
},
"spot_bid_max_price": 0,
},
"aws_attributes": {
"availability": "string",
"ebs_volume_count": 0,
"ebs_volume_iops": 0,
"ebs_volume_size": 0,
"ebs_volume_throughput": 0,
"ebs_volume_type": "string",
"first_on_demand": 0,
"instance_profile_arn": "string",
"spot_bid_price_percent": 0,
"zone_id": "string",
},
"driver_node_type_id": "string",
"instance_pool_id": "string",
"libraries": [{
"cran": {
"package": "string",
"repo": "string",
},
"egg": "string",
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"pypi": {
"package": "string",
"repo": "string",
},
"requirements": "string",
"whl": "string",
}],
"node_type_id": "string",
"num_workers": 0,
"policy_id": "string",
"runtime_engine": "string",
"single_user_name": "string",
"spark_conf": {
"string": "string",
},
"spark_env_vars": {
"string": "string",
},
"autoscale": {
"max_workers": 0,
"min_workers": 0,
},
"ssh_public_keys": ["string"],
"workload_type": {
"clients": {
"jobs": False,
"notebooks": False,
},
},
},
notification_settings={
"no_alert_for_canceled_runs": False,
"no_alert_for_skipped_runs": False,
},
parameters=[{
"default": "string",
"name": "string",
}],
queue={
"enabled": False,
},
run_as={
"service_principal_name": "string",
"user_name": "string",
},
schedule={
"quartz_cron_expression": "string",
"timezone_id": "string",
"pause_status": "string",
},
tags={
"string": "string",
},
tasks=[{
"task_key": "string",
"new_cluster": {
"spark_version": "string",
"enable_local_disk_encryption": False,
"cluster_log_conf": {
"dbfs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
},
"gcp_attributes": {
"availability": "string",
"boot_disk_size": 0,
"google_service_account": "string",
"local_ssd_count": 0,
"use_preemptible_executors": False,
"zone_id": "string",
},
"cluster_id": "string",
"idempotency_token": "string",
"cluster_mount_infos": [{
"local_mount_dir_path": "string",
"network_filesystem_info": {
"server_address": "string",
"mount_options": "string",
},
"remote_mount_dir_path": "string",
}],
"cluster_name": "string",
"custom_tags": {
"string": "string",
},
"data_security_mode": "string",
"docker_image": {
"url": "string",
"basic_auth": {
"password": "string",
"username": "string",
},
},
"driver_instance_pool_id": "string",
"init_scripts": [{
"abfss": {
"destination": "string",
},
"file": {
"destination": "string",
},
"gcs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
"volumes": {
"destination": "string",
},
"workspace": {
"destination": "string",
},
}],
"enable_elastic_disk": False,
"apply_policy_default_values": False,
"azure_attributes": {
"availability": "string",
"first_on_demand": 0,
"log_analytics_info": {
"log_analytics_primary_key": "string",
"log_analytics_workspace_id": "string",
},
"spot_bid_max_price": 0,
},
"aws_attributes": {
"availability": "string",
"ebs_volume_count": 0,
"ebs_volume_iops": 0,
"ebs_volume_size": 0,
"ebs_volume_throughput": 0,
"ebs_volume_type": "string",
"first_on_demand": 0,
"instance_profile_arn": "string",
"spot_bid_price_percent": 0,
"zone_id": "string",
},
"driver_node_type_id": "string",
"instance_pool_id": "string",
"libraries": [{
"cran": {
"package": "string",
"repo": "string",
},
"egg": "string",
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"pypi": {
"package": "string",
"repo": "string",
},
"requirements": "string",
"whl": "string",
}],
"node_type_id": "string",
"num_workers": 0,
"policy_id": "string",
"runtime_engine": "string",
"single_user_name": "string",
"spark_conf": {
"string": "string",
},
"spark_env_vars": {
"string": "string",
},
"autoscale": {
"max_workers": 0,
"min_workers": 0,
},
"ssh_public_keys": ["string"],
"workload_type": {
"clients": {
"jobs": False,
"notebooks": False,
},
},
},
"dbt_task": {
"commands": ["string"],
"catalog": "string",
"profiles_directory": "string",
"project_directory": "string",
"schema": "string",
"source": "string",
"warehouse_id": "string",
},
"description": "string",
"disable_auto_optimization": False,
"email_notifications": {
"no_alert_for_skipped_runs": False,
"on_duration_warning_threshold_exceededs": ["string"],
"on_failures": ["string"],
"on_starts": ["string"],
"on_streaming_backlog_exceededs": ["string"],
"on_successes": ["string"],
},
"environment_key": "string",
"existing_cluster_id": "string",
"for_each_task": {
"inputs": "string",
"task": {
"task_key": "string",
"notebook_task": {
"notebook_path": "string",
"base_parameters": {
"string": "string",
},
"source": "string",
"warehouse_id": "string",
},
"webhook_notifications": {
"on_duration_warning_threshold_exceededs": [{
"id": "string",
}],
"on_failures": [{
"id": "string",
}],
"on_starts": [{
"id": "string",
}],
"on_streaming_backlog_exceededs": [{
"id": "string",
}],
"on_successes": [{
"id": "string",
}],
},
"new_cluster": {
"spark_version": "string",
"enable_local_disk_encryption": False,
"cluster_log_conf": {
"dbfs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
},
"gcp_attributes": {
"availability": "string",
"boot_disk_size": 0,
"google_service_account": "string",
"local_ssd_count": 0,
"use_preemptible_executors": False,
"zone_id": "string",
},
"cluster_id": "string",
"idempotency_token": "string",
"cluster_mount_infos": [{
"local_mount_dir_path": "string",
"network_filesystem_info": {
"server_address": "string",
"mount_options": "string",
},
"remote_mount_dir_path": "string",
}],
"cluster_name": "string",
"custom_tags": {
"string": "string",
},
"data_security_mode": "string",
"docker_image": {
"url": "string",
"basic_auth": {
"password": "string",
"username": "string",
},
},
"driver_instance_pool_id": "string",
"init_scripts": [{
"abfss": {
"destination": "string",
},
"file": {
"destination": "string",
},
"gcs": {
"destination": "string",
},
"s3": {
"destination": "string",
"canned_acl": "string",
"enable_encryption": False,
"encryption_type": "string",
"endpoint": "string",
"kms_key": "string",
"region": "string",
},
"volumes": {
"destination": "string",
},
"workspace": {
"destination": "string",
},
}],
"enable_elastic_disk": False,
"apply_policy_default_values": False,
"azure_attributes": {
"availability": "string",
"first_on_demand": 0,
"log_analytics_info": {
"log_analytics_primary_key": "string",
"log_analytics_workspace_id": "string",
},
"spot_bid_max_price": 0,
},
"aws_attributes": {
"availability": "string",
"ebs_volume_count": 0,
"ebs_volume_iops": 0,
"ebs_volume_size": 0,
"ebs_volume_throughput": 0,
"ebs_volume_type": "string",
"first_on_demand": 0,
"instance_profile_arn": "string",
"spot_bid_price_percent": 0,
"zone_id": "string",
},
"driver_node_type_id": "string",
"instance_pool_id": "string",
"libraries": [{
"cran": {
"package": "string",
"repo": "string",
},
"egg": "string",
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"pypi": {
"package": "string",
"repo": "string",
},
"requirements": "string",
"whl": "string",
}],
"node_type_id": "string",
"num_workers": 0,
"policy_id": "string",
"runtime_engine": "string",
"single_user_name": "string",
"spark_conf": {
"string": "string",
},
"spark_env_vars": {
"string": "string",
},
"autoscale": {
"max_workers": 0,
"min_workers": 0,
},
"ssh_public_keys": ["string"],
"workload_type": {
"clients": {
"jobs": False,
"notebooks": False,
},
},
},
"disable_auto_optimization": False,
"email_notifications": {
"no_alert_for_skipped_runs": False,
"on_duration_warning_threshold_exceededs": ["string"],
"on_failures": ["string"],
"on_starts": ["string"],
"on_streaming_backlog_exceededs": ["string"],
"on_successes": ["string"],
},
"environment_key": "string",
"existing_cluster_id": "string",
"health": {
"rules": [{
"metric": "string",
"op": "string",
"value": 0,
}],
},
"job_cluster_key": "string",
"libraries": [{
"cran": {
"package": "string",
"repo": "string",
},
"egg": "string",
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"pypi": {
"package": "string",
"repo": "string",
},
"requirements": "string",
"whl": "string",
}],
"max_retries": 0,
"min_retry_interval_millis": 0,
"description": "string",
"depends_ons": [{
"task_key": "string",
"outcome": "string",
}],
"spark_python_task": {
"python_file": "string",
"parameters": ["string"],
"source": "string",
},
"pipeline_task": {
"pipeline_id": "string",
"full_refresh": False,
},
"python_wheel_task": {
"entry_point": "string",
"named_parameters": {
"string": "string",
},
"package_name": "string",
"parameters": ["string"],
},
"retry_on_timeout": False,
"run_if": "string",
"run_job_task": {
"job_id": 0,
"dbt_commands": ["string"],
"jar_params": ["string"],
"job_parameters": {
"string": "string",
},
"notebook_params": {
"string": "string",
},
"pipeline_params": {
"full_refresh": False,
},
"python_named_params": {
"string": "string",
},
"python_params": ["string"],
"spark_submit_params": ["string"],
"sql_params": {
"string": "string",
},
},
"spark_jar_task": {
"jar_uri": "string",
"main_class_name": "string",
"parameters": ["string"],
},
"notification_settings": {
"alert_on_last_attempt": False,
"no_alert_for_canceled_runs": False,
"no_alert_for_skipped_runs": False,
},
"spark_submit_task": {
"parameters": ["string"],
},
"sql_task": {
"warehouse_id": "string",
"alert": {
"alert_id": "string",
"pause_subscriptions": False,
"subscriptions": [{
"destination_id": "string",
"user_name": "string",
}],
},
"dashboard": {
"dashboard_id": "string",
"custom_subject": "string",
"pause_subscriptions": False,
"subscriptions": [{
"destination_id": "string",
"user_name": "string",
}],
},
"file": {
"path": "string",
"source": "string",
},
"parameters": {
"string": "string",
},
"query": {
"query_id": "string",
},
},
"dbt_task": {
"commands": ["string"],
"catalog": "string",
"profiles_directory": "string",
"project_directory": "string",
"schema": "string",
"source": "string",
"warehouse_id": "string",
},
"timeout_seconds": 0,
"condition_task": {
"left": "string",
"op": "string",
"right": "string",
},
},
"concurrency": 0,
},
"health": {
"rules": [{
"metric": "string",
"op": "string",
"value": 0,
}],
},
"job_cluster_key": "string",
"libraries": [{
"cran": {
"package": "string",
"repo": "string",
},
"egg": "string",
"jar": "string",
"maven": {
"coordinates": "string",
"exclusions": ["string"],
"repo": "string",
},
"pypi": {
"package": "string",
"repo": "string",
},
"requirements": "string",
"whl": "string",
}],
"max_retries": 0,
"webhook_notifications": {
"on_duration_warning_threshold_exceededs": [{
"id": "string",
}],
"on_failures": [{
"id": "string",
}],
"on_starts": [{
"id": "string",
}],
"on_streaming_backlog_exceededs": [{
"id": "string",
}],
"on_successes": [{
"id": "string",
}],
},
"depends_ons": [{
"task_key": "string",
"outcome": "string",
}],
"retry_on_timeout": False,
"notification_settings": {
"alert_on_last_attempt": False,
"no_alert_for_canceled_runs": False,
"no_alert_for_skipped_runs": False,
},
"pipeline_task": {
"pipeline_id": "string",
"full_refresh": False,
},
"python_wheel_task": {
"entry_point": "string",
"named_parameters": {
"string": "string",
},
"package_name": "string",
"parameters": ["string"],
},
"notebook_task": {
"notebook_path": "string",
"base_parameters": {
"string": "string",
},
"source": "string",
"warehouse_id": "string",
},
"run_if": "string",
"run_job_task": {
"job_id": 0,
"dbt_commands": ["string"],
"jar_params": ["string"],
"job_parameters": {
"string": "string",
},
"notebook_params": {
"string": "string",
},
"pipeline_params": {
"full_refresh": False,
},
"python_named_params": {
"string": "string",
},
"python_params": ["string"],
"spark_submit_params": ["string"],
"sql_params": {
"string": "string",
},
},
"spark_jar_task": {
"jar_uri": "string",
"main_class_name": "string",
"parameters": ["string"],
},
"spark_python_task": {
"python_file": "string",
"parameters": ["string"],
"source": "string",
},
"spark_submit_task": {
"parameters": ["string"],
},
"sql_task": {
"warehouse_id": "string",
"alert": {
"alert_id": "string",
"pause_subscriptions": False,
"subscriptions": [{
"destination_id": "string",
"user_name": "string",
}],
},
"dashboard": {
"dashboard_id": "string",
"custom_subject": "string",
"pause_subscriptions": False,
"subscriptions": [{
"destination_id": "string",
"user_name": "string",
}],
},
"file": {
"path": "string",
"source": "string",
},
"parameters": {
"string": "string",
},
"query": {
"query_id": "string",
},
},
"condition_task": {
"left": "string",
"op": "string",
"right": "string",
},
"timeout_seconds": 0,
"min_retry_interval_millis": 0,
}],
timeout_seconds=0,
trigger={
"file_arrival": {
"url": "string",
"min_time_between_triggers_seconds": 0,
"wait_after_last_change_seconds": 0,
},
"pause_status": "string",
"periodic": {
"interval": 0,
"unit": "string",
},
"table": {
"condition": "string",
"min_time_between_triggers_seconds": 0,
"table_names": ["string"],
"wait_after_last_change_seconds": 0,
},
"table_update": {
"table_names": ["string"],
"condition": "string",
"min_time_between_triggers_seconds": 0,
"wait_after_last_change_seconds": 0,
},
},
webhook_notifications={
"on_duration_warning_threshold_exceededs": [{
"id": "string",
}],
"on_failures": [{
"id": "string",
}],
"on_starts": [{
"id": "string",
}],
"on_streaming_backlog_exceededs": [{
"id": "string",
}],
"on_successes": [{
"id": "string",
}],
})
const jobResource = new databricks.Job("jobResource", {
budgetPolicyId: "string",
continuous: {
pauseStatus: "string",
},
controlRunState: false,
deployment: {
kind: "string",
metadataFilePath: "string",
},
description: "string",
editMode: "string",
emailNotifications: {
noAlertForSkippedRuns: false,
onDurationWarningThresholdExceededs: ["string"],
onFailures: ["string"],
onStarts: ["string"],
onStreamingBacklogExceededs: ["string"],
onSuccesses: ["string"],
},
environments: [{
environmentKey: "string",
spec: {
client: "string",
dependencies: ["string"],
},
}],
existingClusterId: "string",
format: "string",
gitSource: {
url: "string",
branch: "string",
commit: "string",
gitSnapshot: {
usedCommit: "string",
},
jobSource: {
importFromGitBranch: "string",
jobConfigPath: "string",
dirtyState: "string",
},
provider: "string",
tag: "string",
},
health: {
rules: [{
metric: "string",
op: "string",
value: 0,
}],
},
jobClusters: [{
jobClusterKey: "string",
newCluster: {
sparkVersion: "string",
enableLocalDiskEncryption: false,
clusterLogConf: {
dbfs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
},
gcpAttributes: {
availability: "string",
bootDiskSize: 0,
googleServiceAccount: "string",
localSsdCount: 0,
usePreemptibleExecutors: false,
zoneId: "string",
},
clusterId: "string",
idempotencyToken: "string",
clusterMountInfos: [{
localMountDirPath: "string",
networkFilesystemInfo: {
serverAddress: "string",
mountOptions: "string",
},
remoteMountDirPath: "string",
}],
clusterName: "string",
customTags: {
string: "string",
},
dataSecurityMode: "string",
dockerImage: {
url: "string",
basicAuth: {
password: "string",
username: "string",
},
},
driverInstancePoolId: "string",
initScripts: [{
abfss: {
destination: "string",
},
file: {
destination: "string",
},
gcs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
volumes: {
destination: "string",
},
workspace: {
destination: "string",
},
}],
enableElasticDisk: false,
applyPolicyDefaultValues: false,
azureAttributes: {
availability: "string",
firstOnDemand: 0,
logAnalyticsInfo: {
logAnalyticsPrimaryKey: "string",
logAnalyticsWorkspaceId: "string",
},
spotBidMaxPrice: 0,
},
awsAttributes: {
availability: "string",
ebsVolumeCount: 0,
ebsVolumeIops: 0,
ebsVolumeSize: 0,
ebsVolumeThroughput: 0,
ebsVolumeType: "string",
firstOnDemand: 0,
instanceProfileArn: "string",
spotBidPricePercent: 0,
zoneId: "string",
},
driverNodeTypeId: "string",
instancePoolId: "string",
libraries: [{
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
}],
nodeTypeId: "string",
numWorkers: 0,
policyId: "string",
runtimeEngine: "string",
singleUserName: "string",
sparkConf: {
string: "string",
},
sparkEnvVars: {
string: "string",
},
autoscale: {
maxWorkers: 0,
minWorkers: 0,
},
sshPublicKeys: ["string"],
workloadType: {
clients: {
jobs: false,
notebooks: false,
},
},
},
}],
libraries: [{
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
}],
maxConcurrentRuns: 0,
name: "string",
newCluster: {
sparkVersion: "string",
enableLocalDiskEncryption: false,
clusterLogConf: {
dbfs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
},
gcpAttributes: {
availability: "string",
bootDiskSize: 0,
googleServiceAccount: "string",
localSsdCount: 0,
usePreemptibleExecutors: false,
zoneId: "string",
},
clusterId: "string",
idempotencyToken: "string",
clusterMountInfos: [{
localMountDirPath: "string",
networkFilesystemInfo: {
serverAddress: "string",
mountOptions: "string",
},
remoteMountDirPath: "string",
}],
clusterName: "string",
customTags: {
string: "string",
},
dataSecurityMode: "string",
dockerImage: {
url: "string",
basicAuth: {
password: "string",
username: "string",
},
},
driverInstancePoolId: "string",
initScripts: [{
abfss: {
destination: "string",
},
file: {
destination: "string",
},
gcs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
volumes: {
destination: "string",
},
workspace: {
destination: "string",
},
}],
enableElasticDisk: false,
applyPolicyDefaultValues: false,
azureAttributes: {
availability: "string",
firstOnDemand: 0,
logAnalyticsInfo: {
logAnalyticsPrimaryKey: "string",
logAnalyticsWorkspaceId: "string",
},
spotBidMaxPrice: 0,
},
awsAttributes: {
availability: "string",
ebsVolumeCount: 0,
ebsVolumeIops: 0,
ebsVolumeSize: 0,
ebsVolumeThroughput: 0,
ebsVolumeType: "string",
firstOnDemand: 0,
instanceProfileArn: "string",
spotBidPricePercent: 0,
zoneId: "string",
},
driverNodeTypeId: "string",
instancePoolId: "string",
libraries: [{
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
}],
nodeTypeId: "string",
numWorkers: 0,
policyId: "string",
runtimeEngine: "string",
singleUserName: "string",
sparkConf: {
string: "string",
},
sparkEnvVars: {
string: "string",
},
autoscale: {
maxWorkers: 0,
minWorkers: 0,
},
sshPublicKeys: ["string"],
workloadType: {
clients: {
jobs: false,
notebooks: false,
},
},
},
notificationSettings: {
noAlertForCanceledRuns: false,
noAlertForSkippedRuns: false,
},
parameters: [{
"default": "string",
name: "string",
}],
queue: {
enabled: false,
},
runAs: {
servicePrincipalName: "string",
userName: "string",
},
schedule: {
quartzCronExpression: "string",
timezoneId: "string",
pauseStatus: "string",
},
tags: {
string: "string",
},
tasks: [{
taskKey: "string",
newCluster: {
sparkVersion: "string",
enableLocalDiskEncryption: false,
clusterLogConf: {
dbfs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
},
gcpAttributes: {
availability: "string",
bootDiskSize: 0,
googleServiceAccount: "string",
localSsdCount: 0,
usePreemptibleExecutors: false,
zoneId: "string",
},
clusterId: "string",
idempotencyToken: "string",
clusterMountInfos: [{
localMountDirPath: "string",
networkFilesystemInfo: {
serverAddress: "string",
mountOptions: "string",
},
remoteMountDirPath: "string",
}],
clusterName: "string",
customTags: {
string: "string",
},
dataSecurityMode: "string",
dockerImage: {
url: "string",
basicAuth: {
password: "string",
username: "string",
},
},
driverInstancePoolId: "string",
initScripts: [{
abfss: {
destination: "string",
},
file: {
destination: "string",
},
gcs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
volumes: {
destination: "string",
},
workspace: {
destination: "string",
},
}],
enableElasticDisk: false,
applyPolicyDefaultValues: false,
azureAttributes: {
availability: "string",
firstOnDemand: 0,
logAnalyticsInfo: {
logAnalyticsPrimaryKey: "string",
logAnalyticsWorkspaceId: "string",
},
spotBidMaxPrice: 0,
},
awsAttributes: {
availability: "string",
ebsVolumeCount: 0,
ebsVolumeIops: 0,
ebsVolumeSize: 0,
ebsVolumeThroughput: 0,
ebsVolumeType: "string",
firstOnDemand: 0,
instanceProfileArn: "string",
spotBidPricePercent: 0,
zoneId: "string",
},
driverNodeTypeId: "string",
instancePoolId: "string",
libraries: [{
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
}],
nodeTypeId: "string",
numWorkers: 0,
policyId: "string",
runtimeEngine: "string",
singleUserName: "string",
sparkConf: {
string: "string",
},
sparkEnvVars: {
string: "string",
},
autoscale: {
maxWorkers: 0,
minWorkers: 0,
},
sshPublicKeys: ["string"],
workloadType: {
clients: {
jobs: false,
notebooks: false,
},
},
},
dbtTask: {
commands: ["string"],
catalog: "string",
profilesDirectory: "string",
projectDirectory: "string",
schema: "string",
source: "string",
warehouseId: "string",
},
description: "string",
disableAutoOptimization: false,
emailNotifications: {
noAlertForSkippedRuns: false,
onDurationWarningThresholdExceededs: ["string"],
onFailures: ["string"],
onStarts: ["string"],
onStreamingBacklogExceededs: ["string"],
onSuccesses: ["string"],
},
environmentKey: "string",
existingClusterId: "string",
forEachTask: {
inputs: "string",
task: {
taskKey: "string",
notebookTask: {
notebookPath: "string",
baseParameters: {
string: "string",
},
source: "string",
warehouseId: "string",
},
webhookNotifications: {
onDurationWarningThresholdExceededs: [{
id: "string",
}],
onFailures: [{
id: "string",
}],
onStarts: [{
id: "string",
}],
onStreamingBacklogExceededs: [{
id: "string",
}],
onSuccesses: [{
id: "string",
}],
},
newCluster: {
sparkVersion: "string",
enableLocalDiskEncryption: false,
clusterLogConf: {
dbfs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
},
gcpAttributes: {
availability: "string",
bootDiskSize: 0,
googleServiceAccount: "string",
localSsdCount: 0,
usePreemptibleExecutors: false,
zoneId: "string",
},
clusterId: "string",
idempotencyToken: "string",
clusterMountInfos: [{
localMountDirPath: "string",
networkFilesystemInfo: {
serverAddress: "string",
mountOptions: "string",
},
remoteMountDirPath: "string",
}],
clusterName: "string",
customTags: {
string: "string",
},
dataSecurityMode: "string",
dockerImage: {
url: "string",
basicAuth: {
password: "string",
username: "string",
},
},
driverInstancePoolId: "string",
initScripts: [{
abfss: {
destination: "string",
},
file: {
destination: "string",
},
gcs: {
destination: "string",
},
s3: {
destination: "string",
cannedAcl: "string",
enableEncryption: false,
encryptionType: "string",
endpoint: "string",
kmsKey: "string",
region: "string",
},
volumes: {
destination: "string",
},
workspace: {
destination: "string",
},
}],
enableElasticDisk: false,
applyPolicyDefaultValues: false,
azureAttributes: {
availability: "string",
firstOnDemand: 0,
logAnalyticsInfo: {
logAnalyticsPrimaryKey: "string",
logAnalyticsWorkspaceId: "string",
},
spotBidMaxPrice: 0,
},
awsAttributes: {
availability: "string",
ebsVolumeCount: 0,
ebsVolumeIops: 0,
ebsVolumeSize: 0,
ebsVolumeThroughput: 0,
ebsVolumeType: "string",
firstOnDemand: 0,
instanceProfileArn: "string",
spotBidPricePercent: 0,
zoneId: "string",
},
driverNodeTypeId: "string",
instancePoolId: "string",
libraries: [{
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
}],
nodeTypeId: "string",
numWorkers: 0,
policyId: "string",
runtimeEngine: "string",
singleUserName: "string",
sparkConf: {
string: "string",
},
sparkEnvVars: {
string: "string",
},
autoscale: {
maxWorkers: 0,
minWorkers: 0,
},
sshPublicKeys: ["string"],
workloadType: {
clients: {
jobs: false,
notebooks: false,
},
},
},
disableAutoOptimization: false,
emailNotifications: {
noAlertForSkippedRuns: false,
onDurationWarningThresholdExceededs: ["string"],
onFailures: ["string"],
onStarts: ["string"],
onStreamingBacklogExceededs: ["string"],
onSuccesses: ["string"],
},
environmentKey: "string",
existingClusterId: "string",
health: {
rules: [{
metric: "string",
op: "string",
value: 0,
}],
},
jobClusterKey: "string",
libraries: [{
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
}],
maxRetries: 0,
minRetryIntervalMillis: 0,
description: "string",
dependsOns: [{
taskKey: "string",
outcome: "string",
}],
sparkPythonTask: {
pythonFile: "string",
parameters: ["string"],
source: "string",
},
pipelineTask: {
pipelineId: "string",
fullRefresh: false,
},
pythonWheelTask: {
entryPoint: "string",
namedParameters: {
string: "string",
},
packageName: "string",
parameters: ["string"],
},
retryOnTimeout: false,
runIf: "string",
runJobTask: {
jobId: 0,
dbtCommands: ["string"],
jarParams: ["string"],
jobParameters: {
string: "string",
},
notebookParams: {
string: "string",
},
pipelineParams: {
fullRefresh: false,
},
pythonNamedParams: {
string: "string",
},
pythonParams: ["string"],
sparkSubmitParams: ["string"],
sqlParams: {
string: "string",
},
},
sparkJarTask: {
jarUri: "string",
mainClassName: "string",
parameters: ["string"],
},
notificationSettings: {
alertOnLastAttempt: false,
noAlertForCanceledRuns: false,
noAlertForSkippedRuns: false,
},
sparkSubmitTask: {
parameters: ["string"],
},
sqlTask: {
warehouseId: "string",
alert: {
alertId: "string",
pauseSubscriptions: false,
subscriptions: [{
destinationId: "string",
userName: "string",
}],
},
dashboard: {
dashboardId: "string",
customSubject: "string",
pauseSubscriptions: false,
subscriptions: [{
destinationId: "string",
userName: "string",
}],
},
file: {
path: "string",
source: "string",
},
parameters: {
string: "string",
},
query: {
queryId: "string",
},
},
dbtTask: {
commands: ["string"],
catalog: "string",
profilesDirectory: "string",
projectDirectory: "string",
schema: "string",
source: "string",
warehouseId: "string",
},
timeoutSeconds: 0,
conditionTask: {
left: "string",
op: "string",
right: "string",
},
},
concurrency: 0,
},
health: {
rules: [{
metric: "string",
op: "string",
value: 0,
}],
},
jobClusterKey: "string",
libraries: [{
cran: {
"package": "string",
repo: "string",
},
egg: "string",
jar: "string",
maven: {
coordinates: "string",
exclusions: ["string"],
repo: "string",
},
pypi: {
"package": "string",
repo: "string",
},
requirements: "string",
whl: "string",
}],
maxRetries: 0,
webhookNotifications: {
onDurationWarningThresholdExceededs: [{
id: "string",
}],
onFailures: [{
id: "string",
}],
onStarts: [{
id: "string",
}],
onStreamingBacklogExceededs: [{
id: "string",
}],
onSuccesses: [{
id: "string",
}],
},
dependsOns: [{
taskKey: "string",
outcome: "string",
}],
retryOnTimeout: false,
notificationSettings: {
alertOnLastAttempt: false,
noAlertForCanceledRuns: false,
noAlertForSkippedRuns: false,
},
pipelineTask: {
pipelineId: "string",
fullRefresh: false,
},
pythonWheelTask: {
entryPoint: "string",
namedParameters: {
string: "string",
},
packageName: "string",
parameters: ["string"],
},
notebookTask: {
notebookPath: "string",
baseParameters: {
string: "string",
},
source: "string",
warehouseId: "string",
},
runIf: "string",
runJobTask: {
jobId: 0,
dbtCommands: ["string"],
jarParams: ["string"],
jobParameters: {
string: "string",
},
notebookParams: {
string: "string",
},
pipelineParams: {
fullRefresh: false,
},
pythonNamedParams: {
string: "string",
},
pythonParams: ["string"],
sparkSubmitParams: ["string"],
sqlParams: {
string: "string",
},
},
sparkJarTask: {
jarUri: "string",
mainClassName: "string",
parameters: ["string"],
},
sparkPythonTask: {
pythonFile: "string",
parameters: ["string"],
source: "string",
},
sparkSubmitTask: {
parameters: ["string"],
},
sqlTask: {
warehouseId: "string",
alert: {
alertId: "string",
pauseSubscriptions: false,
subscriptions: [{
destinationId: "string",
userName: "string",
}],
},
dashboard: {
dashboardId: "string",
customSubject: "string",
pauseSubscriptions: false,
subscriptions: [{
destinationId: "string",
userName: "string",
}],
},
file: {
path: "string",
source: "string",
},
parameters: {
string: "string",
},
query: {
queryId: "string",
},
},
conditionTask: {
left: "string",
op: "string",
right: "string",
},
timeoutSeconds: 0,
minRetryIntervalMillis: 0,
}],
timeoutSeconds: 0,
trigger: {
fileArrival: {
url: "string",
minTimeBetweenTriggersSeconds: 0,
waitAfterLastChangeSeconds: 0,
},
pauseStatus: "string",
periodic: {
interval: 0,
unit: "string",
},
table: {
condition: "string",
minTimeBetweenTriggersSeconds: 0,
tableNames: ["string"],
waitAfterLastChangeSeconds: 0,
},
tableUpdate: {
tableNames: ["string"],
condition: "string",
minTimeBetweenTriggersSeconds: 0,
waitAfterLastChangeSeconds: 0,
},
},
webhookNotifications: {
onDurationWarningThresholdExceededs: [{
id: "string",
}],
onFailures: [{
id: "string",
}],
onStarts: [{
id: "string",
}],
onStreamingBacklogExceededs: [{
id: "string",
}],
onSuccesses: [{
id: "string",
}],
},
});
type: databricks:Job
properties:
budgetPolicyId: string
continuous:
pauseStatus: string
controlRunState: false
deployment:
kind: string
metadataFilePath: string
description: string
editMode: string
emailNotifications:
noAlertForSkippedRuns: false
onDurationWarningThresholdExceededs:
- string
onFailures:
- string
onStarts:
- string
onStreamingBacklogExceededs:
- string
onSuccesses:
- string
environments:
- environmentKey: string
spec:
client: string
dependencies:
- string
existingClusterId: string
format: string
gitSource:
branch: string
commit: string
gitSnapshot:
usedCommit: string
jobSource:
dirtyState: string
importFromGitBranch: string
jobConfigPath: string
provider: string
tag: string
url: string
health:
rules:
- metric: string
op: string
value: 0
jobClusters:
- jobClusterKey: string
newCluster:
applyPolicyDefaultValues: false
autoscale:
maxWorkers: 0
minWorkers: 0
awsAttributes:
availability: string
ebsVolumeCount: 0
ebsVolumeIops: 0
ebsVolumeSize: 0
ebsVolumeThroughput: 0
ebsVolumeType: string
firstOnDemand: 0
instanceProfileArn: string
spotBidPricePercent: 0
zoneId: string
azureAttributes:
availability: string
firstOnDemand: 0
logAnalyticsInfo:
logAnalyticsPrimaryKey: string
logAnalyticsWorkspaceId: string
spotBidMaxPrice: 0
clusterId: string
clusterLogConf:
dbfs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
clusterMountInfos:
- localMountDirPath: string
networkFilesystemInfo:
mountOptions: string
serverAddress: string
remoteMountDirPath: string
clusterName: string
customTags:
string: string
dataSecurityMode: string
dockerImage:
basicAuth:
password: string
username: string
url: string
driverInstancePoolId: string
driverNodeTypeId: string
enableElasticDisk: false
enableLocalDiskEncryption: false
gcpAttributes:
availability: string
bootDiskSize: 0
googleServiceAccount: string
localSsdCount: 0
usePreemptibleExecutors: false
zoneId: string
idempotencyToken: string
initScripts:
- abfss:
destination: string
file:
destination: string
gcs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
volumes:
destination: string
workspace:
destination: string
instancePoolId: string
libraries:
- cran:
package: string
repo: string
egg: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
nodeTypeId: string
numWorkers: 0
policyId: string
runtimeEngine: string
singleUserName: string
sparkConf:
string: string
sparkEnvVars:
string: string
sparkVersion: string
sshPublicKeys:
- string
workloadType:
clients:
jobs: false
notebooks: false
libraries:
- cran:
package: string
repo: string
egg: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
maxConcurrentRuns: 0
name: string
newCluster:
applyPolicyDefaultValues: false
autoscale:
maxWorkers: 0
minWorkers: 0
awsAttributes:
availability: string
ebsVolumeCount: 0
ebsVolumeIops: 0
ebsVolumeSize: 0
ebsVolumeThroughput: 0
ebsVolumeType: string
firstOnDemand: 0
instanceProfileArn: string
spotBidPricePercent: 0
zoneId: string
azureAttributes:
availability: string
firstOnDemand: 0
logAnalyticsInfo:
logAnalyticsPrimaryKey: string
logAnalyticsWorkspaceId: string
spotBidMaxPrice: 0
clusterId: string
clusterLogConf:
dbfs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
clusterMountInfos:
- localMountDirPath: string
networkFilesystemInfo:
mountOptions: string
serverAddress: string
remoteMountDirPath: string
clusterName: string
customTags:
string: string
dataSecurityMode: string
dockerImage:
basicAuth:
password: string
username: string
url: string
driverInstancePoolId: string
driverNodeTypeId: string
enableElasticDisk: false
enableLocalDiskEncryption: false
gcpAttributes:
availability: string
bootDiskSize: 0
googleServiceAccount: string
localSsdCount: 0
usePreemptibleExecutors: false
zoneId: string
idempotencyToken: string
initScripts:
- abfss:
destination: string
file:
destination: string
gcs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
volumes:
destination: string
workspace:
destination: string
instancePoolId: string
libraries:
- cran:
package: string
repo: string
egg: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
nodeTypeId: string
numWorkers: 0
policyId: string
runtimeEngine: string
singleUserName: string
sparkConf:
string: string
sparkEnvVars:
string: string
sparkVersion: string
sshPublicKeys:
- string
workloadType:
clients:
jobs: false
notebooks: false
notificationSettings:
noAlertForCanceledRuns: false
noAlertForSkippedRuns: false
parameters:
- default: string
name: string
queue:
enabled: false
runAs:
servicePrincipalName: string
userName: string
schedule:
pauseStatus: string
quartzCronExpression: string
timezoneId: string
tags:
string: string
tasks:
- conditionTask:
left: string
op: string
right: string
dbtTask:
catalog: string
commands:
- string
profilesDirectory: string
projectDirectory: string
schema: string
source: string
warehouseId: string
dependsOns:
- outcome: string
taskKey: string
description: string
disableAutoOptimization: false
emailNotifications:
noAlertForSkippedRuns: false
onDurationWarningThresholdExceededs:
- string
onFailures:
- string
onStarts:
- string
onStreamingBacklogExceededs:
- string
onSuccesses:
- string
environmentKey: string
existingClusterId: string
forEachTask:
concurrency: 0
inputs: string
task:
conditionTask:
left: string
op: string
right: string
dbtTask:
catalog: string
commands:
- string
profilesDirectory: string
projectDirectory: string
schema: string
source: string
warehouseId: string
dependsOns:
- outcome: string
taskKey: string
description: string
disableAutoOptimization: false
emailNotifications:
noAlertForSkippedRuns: false
onDurationWarningThresholdExceededs:
- string
onFailures:
- string
onStarts:
- string
onStreamingBacklogExceededs:
- string
onSuccesses:
- string
environmentKey: string
existingClusterId: string
health:
rules:
- metric: string
op: string
value: 0
jobClusterKey: string
libraries:
- cran:
package: string
repo: string
egg: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
maxRetries: 0
minRetryIntervalMillis: 0
newCluster:
applyPolicyDefaultValues: false
autoscale:
maxWorkers: 0
minWorkers: 0
awsAttributes:
availability: string
ebsVolumeCount: 0
ebsVolumeIops: 0
ebsVolumeSize: 0
ebsVolumeThroughput: 0
ebsVolumeType: string
firstOnDemand: 0
instanceProfileArn: string
spotBidPricePercent: 0
zoneId: string
azureAttributes:
availability: string
firstOnDemand: 0
logAnalyticsInfo:
logAnalyticsPrimaryKey: string
logAnalyticsWorkspaceId: string
spotBidMaxPrice: 0
clusterId: string
clusterLogConf:
dbfs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
clusterMountInfos:
- localMountDirPath: string
networkFilesystemInfo:
mountOptions: string
serverAddress: string
remoteMountDirPath: string
clusterName: string
customTags:
string: string
dataSecurityMode: string
dockerImage:
basicAuth:
password: string
username: string
url: string
driverInstancePoolId: string
driverNodeTypeId: string
enableElasticDisk: false
enableLocalDiskEncryption: false
gcpAttributes:
availability: string
bootDiskSize: 0
googleServiceAccount: string
localSsdCount: 0
usePreemptibleExecutors: false
zoneId: string
idempotencyToken: string
initScripts:
- abfss:
destination: string
file:
destination: string
gcs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
volumes:
destination: string
workspace:
destination: string
instancePoolId: string
libraries:
- cran:
package: string
repo: string
egg: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
nodeTypeId: string
numWorkers: 0
policyId: string
runtimeEngine: string
singleUserName: string
sparkConf:
string: string
sparkEnvVars:
string: string
sparkVersion: string
sshPublicKeys:
- string
workloadType:
clients:
jobs: false
notebooks: false
notebookTask:
baseParameters:
string: string
notebookPath: string
source: string
warehouseId: string
notificationSettings:
alertOnLastAttempt: false
noAlertForCanceledRuns: false
noAlertForSkippedRuns: false
pipelineTask:
fullRefresh: false
pipelineId: string
pythonWheelTask:
entryPoint: string
namedParameters:
string: string
packageName: string
parameters:
- string
retryOnTimeout: false
runIf: string
runJobTask:
dbtCommands:
- string
jarParams:
- string
jobId: 0
jobParameters:
string: string
notebookParams:
string: string
pipelineParams:
fullRefresh: false
pythonNamedParams:
string: string
pythonParams:
- string
sparkSubmitParams:
- string
sqlParams:
string: string
sparkJarTask:
jarUri: string
mainClassName: string
parameters:
- string
sparkPythonTask:
parameters:
- string
pythonFile: string
source: string
sparkSubmitTask:
parameters:
- string
sqlTask:
alert:
alertId: string
pauseSubscriptions: false
subscriptions:
- destinationId: string
userName: string
dashboard:
customSubject: string
dashboardId: string
pauseSubscriptions: false
subscriptions:
- destinationId: string
userName: string
file:
path: string
source: string
parameters:
string: string
query:
queryId: string
warehouseId: string
taskKey: string
timeoutSeconds: 0
webhookNotifications:
onDurationWarningThresholdExceededs:
- id: string
onFailures:
- id: string
onStarts:
- id: string
onStreamingBacklogExceededs:
- id: string
onSuccesses:
- id: string
health:
rules:
- metric: string
op: string
value: 0
jobClusterKey: string
libraries:
- cran:
package: string
repo: string
egg: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
maxRetries: 0
minRetryIntervalMillis: 0
newCluster:
applyPolicyDefaultValues: false
autoscale:
maxWorkers: 0
minWorkers: 0
awsAttributes:
availability: string
ebsVolumeCount: 0
ebsVolumeIops: 0
ebsVolumeSize: 0
ebsVolumeThroughput: 0
ebsVolumeType: string
firstOnDemand: 0
instanceProfileArn: string
spotBidPricePercent: 0
zoneId: string
azureAttributes:
availability: string
firstOnDemand: 0
logAnalyticsInfo:
logAnalyticsPrimaryKey: string
logAnalyticsWorkspaceId: string
spotBidMaxPrice: 0
clusterId: string
clusterLogConf:
dbfs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
clusterMountInfos:
- localMountDirPath: string
networkFilesystemInfo:
mountOptions: string
serverAddress: string
remoteMountDirPath: string
clusterName: string
customTags:
string: string
dataSecurityMode: string
dockerImage:
basicAuth:
password: string
username: string
url: string
driverInstancePoolId: string
driverNodeTypeId: string
enableElasticDisk: false
enableLocalDiskEncryption: false
gcpAttributes:
availability: string
bootDiskSize: 0
googleServiceAccount: string
localSsdCount: 0
usePreemptibleExecutors: false
zoneId: string
idempotencyToken: string
initScripts:
- abfss:
destination: string
file:
destination: string
gcs:
destination: string
s3:
cannedAcl: string
destination: string
enableEncryption: false
encryptionType: string
endpoint: string
kmsKey: string
region: string
volumes:
destination: string
workspace:
destination: string
instancePoolId: string
libraries:
- cran:
package: string
repo: string
egg: string
jar: string
maven:
coordinates: string
exclusions:
- string
repo: string
pypi:
package: string
repo: string
requirements: string
whl: string
nodeTypeId: string
numWorkers: 0
policyId: string
runtimeEngine: string
singleUserName: string
sparkConf:
string: string
sparkEnvVars:
string: string
sparkVersion: string
sshPublicKeys:
- string
workloadType:
clients:
jobs: false
notebooks: false
notebookTask:
baseParameters:
string: string
notebookPath: string
source: string
warehouseId: string
notificationSettings:
alertOnLastAttempt: false
noAlertForCanceledRuns: false
noAlertForSkippedRuns: false
pipelineTask:
fullRefresh: false
pipelineId: string
pythonWheelTask:
entryPoint: string
namedParameters:
string: string
packageName: string
parameters:
- string
retryOnTimeout: false
runIf: string
runJobTask:
dbtCommands:
- string
jarParams:
- string
jobId: 0
jobParameters:
string: string
notebookParams:
string: string
pipelineParams:
fullRefresh: false
pythonNamedParams:
string: string
pythonParams:
- string
sparkSubmitParams:
- string
sqlParams:
string: string
sparkJarTask:
jarUri: string
mainClassName: string
parameters:
- string
sparkPythonTask:
parameters:
- string
pythonFile: string
source: string
sparkSubmitTask:
parameters:
- string
sqlTask:
alert:
alertId: string
pauseSubscriptions: false
subscriptions:
- destinationId: string
userName: string
dashboard:
customSubject: string
dashboardId: string
pauseSubscriptions: false
subscriptions:
- destinationId: string
userName: string
file:
path: string
source: string
parameters:
string: string
query:
queryId: string
warehouseId: string
taskKey: string
timeoutSeconds: 0
webhookNotifications:
onDurationWarningThresholdExceededs:
- id: string
onFailures:
- id: string
onStarts:
- id: string
onStreamingBacklogExceededs:
- id: string
onSuccesses:
- id: string
timeoutSeconds: 0
trigger:
fileArrival:
minTimeBetweenTriggersSeconds: 0
url: string
waitAfterLastChangeSeconds: 0
pauseStatus: string
periodic:
interval: 0
unit: string
table:
condition: string
minTimeBetweenTriggersSeconds: 0
tableNames:
- string
waitAfterLastChangeSeconds: 0
tableUpdate:
condition: string
minTimeBetweenTriggersSeconds: 0
tableNames:
- string
waitAfterLastChangeSeconds: 0
webhookNotifications:
onDurationWarningThresholdExceededs:
- id: string
onFailures:
- id: string
onStarts:
- id: string
onStreamingBacklogExceededs:
- id: string
onSuccesses:
- id: string
Job Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
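For example, a minimal sketch in Python of the two equivalent forms, reusing the schedule block from the constructor example above. The JobScheduleArgs class name follows Pulumi's usual <Resource><Property>Args naming and is assumed here, as are the resource names and cron expression.

import pulumi_databricks as databricks

# Passing the nested `schedule` input as a plain dictionary literal.
job_from_dict = databricks.Job("nightly-dict",
    name="Nightly job (dict inputs)",
    schedule={
        "quartz_cron_expression": "0 0 2 * * ?",  # 02:00 every day
        "timezone_id": "UTC",
    })

# Passing the same input as a typed argument class.
job_from_args = databricks.Job("nightly-args",
    name="Nightly job (args inputs)",
    schedule=databricks.JobScheduleArgs(
        quartz_cron_expression="0 0 2 * * ?",
        timezone_id="UTC",
    ))

Both forms produce the same resource inputs; the argument classes mainly add editor completion and type checking.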
The Job resource accepts the following input properties:
- AlwaysRunning bool - (Bool) Whether the job should always be running, like a Spark Streaming application: on every update, restart the current active run, or start a new one if nothing is running. False by default. Any job runs are started with parameters specified in the spark_jar_task, spark_submit_task, spark_python_task, or notebook_task blocks.
- BudgetPolicyId string - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- Continuous JobContinuous - Configuration block to configure pause status. See continuous Configuration Block.
- ControlRunState bool - (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the pause_status by stopping the current active run. This flag cannot be set for non-continuous jobs. When migrating from always_running to control_run_state, set continuous as shown in the migration sketch after this list.
- DbtTask JobDbtTask
- Deployment JobDeployment
- Description string - An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- EditMode string
- EmailNotifications JobEmailNotifications - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- Environments List<JobEnvironment>
- ExistingClusterId string
- Format string
- GitSource JobGitSource - Specifies the Git repository for task source code. See git_source Configuration Block below.
- Health JobHealth - An optional block that specifies the health conditions for the job, documented below.
- JobClusters List<JobJobCluster> - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster; you must declare dependent libraries in task settings (multi-task syntax).
- Libraries List<JobLibrary> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- MaxConcurrentRuns int - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- MaxRetries int
- MinRetryIntervalMillis int - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- Name string - An optional name for the job. The default value is Untitled.
- NewCluster JobNewCluster
- NotebookTask JobNotebookTask
- NotificationSettings JobNotificationSettings - An optional block controlling the notification settings on the job level, documented below.
- Parameters List<JobParameter> - Specifies job parameters for the job. See parameter Configuration Block.
- PipelineTask JobPipelineTask
- PythonWheelTask JobPythonWheelTask
- Queue JobQueue - The queue status for the job. See queue Configuration Block below.
- RetryOnTimeout bool
- RunAs JobRunAs - The user or the service principal the job runs as. See run_as Configuration Block below.
- RunJobTask JobRunJobTask
- Schedule JobSchedule - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- SparkJarTask JobSparkJarTask
- SparkPythonTask JobSparkPythonTask
- SparkSubmitTask JobSparkSubmitTask
- Tags Dictionary<string, string> - An optional map of the tags associated with the job. See tags Configuration Map.
- Tasks List<JobTask> - A list of task specifications that the job will execute. See task Configuration Block below.
- TimeoutSeconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- Trigger JobTrigger - The conditions that trigger the job to start. See trigger Configuration Block below.
- WebhookNotifications JobWebhookNotifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
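As referenced in the ControlRunState entry above, here is a minimal Python sketch of moving from always_running to control_run_state. It assumes the continuous job should stay active (a pause_status of "UNPAUSED") and uses a placeholder cluster ID and notebook path; treat those values as illustrative rather than prescribed.

import pulumi_databricks as databricks

# Previously: always_running=True. With control_run_state the provider keeps the
# active run in sync with the deployed configuration instead.
streaming_job = databricks.Job("streaming-job",
    name="Continuously running streaming job",
    control_run_state=True,
    continuous={
        "pause_status": "UNPAUSED",  # assumed value; keeps the continuous job running
    },
    tasks=[{
        "task_key": "ingest",
        "existing_cluster_id": "1234-567890-abcde123",  # placeholder cluster ID
        "notebook_task": {
            "notebook_path": "/Shared/stream_ingest",  # placeholder notebook path
        },
    }])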
- Always
Running bool - (Bool) Whenever the job is always running, like a Spark Streaming application, on every update restart the current active run or start it again, if nothing it is not running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - Budget
Policy stringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- Continuous
Job
Continuous Args - Configuration block to configure pause status. See continuous Configuration Block.
- Control
Run boolState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- Dbt
Task JobDbt Task Args - Deployment
Job
Deployment Args - Description string
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- Edit
Mode string - Email
Notifications JobEmail Notifications Args - (List) An optional set of email addresses notified when runs of this job begins, completes or fails. The default behavior is to not send any emails. This field is a block and is documented below.
- Environments
[]Job
Environment Args - Existing
Cluster stringId - Format string
- Git
Source JobGit Source Args - Specifices the a Git repository for task source code. See git_source Configuration Block below.
- Health
Job
Health Args - An optional block that specifies the health conditions for the job documented below.
- Job
Clusters []JobJob Cluster Args - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- Libraries
[]Job
Library Args - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Max
Concurrent intRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- Max
Retries int - Min
Retry intInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- Name string
- An optional name for the job. The default value is Untitled.
- New
Cluster JobNew Cluster Args - Notebook
Task JobNotebook Task Args - Notification
Settings JobNotification Settings Args - An optional block controlling the notification settings on the job level documented below.
- Parameters
[]Job
Parameter Args - Specifices job parameter for the job. See parameter Configuration Block
- Pipeline
Task JobPipeline Task Args - Python
Wheel JobTask Python Wheel Task Args - Queue
Job
Queue Args - The queue status for the job. See queue Configuration Block below.
- Retry
On boolTimeout - Run
As JobRun As Args - The user or the service prinicipal the job runs as. See run_as Configuration Block below.
- Run
Job JobTask Run Job Task Args - Schedule
Job
Schedule Args - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- Spark
Jar JobTask Spark Jar Task Args - Spark
Python JobTask Spark Python Task Args - Spark
Submit JobTask Spark Submit Task Args - map[string]string
- An optional map of the tags associated with the job. See tags Configuration Map
- Tasks
[]Job
Task Args - A list of task specification that the job will execute. See task Configuration Block below.
- Timeout
Seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- Trigger
Job
Trigger Args - The conditions that triggers the job to start. See trigger Configuration Block below.
- Webhook
Notifications JobWebhook Notifications Args - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always
Running Boolean - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget
Policy StringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous
Job
Continuous - Configuration block to configure pause status. See continuous Configuration Block.
- control
Run BooleanState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt
Task JobDbt Task - deployment
Job
Deployment - description String
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit
Mode String - email
Notifications JobEmail Notifications - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments
List<Job
Environment> - existing
Cluster StringId - format String
- git
Source JobGit Source - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health
Job
Health - An optional block that specifies the health conditions for the job documented below.
- job
Clusters List<JobJob Cluster> - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries
List<Job
Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max
Concurrent IntegerRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max
Retries Integer - min
Retry IntegerInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name String
- An optional name for the job. The default value is Untitled.
- new
Cluster JobNew Cluster - notebook
Task JobNotebook Task - notification
Settings JobNotification Settings - An optional block controlling the notification settings on the job level documented below.
- parameters
List<Job
Parameter> - Specifies job parameters for the job. See parameter Configuration Block
- pipeline
Task JobPipeline Task - python
Wheel JobTask Python Wheel Task - queue
Job
Queue - The queue status for the job. See queue Configuration Block below.
- retry
On BooleanTimeout - run
As JobRun As - The user or the service principal the job runs as. See run_as Configuration Block below.
- run
Job JobTask Run Job Task - schedule
Job
Schedule - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark
Jar JobTask Spark Jar Task - spark
Python JobTask Spark Python Task - spark
Submit JobTask Spark Submit Task - Map<String,String>
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks
List<Job
Task> - A list of task specifications that the job will execute. See task Configuration Block below.
- timeout
Seconds Integer - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger
Job
Trigger - The conditions that trigger the job to start. See trigger Configuration Block below.
- webhook
Notifications JobWebhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always
Running boolean - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget
Policy stringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous
Job
Continuous - Configuration block to configure pause status. See continuous Configuration Block.
- control
Run booleanState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt
Task JobDbt Task - deployment
Job
Deployment - description string
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit
Mode string - email
Notifications JobEmail Notifications - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments
Job
Environment[] - existing
Cluster stringId - format string
- git
Source JobGit Source - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health
Job
Health - An optional block that specifies the health conditions for the job documented below.
- job
Clusters JobJob Cluster[] - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries
Job
Library[] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max
Concurrent numberRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max
Retries number - min
Retry numberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name string
- An optional name for the job. The default value is Untitled.
- new
Cluster JobNew Cluster - notebook
Task JobNotebook Task - notification
Settings JobNotification Settings - An optional block controlling the notification settings on the job level documented below.
- parameters
Job
Parameter[] - Specifies job parameters for the job. See parameter Configuration Block
- pipeline
Task JobPipeline Task - python
Wheel JobTask Python Wheel Task - queue
Job
Queue - The queue status for the job. See queue Configuration Block below.
- retry
On booleanTimeout - run
As JobRun As - The user or the service principal the job runs as. See run_as Configuration Block below.
- run
Job JobTask Run Job Task - schedule
Job
Schedule - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark
Jar JobTask Spark Jar Task - spark
Python JobTask Spark Python Task - spark
Submit JobTask Spark Submit Task - {[key: string]: string}
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks
Job
Task[] - A list of task specifications that the job will execute. See task Configuration Block below.
- timeout
Seconds number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger
Job
Trigger - The conditions that trigger the job to start. See trigger Configuration Block below.
- webhook
Notifications JobWebhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always_
running bool - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget_
policy_ strid - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous
Job
Continuous Args - Configuration block to configure pause status. See continuous Configuration Block.
- control_
run_ boolstate (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt_
task JobDbt Task Args - deployment
Job
Deployment Args - description str
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit_
mode str - email_
notifications JobEmail Notifications Args - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments
Sequence[Job
Environment Args] - existing_
cluster_ strid - format str
- git_
source JobGit Source Args - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health
Job
Health Args - An optional block that specifies the health conditions for the job documented below.
- job_
clusters Sequence[JobJob Cluster Args] - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries
Sequence[Job
Library Args] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max_
concurrent_ intruns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max_
retries int - min_
retry_ intinterval_ millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name str
- An optional name for the job. The default value is Untitled.
- new_
cluster JobNew Cluster Args - notebook_
task JobNotebook Task Args - notification_
settings JobNotification Settings Args - An optional block controlling the notification settings on the job level documented below.
- parameters
Sequence[Job
Parameter Args] - Specifies job parameters for the job. See parameter Configuration Block
- pipeline_
task JobPipeline Task Args - python_
wheel_ Jobtask Python Wheel Task Args - queue
Job
Queue Args - The queue status for the job. See queue Configuration Block below.
- retry_
on_ booltimeout - run_
as JobRun As Args - The user or the service principal the job runs as. See run_as Configuration Block below.
- run_
job_ Jobtask Run Job Task Args - schedule
Job
Schedule Args - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark_
jar_ Jobtask Spark Jar Task Args - spark_
python_ Jobtask Spark Python Task Args - spark_
submit_ Jobtask Spark Submit Task Args - Mapping[str, str]
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks
Sequence[Job
Task Args] - A list of task specifications that the job will execute. See task Configuration Block below.
- timeout_
seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger
Job
Trigger Args - The conditions that trigger the job to start. See trigger Configuration Block below.
- webhook_
notifications JobWebhook Notifications Args - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always
Running Boolean - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget
Policy StringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous Property Map
- Configuration block to configure pause status. See continuous Configuration Block.
- control
Run BooleanState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt
Task Property Map - deployment Property Map
- description String
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit
Mode String - email
Notifications Property Map - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments List<Property Map>
- existing
Cluster StringId - format String
- git
Source Property Map - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health Property Map
- An optional block that specifies the health conditions for the job documented below.
- job
Clusters List<Property Map> - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries List<Property Map>
- (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max
Concurrent NumberRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max
Retries Number - min
Retry NumberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name String
- An optional name for the job. The default value is Untitled.
- new
Cluster Property Map - notebook
Task Property Map - notification
Settings Property Map - An optional block controlling the notification settings on the job level documented below.
- parameters List<Property Map>
- Specifies job parameters for the job. See parameter Configuration Block
- pipeline
Task Property Map - python
Wheel Property MapTask - queue Property Map
- The queue status for the job. See queue Configuration Block below.
- retry
On BooleanTimeout - run
As Property Map - The user or the service principal the job runs as. See run_as Configuration Block below.
- run
Job Property MapTask - schedule Property Map
- An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark
Jar Property MapTask - spark
Python Property MapTask - spark
Submit Property MapTask - Map<String>
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks List<Property Map>
- A list of task specifications that the job will execute. See task Configuration Block below.
- timeout
Seconds Number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger Property Map
- The conditions that trigger the job to start. See trigger Configuration Block below.
- webhook
Notifications Property Map - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
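The control_run_state entries above end with "set continuous as follows:", but the accompanying snippet is not reproduced in this listing. A minimal TypeScript sketch of the intended shape, assuming an UNPAUSED status to mirror the old always-running behavior (the job name and omitted tasks are placeholders):
import * as databricks from "@pulumi/databricks";

// Hypothetical continuous job illustrating the always_running -> control_run_state migration.
const streamingJob = new databricks.Job("streaming-job", {
    name: "Streaming job",
    controlRunState: true, // replaces the deprecated always_running flag
    continuous: {
        pauseStatus: "UNPAUSED", // assumption: keep the run active, as always_running did
    },
    // tasks omitted for brevity
});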
Outputs
All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:
Look up Existing Job Resource
Get an existing Job resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: JobState, opts?: CustomResourceOptions): Job
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
always_running: Optional[bool] = None,
budget_policy_id: Optional[str] = None,
continuous: Optional[JobContinuousArgs] = None,
control_run_state: Optional[bool] = None,
dbt_task: Optional[JobDbtTaskArgs] = None,
deployment: Optional[JobDeploymentArgs] = None,
description: Optional[str] = None,
edit_mode: Optional[str] = None,
email_notifications: Optional[JobEmailNotificationsArgs] = None,
environments: Optional[Sequence[JobEnvironmentArgs]] = None,
existing_cluster_id: Optional[str] = None,
format: Optional[str] = None,
git_source: Optional[JobGitSourceArgs] = None,
health: Optional[JobHealthArgs] = None,
job_clusters: Optional[Sequence[JobJobClusterArgs]] = None,
libraries: Optional[Sequence[JobLibraryArgs]] = None,
max_concurrent_runs: Optional[int] = None,
max_retries: Optional[int] = None,
min_retry_interval_millis: Optional[int] = None,
name: Optional[str] = None,
new_cluster: Optional[JobNewClusterArgs] = None,
notebook_task: Optional[JobNotebookTaskArgs] = None,
notification_settings: Optional[JobNotificationSettingsArgs] = None,
parameters: Optional[Sequence[JobParameterArgs]] = None,
pipeline_task: Optional[JobPipelineTaskArgs] = None,
python_wheel_task: Optional[JobPythonWheelTaskArgs] = None,
queue: Optional[JobQueueArgs] = None,
retry_on_timeout: Optional[bool] = None,
run_as: Optional[JobRunAsArgs] = None,
run_job_task: Optional[JobRunJobTaskArgs] = None,
schedule: Optional[JobScheduleArgs] = None,
spark_jar_task: Optional[JobSparkJarTaskArgs] = None,
spark_python_task: Optional[JobSparkPythonTaskArgs] = None,
spark_submit_task: Optional[JobSparkSubmitTaskArgs] = None,
tags: Optional[Mapping[str, str]] = None,
tasks: Optional[Sequence[JobTaskArgs]] = None,
timeout_seconds: Optional[int] = None,
trigger: Optional[JobTriggerArgs] = None,
url: Optional[str] = None,
webhook_notifications: Optional[JobWebhookNotificationsArgs] = None) -> Job
func GetJob(ctx *Context, name string, id IDInput, state *JobState, opts ...ResourceOption) (*Job, error)
public static Job Get(string name, Input<string> id, JobState? state, CustomResourceOptions? opts = null)
public static Job get(String name, Output<String> id, JobState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
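For example, a minimal TypeScript lookup of an existing job (the resource name and job ID below are placeholders):
import * as databricks from "@pulumi/databricks";

// Fetch the state of a job that already exists in the workspace.
const existing = databricks.Job.get("existing-job", "123456789");

// All documented state properties are then available as outputs, e.g. the job URL.
export const existingJobUrl = existing.url;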
- Always
Running bool - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - Budget
Policy stringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- Continuous
Job
Continuous - Configuration block to configure pause status. See continuous Configuration Block.
- Control
Run boolState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- Dbt
Task JobDbt Task - Deployment
Job
Deployment - Description string
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- Edit
Mode string - Email
Notifications JobEmail Notifications - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- Environments
List<Job
Environment> - Existing
Cluster stringId - Format string
- Git
Source JobGit Source - Specifies the Git repository for task source code. See git_source Configuration Block below.
- Health
Job
Health - An optional block that specifies the health conditions for the job documented below.
- Job
Clusters List<JobJob Cluster> - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- Libraries
List<Job
Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Max
Concurrent intRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- Max
Retries int - Min
Retry intInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- Name string
- An optional name for the job. The default value is Untitled.
- New
Cluster JobNew Cluster - Notebook
Task JobNotebook Task - Notification
Settings JobNotification Settings - An optional block controlling the notification settings on the job level documented below.
- Parameters
List<Job
Parameter> - Specifies job parameters for the job. See parameter Configuration Block
- Pipeline
Task JobPipeline Task - Python
Wheel JobTask Python Wheel Task - Queue
Job
Queue - The queue status for the job. See queue Configuration Block below.
- Retry
On boolTimeout - Run
As JobRun As - The user or the service principal the job runs as. See run_as Configuration Block below.
- Run
Job JobTask Run Job Task - Schedule
Job
Schedule - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- Spark
Jar JobTask Spark Jar Task - Spark
Python JobTask Spark Python Task - Spark
Submit JobTask Spark Submit Task - Dictionary<string, string>
- An optional map of the tags associated with the job. See tags Configuration Map
- Tasks
List<Job
Task> - A list of task specifications that the job will execute. See task Configuration Block below.
- Timeout
Seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- Trigger
Job
Trigger - The conditions that trigger the job to start. See trigger Configuration Block below.
- Url string
- URL of the job on the given workspace
- Webhook
Notifications JobWebhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- Always
Running bool - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - Budget
Policy stringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- Continuous
Job
Continuous Args - Configuration block to configure pause status. See continuous Configuration Block.
- Control
Run boolState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- Dbt
Task JobDbt Task Args - Deployment
Job
Deployment Args - Description string
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- Edit
Mode string - Email
Notifications JobEmail Notifications Args - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- Environments
[]Job
Environment Args - Existing
Cluster stringId - Format string
- Git
Source JobGit Source Args - Specifies the Git repository for task source code. See git_source Configuration Block below.
- Health
Job
Health Args - An optional block that specifies the health conditions for the job documented below.
- Job
Clusters []JobJob Cluster Args - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- Libraries
[]Job
Library Args - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Max
Concurrent intRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- Max
Retries int - Min
Retry intInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- Name string
- An optional name for the job. The default value is Untitled.
- New
Cluster JobNew Cluster Args - Notebook
Task JobNotebook Task Args - Notification
Settings JobNotification Settings Args - An optional block controlling the notification settings on the job level documented below.
- Parameters
[]Job
Parameter Args - Specifies job parameters for the job. See parameter Configuration Block
- Pipeline
Task JobPipeline Task Args - Python
Wheel JobTask Python Wheel Task Args - Queue
Job
Queue Args - The queue status for the job. See queue Configuration Block below.
- Retry
On boolTimeout - Run
As JobRun As Args - The user or the service principal the job runs as. See run_as Configuration Block below.
- Run
Job JobTask Run Job Task Args - Schedule
Job
Schedule Args - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- Spark
Jar JobTask Spark Jar Task Args - Spark
Python JobTask Spark Python Task Args - Spark
Submit JobTask Spark Submit Task Args - map[string]string
- An optional map of the tags associated with the job. See tags Configuration Map
- Tasks
[]Job
Task Args - A list of task specifications that the job will execute. See task Configuration Block below.
- Timeout
Seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- Trigger
Job
Trigger Args - The conditions that trigger the job to start. See trigger Configuration Block below.
- Url string
- URL of the job on the given workspace
- Webhook
Notifications JobWebhook Notifications Args - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always
Running Boolean - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget
Policy StringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous
Job
Continuous - Configuration block to configure pause status. See continuous Configuration Block.
- control
Run BooleanState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt
Task JobDbt Task - deployment
Job
Deployment - description String
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit
Mode String - email
Notifications JobEmail Notifications - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments
List<Job
Environment> - existing
Cluster StringId - format String
- git
Source JobGit Source - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health
Job
Health - An optional block that specifies the health conditions for the job documented below.
- job
Clusters List<JobJob Cluster> - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries
List<Job
Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max
Concurrent IntegerRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max
Retries Integer - min
Retry IntegerInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name String
- An optional name for the job. The default value is Untitled.
- new
Cluster JobNew Cluster - notebook
Task JobNotebook Task - notification
Settings JobNotification Settings - An optional block controlling the notification settings on the job level documented below.
- parameters
List<Job
Parameter> - Specifies job parameters for the job. See parameter Configuration Block
- pipeline
Task JobPipeline Task - python
Wheel JobTask Python Wheel Task - queue
Job
Queue - The queue status for the job. See queue Configuration Block below.
- retry
On BooleanTimeout - run
As JobRun As - The user or the service principal the job runs as. See run_as Configuration Block below.
- run
Job JobTask Run Job Task - schedule
Job
Schedule - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark
Jar JobTask Spark Jar Task - spark
Python JobTask Spark Python Task - spark
Submit JobTask Spark Submit Task - Map<String,String>
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks
List<Job
Task> - A list of task specifications that the job will execute. See task Configuration Block below.
- timeout
Seconds Integer - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger
Job
Trigger - The conditions that trigger the job to start. See trigger Configuration Block below.
- url String
- URL of the job on the given workspace
- webhook
Notifications JobWebhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always
Running boolean - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget
Policy stringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous
Job
Continuous - Configuration block to configure pause status. See continuous Configuration Block.
- control
Run booleanState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt
Task JobDbt Task - deployment
Job
Deployment - description string
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit
Mode string - email
Notifications JobEmail Notifications - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments
Job
Environment[] - existing
Cluster stringId - format string
- git
Source JobGit Source - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health
Job
Health - An optional block that specifies the health conditions for the job documented below.
- job
Clusters JobJob Cluster[] - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries
Job
Library[] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max
Concurrent numberRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max
Retries number - min
Retry numberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name string
- An optional name for the job. The default value is Untitled.
- new
Cluster JobNew Cluster - notebook
Task JobNotebook Task - notification
Settings JobNotification Settings - An optional block controlling the notification settings on the job level documented below.
- parameters
Job
Parameter[] - Specifies job parameters for the job. See parameter Configuration Block
- pipeline
Task JobPipeline Task - python
Wheel JobTask Python Wheel Task - queue
Job
Queue - The queue status for the job. See queue Configuration Block below.
- retry
On booleanTimeout - run
As JobRun As - The user or the service principal the job runs as. See run_as Configuration Block below.
- run
Job JobTask Run Job Task - schedule
Job
Schedule - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark
Jar JobTask Spark Jar Task - spark
Python JobTask Spark Python Task - spark
Submit JobTask Spark Submit Task - {[key: string]: string}
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks
Job
Task[] - A list of task specifications that the job will execute. See task Configuration Block below.
- timeout
Seconds number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger
Job
Trigger - The conditions that trigger the job to start. See trigger Configuration Block below.
- url string
- URL of the job on the given workspace
- webhook
Notifications JobWebhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always_
running bool - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget_
policy_ strid - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous
Job
Continuous Args - Configuration block to configure pause status. See continuous Configuration Block.
- control_
run_ boolstate (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt_
task JobDbt Task Args - deployment
Job
Deployment Args - description str
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit_
mode str - email_
notifications JobEmail Notifications Args - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments
Sequence[Job
Environment Args] - existing_
cluster_ strid - format str
- git_
source JobGit Source Args - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health
Job
Health Args - An optional block that specifies the health conditions for the job documented below.
- job_
clusters Sequence[JobJob Cluster Args] - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries
Sequence[Job
Library Args] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max_
concurrent_ intruns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max_
retries int - min_
retry_ intinterval_ millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name str
- An optional name for the job. The default value is Untitled.
- new_
cluster JobNew Cluster Args - notebook_
task JobNotebook Task Args - notification_
settings JobNotification Settings Args - An optional block controlling the notification settings on the job level documented below.
- parameters
Sequence[Job
Parameter Args] - Specifies job parameters for the job. See parameter Configuration Block
- pipeline_
task JobPipeline Task Args - python_
wheel_ Jobtask Python Wheel Task Args - queue
Job
Queue Args - The queue status for the job. See queue Configuration Block below.
- retry_
on_ booltimeout - run_
as JobRun As Args - The user or the service principal the job runs as. See run_as Configuration Block below.
- run_
job_ Jobtask Run Job Task Args - schedule
Job
Schedule Args - An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark_
jar_ Jobtask Spark Jar Task Args - spark_
python_ Jobtask Spark Python Task Args - spark_
submit_ Jobtask Spark Submit Task Args - Mapping[str, str]
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks
Sequence[Job
Task Args] - A list of task specifications that the job will execute. See task Configuration Block below.
- timeout_
seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger
Job
Trigger Args - The conditions that trigger the job to start. See trigger Configuration Block below.
- url str
- URL of the job on the given workspace
- webhook_
notifications JobWebhook Notifications Args - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
- always
Running Boolean - (Bool) Whether this job should always be running, like a Spark Streaming application: on every update, the current active run is restarted, or a new run is started if none is running. False by default. Any job runs are started with
parameters
specified inspark_jar_task
orspark_submit_task
orspark_python_task
ornotebook_task
blocks. - budget
Policy StringId - The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job.
- continuous Property Map
- Configuration block to configure pause status. See continuous Configuration Block.
- control
Run BooleanState (Bool) If true, the Databricks provider will stop and start the job as needed to ensure that the active run for the job reflects the deployed configuration. For continuous jobs, the provider respects the
pause_status
by stopping the current active run. This flag cannot be set for non-continuous jobs.When migrating from
always_running
tocontrol_run_state
, setcontinuous
as follows:- dbt
Task Property Map - deployment Property Map
- description String
- An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
- edit
Mode String - email
Notifications Property Map - (List) An optional set of email addresses notified when runs of this job begin, complete, or fail. The default behavior is to not send any emails. This field is a block and is documented below.
- environments List<Property Map>
- existing
Cluster StringId - format String
- git
Source Property Map - Specifies the Git repository for task source code. See git_source Configuration Block below.
- health Property Map
- An optional block that specifies the health conditions for the job documented below.
- job
Clusters List<Property Map> - A list of job databricks.Cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. Multi-task syntax
- libraries List<Property Map>
- (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- max
Concurrent NumberRuns - (Integer) An optional maximum allowed number of concurrent runs of the job. Defaults to 1.
- max
Retries Number - min
Retry NumberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- name String
- An optional name for the job. The default value is Untitled.
- new
Cluster Property Map - notebook
Task Property Map - notification
Settings Property Map - An optional block controlling the notification settings on the job level documented below.
- parameters List<Property Map>
- Specifies job parameters for the job. See parameter Configuration Block
- pipeline
Task Property Map - python
Wheel Property MapTask - queue Property Map
- The queue status for the job. See queue Configuration Block below.
- retry
On BooleanTimeout - run
As Property Map - The user or the service principal the job runs as. See run_as Configuration Block below.
- run
Job Property MapTask - schedule Property Map
- An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. See schedule Configuration Block below.
- spark
Jar Property MapTask - spark
Python Property MapTask - spark
Submit Property MapTask - Map<String>
- An optional map of the tags associated with the job. See tags Configuration Map
- tasks List<Property Map>
- A list of task specification that the job will execute. See task Configuration Block below.
- timeout
Seconds Number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- trigger Property Map
- The conditions that trigger the job to start. See trigger Configuration Block below.
- url String
- URL of the job on the given workspace
- webhook
Notifications Property Map - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this job begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
Supporting Types
JobContinuous, JobContinuousArgs
- Pause
Status string - Indicate whether this continuous job is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted in the block, the server will default to usingUNPAUSED
as a value forpause_status
.
- Pause
Status string - Indicate whether this continuous job is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted in the block, the server will default to usingUNPAUSED
as a value forpause_status
.
- pause
Status String - Indicate whether this continuous job is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted in the block, the server will default to usingUNPAUSED
as a value forpause_status
.
- pause
Status string - Indicate whether this continuous job is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted in the block, the server will default to usingUNPAUSED
as a value forpause_status
.
- pause_
status str - Indicate whether this continuous job is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted in the block, the server will default to usingUNPAUSED
as a value forpause_status
.
- pause
Status String - Indicate whether this continuous job is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted in the block, the server will default to usingUNPAUSED
as a value forpause_status
.
JobDbtTask, JobDbtTaskArgs
- Commands List<string>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- Catalog string
- The name of the catalog to use inside Unity Catalog.
- Profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - Project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- Schema string
- The name of the schema dbt should run in. Defaults to
default
. - Source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - Warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- Commands []string
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- Catalog string
- The name of the catalog to use inside Unity Catalog.
- Profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - Project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- Schema string
- The name of the schema dbt should run in. Defaults to
default
. - Source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - Warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands List<String>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog String
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory String - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory String - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema String
- The name of the schema dbt should run in. Defaults to
default
. - source String
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id String The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands string[]
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog string
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema string
- The name of the schema dbt should run in. Defaults to
default
. - source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands Sequence[str]
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog str
- The name of the catalog to use inside Unity Catalog.
- profiles_
directory str - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project_
directory str - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema str
- The name of the schema dbt should run in. Defaults to
default
. - source str
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse_
id str The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands List<String>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog String
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory String - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing --profile-dir
to a dbt command. - project
Directory String - The path where dbt should look for
dbt_project.yml
. Equivalent to passing --project-dir
to the dbt CLI.
- If source is GIT: Relative path to the directory in the repository specified in the git_source block. Defaults to the repository's root directory when not specified.
- If source is WORKSPACE: Absolute path to the folder in the workspace.
- schema String
- The name of the schema dbt should run in. Defaults to
default
. - source String
- The source of the project. Possible values are
WORKSPACE
and GIT
. Defaults to GIT
if a git_source
block is present in the job definition. - warehouse
Id String - The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
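For illustration, a minimal TypeScript sketch of a job with a single dbt task sourced from Git might look like the following; the repository URL, warehouse ID, schema, and commands are placeholders, and depending on the workspace the task may also need compute attached.
import * as databricks from "@pulumi/databricks";
// Hypothetical dbt job; repository URL, warehouse ID, and schema are placeholders.
const dbtJob = new databricks.Job("dbt-daily", {
    name: "dbt daily run",
    gitSource: {
        url: "https://github.com/example-org/dbt-project", // placeholder repository
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "dbt",
        dbtTask: {
            source: "GIT",                       // read the project from the git_source block
            commands: ["dbt deps", "dbt build"], // every command must start with "dbt"
            schema: "analytics",                 // defaults to "default" when omitted
            warehouseId: "1234567890abcdef",     // placeholder SQL warehouse ID
        },
        // Depending on the workspace, the task may also need compute attached
        // (for example existingClusterId, newCluster, or an environment).
    }],
});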
JobDeployment, JobDeploymentArgs
- Kind string
- Metadata
File stringPath
- Kind string
- Metadata
File stringPath
- kind String
- metadata
File StringPath
- kind string
- metadata
File stringPath
- kind str
- metadata_
file_ strpath
- kind String
- metadata
File StringPath
JobEmailNotifications, JobEmailNotificationsArgs
- No
Alert boolFor Skipped Runs - (Bool) Don't send an alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - On
Duration List<string>Warning Threshold Exceededs - (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in the health
block. The following parameter is only available for the job-level configuration.
- On
Failures List<string> - (List) list of emails to notify when the run fails.
- On
Starts List<string> - (List) list of emails to notify when the run starts.
- On
Streaming List<string>Backlog Exceededs - On
Successes List<string> - (List) list of emails to notify when the run completes successfully.
- No
Alert boolFor Skipped Runs - (Bool) Don't send an alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - On
Duration []stringWarning Threshold Exceededs - (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in the health
block. The following parameter is only available for the job-level configuration.
- On
Failures []string - (List) list of emails to notify when the run fails.
- On
Starts []string - (List) list of emails to notify when the run starts.
- On
Streaming []stringBacklog Exceededs - On
Successes []string - (List) list of emails to notify when the run completes successfully.
- no
Alert BooleanFor Skipped Runs - (Bool) Don't send an alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration List<String>Warning Threshold Exceededs - (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in the health
block. The following parameter is only available for the job-level configuration.
- on
Failures List<String> - (List) list of emails to notify when the run fails.
- on
Starts List<String> - (List) list of emails to notify when the run starts.
- on
Streaming List<String>Backlog Exceededs - on
Successes List<String> - (List) list of emails to notify when the run completes successfully.
- no
Alert booleanFor Skipped Runs - (Bool) Don't send an alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration string[]Warning Threshold Exceededs - (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in the health
block. The following parameter is only available for the job-level configuration.
- on
Failures string[] - (List) list of emails to notify when the run fails.
- on
Starts string[] - (List) list of emails to notify when the run starts.
- on
Streaming string[]Backlog Exceededs - on
Successes string[] - (List) list of emails to notify when the run completes successfully.
- no_
alert_ boolfor_ skipped_ runs - (Bool) Don't send an alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on_
duration_ Sequence[str]warning_ threshold_ exceededs - (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in the health
block. The following parameter is only available for the job-level configuration.
- on_
failures Sequence[str] - (List) list of emails to notify when the run fails.
- on_
starts Sequence[str] - (List) list of emails to notify when the run starts.
- on_
streaming_ Sequence[str]backlog_ exceededs - on_
successes Sequence[str] - (List) list of emails to notify when the run completes successfully.
- no
Alert BooleanFor Skipped Runs - (Bool) Don't send an alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration List<String>Warning Threshold Exceededs - (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in the health
block. The following parameter is only available for the job-level configuration.
- on
Failures List<String> - (List) list of emails to notify when the run fails.
- on
Starts List<String> - (List) list of emails to notify when the run starts.
- on
Streaming List<String>Backlog Exceededs - on
Successes List<String> - (List) list of emails to notify when the run completes successfully.
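As a sketch of how these attributes are typically combined, the following TypeScript snippet enables failure and success emails and suppresses alerts for skipped runs; the addresses and notebook path are placeholders.
import * as databricks from "@pulumi/databricks";
// Hypothetical job with job-level email notifications; addresses are placeholders.
const notifiedJob = new databricks.Job("notified", {
    name: "Job with email notifications",
    emailNotifications: {
        onFailures: ["oncall@example.com"],  // notify when a run fails
        onSuccesses: ["team@example.com"],   // notify when a run completes successfully
        noAlertForSkippedRuns: true,         // suppress alerts for skipped runs
    },
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/example" }, // placeholder notebook path
        // attach compute as needed, e.g. existingClusterId or newCluster
    }],
});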
JobEnvironment, JobEnvironmentArgs
- Environment
Key string - a unique identifier of the Environment. It will be referenced from the
environment_key
attribute of the corresponding task. - Spec
Job
Environment Spec - block describing the Environment. Consists of the following attributes:
- Environment
Key string - a unique identifier of the Environment. It will be referenced from the
environment_key
attribute of the corresponding task. - Spec
Job
Environment Spec - block describing the Environment. Consists of the following attributes:
- environment
Key String - a unique identifier of the Environment. It will be referenced from the
environment_key
attribute of the corresponding task. - spec
Job
Environment Spec - block describing the Environment. Consists of the following attributes:
- environment
Key string - a unique identifier of the Environment. It will be referenced from the
environment_key
attribute of the corresponding task. - spec
Job
Environment Spec - block describing the Environment. Consists of the following attributes:
- environment_
key str - a unique identifier of the Environment. It will be referenced from the
environment_key
attribute of the corresponding task. - spec
Job
Environment Spec - block describing the Environment. Consists of the following attributes:
- environment
Key String - a unique identifier of the Environment. It will be referenced from the
environment_key
attribute of the corresponding task. - spec Property Map
- block describing the Environment. Consists of the following attributes:
JobEnvironmentSpec, JobEnvironmentSpecArgs
- Client string
- client version used by the environment.
- Dependencies List<string>
- List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
- Client string
- client version used by the environment.
- Dependencies []string
- List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
- client String
- client version used by the environment.
- dependencies List<String>
- List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
- client string
- client version used by the environment.
- dependencies string[]
- List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
- client str
- client version used by the environment.
- dependencies Sequence[str]
- List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
- client String
- client version used by the environment.
- dependencies List<String>
- List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See API docs for more information.
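To show how an environment is declared and then referenced from a task, here is a hedged TypeScript sketch; the client version, package pins, and script path are placeholders, and it assumes a workspace where tasks accept an environment_key.
import * as databricks from "@pulumi/databricks";
// Hypothetical job that shares one Environment across tasks; package pins are placeholders.
const envJob = new databricks.Job("with-environment", {
    name: "Job with a shared environment",
    environments: [{
        environmentKey: "default-python",
        spec: {
            client: "1",                                  // environment client version (placeholder)
            dependencies: ["requests==2.32.3", "pandas"], // pip requirement file lines
        },
    }],
    tasks: [{
        taskKey: "etl",
        environmentKey: "default-python", // references the environment declared above
        sparkPythonTask: {
            pythonFile: "/Workspace/Shared/etl.py", // placeholder script path
        },
    }],
});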
JobGitSource, JobGitSourceArgs
- Url string
- URL of the Git repository to use.
- Branch string
- name of the Git branch to use. Conflicts with
tag
and commit
. - Commit string
- hash of the Git commit to use. Conflicts with
branch
and tag
. - Git
Snapshot JobGit Source Git Snapshot - Job
Source JobGit Source Job Source - Provider string
- case-insensitive name of the Git provider. The following values are currently supported (the list may change; consult the Repos API documentation):
gitHub
, gitHubEnterprise
, bitbucketCloud
, bitbucketServer
, azureDevOpsServices
, gitLab
, gitLabEnterpriseEdition
. - Tag string
- name of the Git tag to use. Conflicts with
branch
and commit
.
- Url string
- URL of the Git repository to use.
- Branch string
- name of the Git branch to use. Conflicts with
tag
and commit
. - Commit string
- hash of the Git commit to use. Conflicts with
branch
and tag
. - Git
Snapshot JobGit Source Git Snapshot - Job
Source JobGit Source Job Source - Provider string
- case-insensitive name of the Git provider. The following values are currently supported (the list may change; consult the Repos API documentation):
gitHub
, gitHubEnterprise
, bitbucketCloud
, bitbucketServer
, azureDevOpsServices
, gitLab
, gitLabEnterpriseEdition
. - Tag string
- name of the Git tag to use. Conflicts with
branch
and commit
.
- url String
- URL of the Git repository to use.
- branch String
- name of the Git branch to use. Conflicts with
tag
and commit
. - commit String
- hash of the Git commit to use. Conflicts with
branch
and tag
. - git
Snapshot JobGit Source Git Snapshot - job
Source JobGit Source Job Source - provider String
- case-insensitive name of the Git provider. The following values are currently supported (the list may change; consult the Repos API documentation):
gitHub
, gitHubEnterprise
, bitbucketCloud
, bitbucketServer
, azureDevOpsServices
, gitLab
, gitLabEnterpriseEdition
. - tag String
- name of the Git tag to use. Conflicts with
branch
and commit
.
- url string
- URL of the Git repository to use.
- branch string
- name of the Git branch to use. Conflicts with
tag
and commit
. - commit string
- hash of the Git commit to use. Conflicts with
branch
and tag
. - git
Snapshot JobGit Source Git Snapshot - job
Source JobGit Source Job Source - provider string
- case-insensitive name of the Git provider. The following values are currently supported (the list may change; consult the Repos API documentation):
gitHub
, gitHubEnterprise
, bitbucketCloud
, bitbucketServer
, azureDevOpsServices
, gitLab
, gitLabEnterpriseEdition
. - tag string
- name of the Git tag to use. Conflicts with
branch
and commit
.
- url str
- URL of the Git repository to use.
- branch str
- name of the Git branch to use. Conflicts with
tag
and commit
. - commit str
- hash of the Git commit to use. Conflicts with
branch
and tag
. - git_
snapshot JobGit Source Git Snapshot - job_
source JobGit Source Job Source - provider str
- case-insensitive name of the Git provider. The following values are currently supported (the list may change; consult the Repos API documentation):
gitHub
, gitHubEnterprise
, bitbucketCloud
, bitbucketServer
, azureDevOpsServices
, gitLab
, gitLabEnterpriseEdition
. - tag str
- name of the Git tag to use. Conflicts with
branch
and commit
.
- url String
- URL of the Git repository to use.
- branch String
- name of the Git branch to use. Conflicts with
tag
and commit
. - commit String
- hash of the Git commit to use. Conflicts with
branch
and tag
. - git
Snapshot Property Map - job
Source Property Map - provider String
- case-insensitive name of the Git provider. The following values are currently supported (the list may change; consult the Repos API documentation):
gitHub
, gitHubEnterprise
, bitbucketCloud
, bitbucketServer
, azureDevOpsServices
, gitLab
, gitLabEnterpriseEdition
. - tag String
- name of the Git tag to use. Conflicts with
branch
and commit
.
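A short TypeScript sketch of a git_source pinned to a tag, with a notebook task resolved relative to the repository root; the URL, tag, and notebook path are placeholders.
import * as databricks from "@pulumi/databricks";
// Hypothetical job whose notebook is checked out from Git at run time.
const gitJob = new databricks.Job("from-git", {
    name: "Job from a Git repository",
    gitSource: {
        url: "https://github.com/example-org/jobs-repo", // placeholder repository
        provider: "gitHub",
        tag: "v1.2.0", // pin to a tag; use branch or commit instead, but only one of the three
    },
    tasks: [{
        taskKey: "notebook",
        notebookTask: {
            notebookPath: "notebooks/daily_report", // relative path inside the repository
            source: "GIT",
        },
        // attach compute as needed, e.g. existingClusterId or newCluster
    }],
});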
JobGitSourceGitSnapshot, JobGitSourceGitSnapshotArgs
- Used
Commit string
- Used
Commit string
- used
Commit String
- used
Commit string
- used_
commit str
- used
Commit String
JobGitSourceJobSource, JobGitSourceJobSourceArgs
- Import
From stringGit Branch - Job
Config stringPath - Dirty
State string
- Import
From stringGit Branch - Job
Config stringPath - Dirty
State string
- import
From StringGit Branch - job
Config StringPath - dirty
State String
- import
From stringGit Branch - job
Config stringPath - dirty
State string
- import_
from_ strgit_ branch - job_
config_ strpath - dirty_
state str
- import
From StringGit Branch - job
Config StringPath - dirty
State String
JobHealth, JobHealthArgs
- Rules
List<Job
Health Rule> - list of rules that are represented as objects with the following attributes:
- Rules
[]Job
Health Rule - list of rules that are represented as objects with the following attributes:
- rules
List<Job
Health Rule> - list of rules that are represented as objects with the following attributes:
- rules
Job
Health Rule[] - list of rules that are represented as objects with the following attributes:
- rules
Sequence[Job
Health Rule] - list of rules that are represented as objects with the following attributes:
- rules List<Property Map>
- list of rules that are represented as objects with the following attributes:
JobHealthRule, JobHealthRuleArgs
- Metric string
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - Op string
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - Value int
- integer value used to compare to the given metric.
- Metric string
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - Op string
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - Value int
- integer value used to compare to the given metric.
- metric String
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op String
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value Integer
- integer value used to compare to the given metric.
- metric string
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op string
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value number
- integer value used to compare to the given metric.
- metric str
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op str
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value int
- integer value used to compare to the given metric.
- metric String
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op String
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value Number
- integer value used to compare to the given metric.
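Putting the health rule together with the duration-warning email list described earlier, a TypeScript sketch could look like this; the threshold and address are placeholders.
import * as databricks from "@pulumi/databricks";
// Hypothetical job that warns by email when a run exceeds 30 minutes.
const monitoredJob = new databricks.Job("monitored", {
    name: "Job with a run-duration health rule",
    health: {
        rules: [{
            metric: "RUN_DURATION_SECONDS", // only supported metric at the time of writing
            op: "GREATER_THAN",             // only supported operation
            value: 1800,                    // threshold in seconds (placeholder)
        }],
    },
    emailNotifications: {
        onDurationWarningThresholdExceededs: ["oncall@example.com"], // placeholder address
    },
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/example" }, // placeholder
        // attach compute as needed, e.g. existingClusterId or newCluster
    }],
});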
JobJobCluster, JobJobClusterArgs
- Job
Cluster stringKey - Identifier that can be referenced in
task
block, so that the cluster is shared between tasks - New
Cluster JobJob Cluster New Cluster - Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
- Job
Cluster stringKey - Identifier that can be referenced in
task
block, so that the cluster is shared between tasks - New
Cluster JobJob Cluster New Cluster - Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
- job
Cluster StringKey - Identifier that can be referenced in
task
block, so that the cluster is shared between tasks - new
Cluster JobJob Cluster New Cluster - Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
- job
Cluster stringKey - Identifier that can be referenced in
task
block, so that the cluster is shared between tasks - new
Cluster JobJob Cluster New Cluster - Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
- job_
cluster_ strkey - Identifier that can be referenced in
task
block, so that the cluster is shared between tasks - new_
cluster JobJob Cluster New Cluster - Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
- job
Cluster StringKey - Identifier that can be referenced in
task
block, so that the cluster is shared between tasks - new
Cluster Property Map - Block with almost the same set of parameters as the databricks.Cluster resource, except the following (check the REST API documentation for the full list of supported parameters):
JobJobClusterNewCluster, JobJobClusterNewClusterArgs
- Spark
Version string - Apply
Policy boolDefault Values - Autoscale
Job
Job Cluster New Cluster Autoscale - Aws
Attributes JobJob Cluster New Cluster Aws Attributes - Azure
Attributes JobJob Cluster New Cluster Azure Attributes - Cluster
Id string - Cluster
Log JobConf Job Cluster New Cluster Cluster Log Conf - Cluster
Mount List<JobInfos Job Cluster New Cluster Cluster Mount Info> - Cluster
Name string - Dictionary<string, string>
- Data
Security stringMode - Docker
Image JobJob Cluster New Cluster Docker Image - Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Elastic boolDisk - Enable
Local boolDisk Encryption - Gcp
Attributes JobJob Cluster New Cluster Gcp Attributes - Idempotency
Token string - Init
Scripts List<JobJob Cluster New Cluster Init Script> - Instance
Pool stringId - Libraries
List<Job
Job Cluster New Cluster Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Node
Type stringId - Num
Workers int - Policy
Id string - Runtime
Engine string - Single
User stringName - Spark
Conf Dictionary<string, string> - Spark
Env Dictionary<string, string>Vars - Ssh
Public List<string>Keys - Workload
Type JobJob Cluster New Cluster Workload Type - isn't supported
- Spark
Version string - Apply
Policy boolDefault Values - Autoscale
Job
Job Cluster New Cluster Autoscale - Aws
Attributes JobJob Cluster New Cluster Aws Attributes - Azure
Attributes JobJob Cluster New Cluster Azure Attributes - Cluster
Id string - Cluster
Log JobConf Job Cluster New Cluster Cluster Log Conf - Cluster
Mount []JobInfos Job Cluster New Cluster Cluster Mount Info - Cluster
Name string - map[string]string
- Data
Security stringMode - Docker
Image JobJob Cluster New Cluster Docker Image - Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Elastic boolDisk - Enable
Local boolDisk Encryption - Gcp
Attributes JobJob Cluster New Cluster Gcp Attributes - Idempotency
Token string - Init
Scripts []JobJob Cluster New Cluster Init Script - Instance
Pool stringId - Libraries
[]Job
Job Cluster New Cluster Library - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Node
Type stringId - Num
Workers int - Policy
Id string - Runtime
Engine string - Single
User stringName - Spark
Conf map[string]string - Spark
Env map[string]stringVars - Ssh
Public []stringKeys - Workload
Type JobJob Cluster New Cluster Workload Type - isn't supported
- spark
Version String - apply
Policy BooleanDefault Values - autoscale
Job
Job Cluster New Cluster Autoscale - aws
Attributes JobJob Cluster New Cluster Aws Attributes - azure
Attributes JobJob Cluster New Cluster Azure Attributes - cluster
Id String - cluster
Log JobConf Job Cluster New Cluster Cluster Log Conf - cluster
Mount List<JobInfos Job Cluster New Cluster Cluster Mount Info> - cluster
Name String - Map<String,String>
- data
Security StringMode - docker
Image JobJob Cluster New Cluster Docker Image - driver
Instance StringPool Id - driver
Node StringType Id - enable
Elastic BooleanDisk - enable
Local BooleanDisk Encryption - gcp
Attributes JobJob Cluster New Cluster Gcp Attributes - idempotency
Token String - init
Scripts List<JobJob Cluster New Cluster Init Script> - instance
Pool StringId - libraries
List<Job
Job Cluster New Cluster Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type StringId - num
Workers Integer - policy
Id String - runtime
Engine String - single
User StringName - spark
Conf Map<String,String> - spark
Env Map<String,String>Vars - ssh
Public List<String>Keys - workload
Type JobJob Cluster New Cluster Workload Type - isn't supported
- spark
Version string - apply
Policy booleanDefault Values - autoscale
Job
Job Cluster New Cluster Autoscale - aws
Attributes JobJob Cluster New Cluster Aws Attributes - azure
Attributes JobJob Cluster New Cluster Azure Attributes - cluster
Id string - cluster
Log JobConf Job Cluster New Cluster Cluster Log Conf - cluster
Mount JobInfos Job Cluster New Cluster Cluster Mount Info[] - cluster
Name string - {[key: string]: string}
- data
Security stringMode - docker
Image JobJob Cluster New Cluster Docker Image - driver
Instance stringPool Id - driver
Node stringType Id - enable
Elastic booleanDisk - enable
Local booleanDisk Encryption - gcp
Attributes JobJob Cluster New Cluster Gcp Attributes - idempotency
Token string - init
Scripts JobJob Cluster New Cluster Init Script[] - instance
Pool stringId - libraries
Job
Job Cluster New Cluster Library[] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type stringId - num
Workers number - policy
Id string - runtime
Engine string - single
User stringName - spark
Conf {[key: string]: string} - spark
Env {[key: string]: string}Vars - ssh
Public string[]Keys - workload
Type JobJob Cluster New Cluster Workload Type - isn't supported
- spark_
version str - apply_
policy_ booldefault_ values - autoscale
Job
Job Cluster New Cluster Autoscale - aws_
attributes JobJob Cluster New Cluster Aws Attributes - azure_
attributes JobJob Cluster New Cluster Azure Attributes - cluster_
id str - cluster_
log_ Jobconf Job Cluster New Cluster Cluster Log Conf - cluster_
mount_ Sequence[Jobinfos Job Cluster New Cluster Cluster Mount Info] - cluster_
name str - Mapping[str, str]
- data_
security_ strmode - docker_
image JobJob Cluster New Cluster Docker Image - driver_
instance_ strpool_ id - driver_
node_ strtype_ id - enable_
elastic_ booldisk - enable_
local_ booldisk_ encryption - gcp_
attributes JobJob Cluster New Cluster Gcp Attributes - idempotency_
token str - init_
scripts Sequence[JobJob Cluster New Cluster Init Script] - instance_
pool_ strid - libraries
Sequence[Job
Job Cluster New Cluster Library] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node_
type_ strid - num_
workers int - policy_
id str - runtime_
engine str - single_
user_ strname - spark_
conf Mapping[str, str] - spark_
env_ Mapping[str, str]vars - ssh_
public_ Sequence[str]keys - workload_
type JobJob Cluster New Cluster Workload Type - isn't supported
- spark
Version String - apply
Policy BooleanDefault Values - autoscale Property Map
- aws
Attributes Property Map - azure
Attributes Property Map - cluster
Id String - cluster
Log Property MapConf - cluster
Mount List<Property Map>Infos - cluster
Name String - Map<String>
- data
Security StringMode - docker
Image Property Map - driver
Instance StringPool Id - driver
Node StringType Id - enable
Elastic BooleanDisk - enable
Local BooleanDisk Encryption - gcp
Attributes Property Map - idempotency
Token String - init
Scripts List<Property Map> - instance
Pool StringId - libraries List<Property Map>
- (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type StringId - num
Workers Number - policy
Id String - runtime
Engine String - single
User StringName - spark
Conf Map<String> - spark
Env Map<String>Vars - ssh
Public List<String>Keys - workload
Type Property Map - isn't supported
JobJobClusterNewClusterAutoscale, JobJobClusterNewClusterAutoscaleArgs
- Max
Workers int - Min
Workers int
- Max
Workers int - Min
Workers int
- max
Workers Integer - min
Workers Integer
- max
Workers number - min
Workers number
- max_
workers int - min_
workers int
- max
Workers Number - min
Workers Number
JobJobClusterNewClusterAwsAttributes, JobJobClusterNewClusterAwsAttributesArgs
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- availability String
- ebs
Volume IntegerCount - ebs
Volume IntegerIops - ebs
Volume IntegerSize - ebs
Volume IntegerThroughput - ebs
Volume StringType - first
On IntegerDemand - instance
Profile StringArn - spot
Bid IntegerPrice Percent - zone
Id String
- availability string
- ebs
Volume numberCount - ebs
Volume numberIops - ebs
Volume numberSize - ebs
Volume numberThroughput - ebs
Volume stringType - first
On numberDemand - instance
Profile stringArn - spot
Bid numberPrice Percent - zone
Id string
- availability str
- ebs_
volume_ intcount - ebs_
volume_ intiops - ebs_
volume_ intsize - ebs_
volume_ intthroughput - ebs_
volume_ strtype - first_
on_ intdemand - instance_
profile_ strarn - spot_
bid_ intprice_ percent - zone_
id str
- availability String
- ebs
Volume NumberCount - ebs
Volume NumberIops - ebs
Volume NumberSize - ebs
Volume NumberThroughput - ebs
Volume StringType - first
On NumberDemand - instance
Profile StringArn - spot
Bid NumberPrice Percent - zone
Id String
JobJobClusterNewClusterAzureAttributes, JobJobClusterNewClusterAzureAttributesArgs
- availability String
- first
On NumberDemand - log
Analytics Property MapInfo - spot
Bid NumberMax Price
JobJobClusterNewClusterAzureAttributesLogAnalyticsInfo, JobJobClusterNewClusterAzureAttributesLogAnalyticsInfoArgs
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
- log
Analytics stringPrimary Key - log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
JobJobClusterNewClusterClusterLogConf, JobJobClusterNewClusterClusterLogConfArgs
JobJobClusterNewClusterClusterLogConfDbfs, JobJobClusterNewClusterClusterLogConfDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobJobClusterNewClusterClusterLogConfS3, JobJobClusterNewClusterClusterLogConfS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
JobJobClusterNewClusterClusterMountInfo, JobJobClusterNewClusterClusterMountInfoArgs
JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfo, JobJobClusterNewClusterClusterMountInfoNetworkFilesystemInfoArgs
- Server
Address string - Mount
Options string
- Server
Address string - Mount
Options string
- server
Address String - mount
Options String
- server
Address string - mount
Options string
- server_
address str - mount_
options str
- server
Address String - mount
Options String
JobJobClusterNewClusterDockerImage, JobJobClusterNewClusterDockerImageArgs
- Url string
- URL of the Docker image
- Basic
Auth JobJob Cluster New Cluster Docker Image Basic Auth
- Url string
- URL of the Docker image
- Basic
Auth JobJob Cluster New Cluster Docker Image Basic Auth
- url String
- URL of the Docker image
- basic
Auth JobJob Cluster New Cluster Docker Image Basic Auth
- url string
- URL of the Docker image
- basic
Auth JobJob Cluster New Cluster Docker Image Basic Auth
- url str
- URL of the Docker image
- basic_
auth JobJob Cluster New Cluster Docker Image Basic Auth
- url String
- URL of the Docker image
- basic
Auth Property Map
JobJobClusterNewClusterDockerImageBasicAuth, JobJobClusterNewClusterDockerImageBasicAuthArgs
JobJobClusterNewClusterGcpAttributes, JobJobClusterNewClusterGcpAttributesArgs
- Availability string
- Boot
Disk intSize - Google
Service stringAccount - Local
Ssd intCount - Use
Preemptible boolExecutors - Zone
Id string
- Availability string
- Boot
Disk intSize - Google
Service stringAccount - Local
Ssd intCount - Use
Preemptible boolExecutors - Zone
Id string
- availability String
- boot
Disk IntegerSize - google
Service StringAccount - local
Ssd IntegerCount - use
Preemptible BooleanExecutors - zone
Id String
- availability string
- boot
Disk numberSize - google
Service stringAccount - local
Ssd numberCount - use
Preemptible booleanExecutors - zone
Id string
- availability str
- boot_
disk_ intsize - google_
service_ straccount - local_
ssd_ intcount - use_
preemptible_ boolexecutors - zone_
id str
- availability String
- boot
Disk NumberSize - google
Service StringAccount - local
Ssd NumberCount - use
Preemptible BooleanExecutors - zone
Id String
JobJobClusterNewClusterInitScript, JobJobClusterNewClusterInitScriptArgs
- Abfss
Job
Job Cluster New Cluster Init Script Abfss - Dbfs
Job
Job Cluster New Cluster Init Script Dbfs - File
Job
Job Cluster New Cluster Init Script File - block consisting of single string fields:
- Gcs
Job
Job Cluster New Cluster Init Script Gcs - S3
Job
Job Cluster New Cluster Init Script S3 - Volumes
Job
Job Cluster New Cluster Init Script Volumes - Workspace
Job
Job Cluster New Cluster Init Script Workspace
- Abfss
Job
Job Cluster New Cluster Init Script Abfss - Dbfs
Job
Job Cluster New Cluster Init Script Dbfs - File
Job
Job Cluster New Cluster Init Script File - block consisting of single string fields:
- Gcs
Job
Job Cluster New Cluster Init Script Gcs - S3
Job
Job Cluster New Cluster Init Script S3 - Volumes
Job
Job Cluster New Cluster Init Script Volumes - Workspace
Job
Job Cluster New Cluster Init Script Workspace
- abfss
Job
Job Cluster New Cluster Init Script Abfss - dbfs
Job
Job Cluster New Cluster Init Script Dbfs - file
Job
Job Cluster New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Job Cluster New Cluster Init Script Gcs - s3
Job
Job Cluster New Cluster Init Script S3 - volumes
Job
Job Cluster New Cluster Init Script Volumes - workspace
Job
Job Cluster New Cluster Init Script Workspace
- abfss
Job
Job Cluster New Cluster Init Script Abfss - dbfs
Job
Job Cluster New Cluster Init Script Dbfs - file
Job
Job Cluster New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Job Cluster New Cluster Init Script Gcs - s3
Job
Job Cluster New Cluster Init Script S3 - volumes
Job
Job Cluster New Cluster Init Script Volumes - workspace
Job
Job Cluster New Cluster Init Script Workspace
- abfss
Job
Job Cluster New Cluster Init Script Abfss - dbfs
Job
Job Cluster New Cluster Init Script Dbfs - file
Job
Job Cluster New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Job Cluster New Cluster Init Script Gcs - s3
Job
Job Cluster New Cluster Init Script S3 - volumes
Job
Job Cluster New Cluster Init Script Volumes - workspace
Job
Job Cluster New Cluster Init Script Workspace
- abfss Property Map
- dbfs Property Map
- file Property Map
- block consisting of single string fields:
- gcs Property Map
- s3 Property Map
- volumes Property Map
- workspace Property Map
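As a rough illustration of how the init script blocks above attach to a job cluster's new_cluster, here is a TypeScript sketch; the script paths, Spark version, and node type are placeholders.
import * as databricks from "@pulumi/databricks";
// Hypothetical job cluster with two init scripts: one from a Unity Catalog volume,
// one from the workspace. Paths, Spark version, and node type are placeholders.
const clusterJob = new databricks.Job("with-init-scripts", {
    name: "Job with cluster init scripts",
    jobClusters: [{
        jobClusterKey: "shared",
        newCluster: {
            numWorkers: 2,
            sparkVersion: "15.4.x-scala2.12", // placeholder runtime version
            nodeTypeId: "i3.xlarge",          // placeholder node type
            initScripts: [
                { volumes: { destination: "/Volumes/main/default/scripts/setup.sh" } },
                { workspace: { destination: "/Shared/init/install-tools.sh" } },
            ],
        },
    }],
    tasks: [{
        taskKey: "main",
        jobClusterKey: "shared",
        notebookTask: { notebookPath: "/Shared/example" }, // placeholder
    }],
});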
JobJobClusterNewClusterInitScriptAbfss, JobJobClusterNewClusterInitScriptAbfssArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobJobClusterNewClusterInitScriptDbfs, JobJobClusterNewClusterInitScriptDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobJobClusterNewClusterInitScriptFile, JobJobClusterNewClusterInitScriptFileArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobJobClusterNewClusterInitScriptGcs, JobJobClusterNewClusterInitScriptGcsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobJobClusterNewClusterInitScriptS3, JobJobClusterNewClusterInitScriptS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
JobJobClusterNewClusterInitScriptVolumes, JobJobClusterNewClusterInitScriptVolumesArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobJobClusterNewClusterInitScriptWorkspace, JobJobClusterNewClusterInitScriptWorkspaceArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobJobClusterNewClusterLibrary, JobJobClusterNewClusterLibraryArgs
- cran Property Map
- egg String
- jar String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
JobJobClusterNewClusterLibraryCran, JobJobClusterNewClusterLibraryCranArgs
JobJobClusterNewClusterLibraryMaven, JobJobClusterNewClusterLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
JobJobClusterNewClusterLibraryPypi, JobJobClusterNewClusterLibraryPypiArgs
JobJobClusterNewClusterWorkloadType, JobJobClusterNewClusterWorkloadTypeArgs
JobJobClusterNewClusterWorkloadTypeClients, JobJobClusterNewClusterWorkloadTypeClientsArgs
JobLibrary, JobLibraryArgs
- Cran
Job
Library Cran - Egg string
- Jar string
- Maven
Job
Library Maven - Pypi
Job
Library Pypi - Requirements string
- Whl string
- Cran
Job
Library Cran - Egg string
- Jar string
- Maven
Job
Library Maven - Pypi
Job
Library Pypi - Requirements string
- Whl string
- cran
Job
Library Cran - egg String
- jar String
- maven
Job
Library Maven - pypi
Job
Library Pypi - requirements String
- whl String
- cran
Job
Library Cran - egg string
- jar string
- maven
Job
Library Maven - pypi
Job
Library Pypi - requirements string
- whl string
- cran Property Map
- egg String
- jar String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
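A hedged TypeScript sketch of the library shapes (pypi, maven, whl) installed on the cluster that runs a task; the package name, Maven coordinates, wheel path, and cluster settings are placeholders, and the same shapes apply wherever a libraries list is accepted.
import * as databricks from "@pulumi/databricks";
// Hypothetical task with libraries; package names, coordinates, and paths are placeholders.
const libJob = new databricks.Job("with-libraries", {
    name: "Job with cluster libraries",
    tasks: [{
        taskKey: "main",
        newCluster: {
            numWorkers: 1,
            sparkVersion: "15.4.x-scala2.12", // placeholder runtime version
            nodeTypeId: "i3.xlarge",          // placeholder node type
        },
        libraries: [
            { pypi: { package: "faker==24.4.0" } },                        // PyPI package (placeholder)
            { maven: { coordinates: "com.acme:utils:1.0.0" } },            // Maven coordinates (placeholder)
            { whl: "/Workspace/Shared/dist/my_lib-0.1-py3-none-any.whl" }, // wheel file path (placeholder)
        ],
        sparkPythonTask: { pythonFile: "/Workspace/Shared/main.py" }, // placeholder
    }],
});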
JobLibraryCran, JobLibraryCranArgs
JobLibraryMaven, JobLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
JobLibraryPypi, JobLibraryPypiArgs
JobNewCluster, JobNewClusterArgs
- Spark
Version string - Apply
Policy boolDefault Values - Autoscale
Job
New Cluster Autoscale - Aws
Attributes JobNew Cluster Aws Attributes - Azure
Attributes JobNew Cluster Azure Attributes - Cluster
Id string - Cluster
Log JobConf New Cluster Cluster Log Conf - Cluster
Mount List<JobInfos New Cluster Cluster Mount Info> - Cluster
Name string - Dictionary<string, string>
- Data
Security stringMode - Docker
Image JobNew Cluster Docker Image - Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Elastic boolDisk - Enable
Local boolDisk Encryption - Gcp
Attributes JobNew Cluster Gcp Attributes - Idempotency
Token string - Init
Scripts List<JobNew Cluster Init Script> - Instance
Pool stringId - Libraries
List<Job
New Cluster Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Node
Type stringId - Num
Workers int - Policy
Id string - Runtime
Engine string - Single
User stringName - Spark
Conf Dictionary<string, string> - Spark
Env Dictionary<string, string>Vars - Ssh
Public List<string>Keys - Workload
Type JobNew Cluster Workload Type - isn't supported
- Spark
Version string - Apply
Policy boolDefault Values - Autoscale
Job
New Cluster Autoscale - Aws
Attributes JobNew Cluster Aws Attributes - Azure
Attributes JobNew Cluster Azure Attributes - Cluster
Id string - Cluster
Log JobConf New Cluster Cluster Log Conf - Cluster
Mount []JobInfos New Cluster Cluster Mount Info - Cluster
Name string - map[string]string
- Data
Security stringMode - Docker
Image JobNew Cluster Docker Image - Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Elastic boolDisk - Enable
Local boolDisk Encryption - Gcp
Attributes JobNew Cluster Gcp Attributes - Idempotency
Token string - Init
Scripts []JobNew Cluster Init Script - Instance
Pool stringId - Libraries
[]Job
New Cluster Library - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Node
Type stringId - Num
Workers int - Policy
Id string - Runtime
Engine string - Single
User stringName - Spark
Conf map[string]string - Spark
Env map[string]stringVars - Ssh
Public []stringKeys - Workload
Type JobNew Cluster Workload Type - isn't supported
- spark
Version String - apply
Policy BooleanDefault Values - autoscale
Job
New Cluster Autoscale - aws
Attributes JobNew Cluster Aws Attributes - azure
Attributes JobNew Cluster Azure Attributes - cluster
Id String - cluster
Log JobConf New Cluster Cluster Log Conf - cluster
Mount List<JobInfos New Cluster Cluster Mount Info> - cluster
Name String - Map<String,String>
- data
Security StringMode - docker
Image JobNew Cluster Docker Image - driver
Instance StringPool Id - driver
Node StringType Id - enable
Elastic BooleanDisk - enable
Local BooleanDisk Encryption - gcp
Attributes JobNew Cluster Gcp Attributes - idempotency
Token String - init
Scripts List<JobNew Cluster Init Script> - instance
Pool StringId - libraries
List<Job
New Cluster Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type StringId - num
Workers Integer - policy
Id String - runtime
Engine String - single
User StringName - spark
Conf Map<String,String> - spark
Env Map<String,String>Vars - ssh
Public List<String>Keys - workload
Type JobNew Cluster Workload Type - isn't supported
- spark
Version string - apply
Policy booleanDefault Values - autoscale
Job
New Cluster Autoscale - aws
Attributes JobNew Cluster Aws Attributes - azure
Attributes JobNew Cluster Azure Attributes - cluster
Id string - cluster
Log JobConf New Cluster Cluster Log Conf - cluster
Mount JobInfos New Cluster Cluster Mount Info[] - cluster
Name string - {[key: string]: string}
- data
Security stringMode - docker
Image JobNew Cluster Docker Image - driver
Instance stringPool Id - driver
Node stringType Id - enable
Elastic booleanDisk - enable
Local booleanDisk Encryption - gcp
Attributes JobNew Cluster Gcp Attributes - idempotency
Token string - init
Scripts JobNew Cluster Init Script[] - instance
Pool stringId - libraries
Job
New Cluster Library[] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type stringId - num
Workers number - policy
Id string - runtime
Engine string - single
User stringName - spark
Conf {[key: string]: string} - spark
Env {[key: string]: string}Vars - ssh
Public string[]Keys - workload
Type JobNew Cluster Workload Type - isn't supported
- spark_
version str - apply_
policy_ booldefault_ values - autoscale
Job
New Cluster Autoscale - aws_
attributes JobNew Cluster Aws Attributes - azure_
attributes JobNew Cluster Azure Attributes - cluster_
id str - cluster_
log_ Jobconf New Cluster Cluster Log Conf - cluster_
mount_ Sequence[Jobinfos New Cluster Cluster Mount Info] - cluster_
name str - Mapping[str, str]
- data_
security_ strmode - docker_
image JobNew Cluster Docker Image - driver_
instance_ strpool_ id - driver_
node_ strtype_ id - enable_
elastic_ booldisk - enable_
local_ booldisk_ encryption - gcp_
attributes JobNew Cluster Gcp Attributes - idempotency_
token str - init_
scripts Sequence[JobNew Cluster Init Script] - instance_
pool_ strid - libraries
Sequence[Job
New Cluster Library] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node_
type_ strid - num_
workers int - policy_
id str - runtime_
engine str - single_
user_ strname - spark_
conf Mapping[str, str] - spark_
env_ Mapping[str, str]vars - ssh_
public_ Sequence[str]keys - workload_
type JobNew Cluster Workload Type - isn't supported
- spark
Version String - apply
Policy BooleanDefault Values - autoscale Property Map
- aws
Attributes Property Map - azure
Attributes Property Map - cluster
Id String - cluster
Log Property MapConf - cluster
Mount List<Property Map>Infos - cluster
Name String - Map<String>
- data
Security StringMode - docker
Image Property Map - driver
Instance StringPool Id - driver
Node StringType Id - enable
Elastic BooleanDisk - enable
Local BooleanDisk Encryption - gcp
Attributes Property Map - idempotency
Token String - init
Scripts List<Property Map> - instance
Pool StringId - libraries List<Property Map>
- (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type StringId - num
Workers Number - policy
Id String - runtime
Engine String - single
User StringName - spark
Conf Map<String> - spark
Env Map<String>Vars - ssh
Public List<String>Keys - workload
Type Property Map - isn't supported
JobNewClusterAutoscale, JobNewClusterAutoscaleArgs
- Max
Workers int - Min
Workers int
- Max
Workers int - Min
Workers int
- max
Workers Integer - min
Workers Integer
- max
Workers number - min
Workers number
- max_
workers int - min_
workers int
- max
Workers Number - min
Workers Number
JobNewClusterAwsAttributes, JobNewClusterAwsAttributesArgs
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- availability String
- ebs
Volume IntegerCount - ebs
Volume IntegerIops - ebs
Volume IntegerSize - ebs
Volume IntegerThroughput - ebs
Volume StringType - first
On IntegerDemand - instance
Profile StringArn - spot
Bid IntegerPrice Percent - zone
Id String
- availability string
- ebs
Volume numberCount - ebs
Volume numberIops - ebs
Volume numberSize - ebs
Volume numberThroughput - ebs
Volume stringType - first
On numberDemand - instance
Profile stringArn - spot
Bid numberPrice Percent - zone
Id string
- availability str
- ebs_
volume_ intcount - ebs_
volume_ intiops - ebs_
volume_ intsize - ebs_
volume_ intthroughput - ebs_
volume_ strtype - first_
on_ intdemand - instance_
profile_ strarn - spot_
bid_ intprice_ percent - zone_
id str
- availability String
- ebs
Volume NumberCount - ebs
Volume NumberIops - ebs
Volume NumberSize - ebs
Volume NumberThroughput - ebs
Volume StringType - first
On NumberDemand - instance
Profile StringArn - spot
Bid NumberPrice Percent - zone
Id String
JobNewClusterAzureAttributes, JobNewClusterAzureAttributesArgs
- availability String
- first
On NumberDemand - log
Analytics Property MapInfo - spot
Bid NumberMax Price
JobNewClusterAzureAttributesLogAnalyticsInfo, JobNewClusterAzureAttributesLogAnalyticsInfoArgs
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
- log
Analytics stringPrimary Key - log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
JobNewClusterClusterLogConf, JobNewClusterClusterLogConfArgs
JobNewClusterClusterLogConfDbfs, JobNewClusterClusterLogConfDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobNewClusterClusterLogConfS3, JobNewClusterClusterLogConfS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
JobNewClusterClusterMountInfo, JobNewClusterClusterMountInfoArgs
JobNewClusterClusterMountInfoNetworkFilesystemInfo, JobNewClusterClusterMountInfoNetworkFilesystemInfoArgs
- Server
Address string - Mount
Options string
- Server
Address string - Mount
Options string
- server
Address String - mount
Options String
- server
Address string - mount
Options string
- server_
address str - mount_
options str
- server
Address String - mount
Options String
JobNewClusterDockerImage, JobNewClusterDockerImageArgs
- Url string
- URL of the Docker image
- Basic
Auth JobNew Cluster Docker Image Basic Auth
- Url string
- URL of the Docker image
- Basic
Auth JobNew Cluster Docker Image Basic Auth
- url String
- URL of the Docker image
- basic
Auth JobNew Cluster Docker Image Basic Auth
- url string
- URL of the Docker image
- basic
Auth JobNew Cluster Docker Image Basic Auth
- url str
- URL of the Docker image
- basic_
auth JobNew Cluster Docker Image Basic Auth
- url String
- URL of the Docker image
- basic
Auth Property Map
JobNewClusterDockerImageBasicAuth, JobNewClusterDockerImageBasicAuthArgs
JobNewClusterGcpAttributes, JobNewClusterGcpAttributesArgs
- Availability string
- Boot
Disk intSize - Google
Service stringAccount - Local
Ssd intCount - Use
Preemptible boolExecutors - Zone
Id string
- Availability string
- Boot
Disk intSize - Google
Service stringAccount - Local
Ssd intCount - Use
Preemptible boolExecutors - Zone
Id string
- availability String
- boot
Disk IntegerSize - google
Service StringAccount - local
Ssd IntegerCount - use
Preemptible BooleanExecutors - zone
Id String
- availability string
- boot
Disk numberSize - google
Service stringAccount - local
Ssd numberCount - use
Preemptible booleanExecutors - zone
Id string
- availability str
- boot_
disk_ intsize - google_
service_ straccount - local_
ssd_ intcount - use_
preemptible_ boolexecutors - zone_
id str
- availability String
- boot
Disk NumberSize - google
Service StringAccount - local
Ssd NumberCount - use
Preemptible BooleanExecutors - zone
Id String
JobNewClusterInitScript, JobNewClusterInitScriptArgs
- abfss Property Map
- dbfs Property Map
- file Property Map
- block consisting of single string fields:
- gcs Property Map
- s3 Property Map
- volumes Property Map
- workspace Property Map
JobNewClusterInitScriptAbfss, JobNewClusterInitScriptAbfssArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobNewClusterInitScriptDbfs, JobNewClusterInitScriptDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobNewClusterInitScriptFile, JobNewClusterInitScriptFileArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobNewClusterInitScriptGcs, JobNewClusterInitScriptGcsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobNewClusterInitScriptS3, JobNewClusterInitScriptS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
JobNewClusterInitScriptVolumes, JobNewClusterInitScriptVolumesArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobNewClusterInitScriptWorkspace, JobNewClusterInitScriptWorkspaceArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobNewClusterLibrary, JobNewClusterLibraryArgs
- Cran
Job
New Cluster Library Cran - Egg string
- Jar string
- Maven
Job
New Cluster Library Maven - Pypi
Job
New Cluster Library Pypi - Requirements string
- Whl string
- Cran
Job
New Cluster Library Cran - Egg string
- Jar string
- Maven
Job
New Cluster Library Maven - Pypi
Job
New Cluster Library Pypi - Requirements string
- Whl string
- cran
Job
New Cluster Library Cran - egg String
- jar String
- maven
Job
New Cluster Library Maven - pypi
Job
New Cluster Library Pypi - requirements String
- whl String
- cran
Job
New Cluster Library Cran - egg string
- jar string
- maven
Job
New Cluster Library Maven - pypi
Job
New Cluster Library Pypi - requirements string
- whl string
- cran Property Map
- egg String
- jar String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
JobNewClusterLibraryCran, JobNewClusterLibraryCranArgs
JobNewClusterLibraryMaven, JobNewClusterLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
JobNewClusterLibraryPypi, JobNewClusterLibraryPypiArgs
JobNewClusterWorkloadType, JobNewClusterWorkloadTypeArgs
JobNewClusterWorkloadTypeClients, JobNewClusterWorkloadTypeClientsArgs
JobNotebookTask, JobNotebookTaskArgs
- Notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- Base
Parameters Dictionary<string, string> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - Source string
- Location type of the notebook; can only be
WORKSPACE
or GIT
. When set to WORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set to GIT
, the notebook will be retrieved from a Git repository defined in git_source
. If the value is empty, the task will use GIT
if git_source
is defined and WORKSPACE
otherwise. - Warehouse
Id string - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
- Notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- Base
Parameters map[string]string - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - Source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - Warehouse
Id string - ID of the databricks_sql_endpoint that will be used to execute the task with a SQL notebook.
- notebook
Path String - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters Map<String,String> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source String
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id String - ID of the databricks_sql_endpoint that will be used to execute the task with a SQL notebook.
- notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters {[key: string]: string} - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id string - ID of the databricks_sql_endpoint that will be used to execute the task with a SQL notebook.
- notebook_
path str - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base_
parameters Mapping[str, str] - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source str
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse_
id str - ID of the databricks_sql_endpoint that will be used to execute the task with a SQL notebook.
- notebook
Path String - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters Map<String> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source String
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id String - ID of the databricks_sql_endpoint that will be used to execute the task with a SQL notebook.
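A minimal sketch of a single-task job wiring these notebook task arguments together; the cluster ID, notebook path, and parameter values below are placeholders, not values from this page:
import * as databricks from "@pulumi/databricks";
const notebookJob = new databricks.Job("notebook-job", {
    name: "Notebook task example",
    tasks: [{
        taskKey: "run-notebook",
        existingClusterId: "1234-567890-abcdefgh", // placeholder interactive cluster ID
        notebookTask: {
            notebookPath: "/Shared/example-notebook", // placeholder; workspace paths must be absolute
            source: "WORKSPACE",
            baseParameters: {
                // read inside the notebook with dbutils.widgets.get("env")
                env: "dev",
            },
        },
    }],
});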
JobNotificationSettings, JobNotificationSettingsArgs
- No
Alert boolFor Canceled Runs - (Bool) don't send alert for cancelled runs.
The following parameter is only available at the task level.
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs.
- No
Alert boolFor Canceled Runs - (Bool) don't send alert for cancelled runs.
The following parameter is only available at the task level.
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs.
- no
Alert BooleanFor Canceled Runs - (Bool) don't send alert for cancelled runs.
The following parameter is only available at the task level.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs.
- no
Alert booleanFor Canceled Runs - (Bool) don't send alert for cancelled runs.
The following parameter is only available at the task level.
- no
Alert booleanFor Skipped Runs - (Bool) don't send alert for skipped runs.
- no_
alert_ boolfor_ canceled_ runs - (Bool) don't send alert for cancelled runs.
The following parameter is only available at the task level.
- no_
alert_ boolfor_ skipped_ runs - (Bool) don't send alert for skipped runs.
- no
Alert BooleanFor Canceled Runs - (Bool) don't send alert for cancelled runs.
The following parameter is only available at the task level.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs.
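As a sketch, the job-level notification settings block is set directly on the resource; all values here are illustrative only:
import * as databricks from "@pulumi/databricks";
const quietJob = new databricks.Job("quiet-job", {
    name: "Job without cancellation alerts",
    notificationSettings: {
        noAlertForCanceledRuns: true, // suppress alerts for cancelled runs
    },
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/example-notebook" }, // placeholder path
    }],
});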
JobParameter, JobParameterArgs
JobPipelineTask, JobPipelineTaskArgs
- Pipeline
Id string - The pipeline's unique ID.
- Full
Refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- Pipeline
Id string - The pipeline's unique ID.
- Full
Refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline
Id String - The pipeline's unique ID.
- full
Refresh Boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline
Id string - The pipeline's unique ID.
- full
Refresh boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline_
id str - The pipeline's unique ID.
- full_
refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline
Id String - The pipeline's unique ID.
- full
Refresh Boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
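For illustration, a task block that triggers a pipeline with a full refresh; the pipeline ID below is a placeholder (it could also reference the id of a databricks.Pipeline resource):
import * as databricks from "@pulumi/databricks";
const pipelineJob = new databricks.Job("pipeline-job", {
    name: "Pipeline task example",
    tasks: [{
        taskKey: "refresh-pipeline",
        pipelineTask: {
            pipelineId: "00000000-0000-0000-0000-000000000000", // placeholder pipeline ID
            fullRefresh: true, // request a full refresh instead of an incremental update
        },
    }],
});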
JobPythonWheelTask, JobPythonWheelTaskArgs
- Entry
Point string - Python function as entry point for the task
- Named
Parameters Dictionary<string, string> - Named parameters for the task
- Package
Name string - Name of Python package
- Parameters List<string>
- Parameters for the task
- Entry
Point string - Python function as entry point for the task
- Named
Parameters map[string]string - Named parameters for the task
- Package
Name string - Name of Python package
- Parameters []string
- Parameters for the task
- entry
Point String - Python function as entry point for the task
- named
Parameters Map<String,String> - Named parameters for the task
- package
Name String - Name of Python package
- parameters List<String>
- Parameters for the task
- entry
Point string - Python function as entry point for the task
- named
Parameters {[key: string]: string} - Named parameters for the task
- package
Name string - Name of Python package
- parameters string[]
- Parameters for the task
- entry_
point str - Python function as entry point for the task
- named_
parameters Mapping[str, str] - Named parameters for the task
- package_
name str - Name of Python package
- parameters Sequence[str]
- Parameters for the task
- entry
Point String - Python function as entry point for the task
- named
Parameters Map<String> - Named parameters for the task
- package
Name String - Name of Python package
- parameters List<String>
- Parameters for the task
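A sketch of a wheel-based task; the package name, entry point, wheel location, and cluster ID are hypothetical:
import * as databricks from "@pulumi/databricks";
const wheelJob = new databricks.Job("wheel-job", {
    name: "Python wheel task example",
    tasks: [{
        taskKey: "run-wheel",
        existingClusterId: "1234-567890-abcdefgh", // placeholder cluster ID
        libraries: [{
            whl: "dbfs:/FileStore/wheels/my_pkg-0.1.0-py3-none-any.whl", // hypothetical wheel location
        }],
        pythonWheelTask: {
            packageName: "my_pkg", // hypothetical package
            entryPoint: "main",    // entry point exposed by the package
            namedParameters: { env: "dev" },
        },
    }],
});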
JobQueue, JobQueueArgs
- Enabled bool
- If true, enable queueing for the job.
- Enabled bool
- If true, enable queueing for the job.
- enabled Boolean
- If true, enable queueing for the job.
- enabled boolean
- If true, enable queueing for the job.
- enabled bool
- If true, enable queueing for the job.
- enabled Boolean
- If true, enable queueing for the job.
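For example (illustrative values only):
import * as databricks from "@pulumi/databricks";
const queuedJob = new databricks.Job("queued-job", {
    name: "Queued job example",
    queue: { enabled: true }, // enable queueing for this job
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/example-notebook" }, // placeholder path
    }],
});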
JobRunAs, JobRunAsArgs
- Service
Principal stringName The application ID of an active service principal. Setting this field requires the
servicePrincipal/user
role.Example:
import * as pulumi from "@pulumi/pulumi"; import * as databricks from "@pulumi/databricks";
const _this = new databricks.Job("this", {runAs: { servicePrincipalName: "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }});
import pulumi import pulumi_databricks as databricks this = databricks.Job("this", run_as={ "service_principal_name": "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", })
using System.Collections.Generic; using System.Linq; using Pulumi; using Databricks = Pulumi.Databricks; return await Deployment.RunAsync(() => { var @this = new Databricks.Job("this", new() { RunAs = new Databricks.Inputs.JobRunAsArgs { ServicePrincipalName = "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }, }); });
package main import ( "github.com/pulumi/pulumi-databricks/sdk/go/databricks" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) func main() { pulumi.Run(func(ctx *pulumi.Context) error { _, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{ RunAs: &databricks.JobRunAsArgs{ ServicePrincipalName: pulumi.String("8d23ae77-912e-4a19-81e4-b9c3f5cc9349"), }, }) if err != nil { return err } return nil }) }
package generated_program; import com.pulumi.Context; import com.pulumi.Pulumi; import com.pulumi.core.Output; import com.pulumi.databricks.Job; import com.pulumi.databricks.JobArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.io.File; import java.nio.file.Files; import java.nio.file.Paths; public class App { public static void main(String[] args) { Pulumi.run(App::stack); } public static void stack(Context ctx) { var this_ = new Job("this", JobArgs.builder() .runAs(JobRunAsArgs.builder() .servicePrincipalName("8d23ae77-912e-4a19-81e4-b9c3f5cc9349") .build()) .build()); } }
resources: this: type: databricks:Job properties: runAs: servicePrincipalName: 8d23ae77-912e-4a19-81e4-b9c3f5cc9349
- User
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- Service
Principal stringName The application ID of an active service principal. Setting this field requires the
servicePrincipal/user
role.Example:
import * as pulumi from "@pulumi/pulumi"; import * as databricks from "@pulumi/databricks";
const _this = new databricks.Job("this", {runAs: { servicePrincipalName: "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }});
import pulumi import pulumi_databricks as databricks this = databricks.Job("this", run_as={ "service_principal_name": "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", })
using System.Collections.Generic; using System.Linq; using Pulumi; using Databricks = Pulumi.Databricks; return await Deployment.RunAsync(() => { var @this = new Databricks.Job("this", new() { RunAs = new Databricks.Inputs.JobRunAsArgs { ServicePrincipalName = "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }, }); });
package main import ( "github.com/pulumi/pulumi-databricks/sdk/go/databricks" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) func main() { pulumi.Run(func(ctx *pulumi.Context) error { _, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{ RunAs: &databricks.JobRunAsArgs{ ServicePrincipalName: pulumi.String("8d23ae77-912e-4a19-81e4-b9c3f5cc9349"), }, }) if err != nil { return err } return nil }) }
package generated_program; import com.pulumi.Context; import com.pulumi.Pulumi; import com.pulumi.core.Output; import com.pulumi.databricks.Job; import com.pulumi.databricks.JobArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.io.File; import java.nio.file.Files; import java.nio.file.Paths; public class App { public static void main(String[] args) { Pulumi.run(App::stack); } public static void stack(Context ctx) { var this_ = new Job("this", JobArgs.builder() .runAs(JobRunAsArgs.builder() .servicePrincipalName("8d23ae77-912e-4a19-81e4-b9c3f5cc9349") .build()) .build()); } }
resources: this: type: databricks:Job properties: runAs: servicePrincipalName: 8d23ae77-912e-4a19-81e4-b9c3f5cc9349
- User
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- service
Principal StringName The application ID of an active service principal. Setting this field requires the
servicePrincipal/user
role.Example:
import * as pulumi from "@pulumi/pulumi"; import * as databricks from "@pulumi/databricks";
const _this = new databricks.Job("this", {runAs: { servicePrincipalName: "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }});
import pulumi import pulumi_databricks as databricks this = databricks.Job("this", run_as={ "service_principal_name": "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", })
using System.Collections.Generic; using System.Linq; using Pulumi; using Databricks = Pulumi.Databricks; return await Deployment.RunAsync(() => { var @this = new Databricks.Job("this", new() { RunAs = new Databricks.Inputs.JobRunAsArgs { ServicePrincipalName = "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }, }); });
package main import ( "github.com/pulumi/pulumi-databricks/sdk/go/databricks" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) func main() { pulumi.Run(func(ctx *pulumi.Context) error { _, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{ RunAs: &databricks.JobRunAsArgs{ ServicePrincipalName: pulumi.String("8d23ae77-912e-4a19-81e4-b9c3f5cc9349"), }, }) if err != nil { return err } return nil }) }
package generated_program; import com.pulumi.Context; import com.pulumi.Pulumi; import com.pulumi.core.Output; import com.pulumi.databricks.Job; import com.pulumi.databricks.JobArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.io.File; import java.nio.file.Files; import java.nio.file.Paths; public class App { public static void main(String[] args) { Pulumi.run(App::stack); } public static void stack(Context ctx) { var this_ = new Job("this", JobArgs.builder() .runAs(JobRunAsArgs.builder() .servicePrincipalName("8d23ae77-912e-4a19-81e4-b9c3f5cc9349") .build()) .build()); } }
resources: this: type: databricks:Job properties: runAs: servicePrincipalName: 8d23ae77-912e-4a19-81e4-b9c3f5cc9349
- user
Name String - The email of an active workspace user. Non-admin users can only set this field to their own email.
- service
Principal stringName The application ID of an active service principal. Setting this field requires the
servicePrincipal/user
role.Example:
import * as pulumi from "@pulumi/pulumi"; import * as databricks from "@pulumi/databricks";
const _this = new databricks.Job("this", {runAs: { servicePrincipalName: "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }});
import pulumi import pulumi_databricks as databricks this = databricks.Job("this", run_as={ "service_principal_name": "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", })
using System.Collections.Generic; using System.Linq; using Pulumi; using Databricks = Pulumi.Databricks; return await Deployment.RunAsync(() => { var @this = new Databricks.Job("this", new() { RunAs = new Databricks.Inputs.JobRunAsArgs { ServicePrincipalName = "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }, }); });
package main import ( "github.com/pulumi/pulumi-databricks/sdk/go/databricks" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) func main() { pulumi.Run(func(ctx *pulumi.Context) error { _, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{ RunAs: &databricks.JobRunAsArgs{ ServicePrincipalName: pulumi.String("8d23ae77-912e-4a19-81e4-b9c3f5cc9349"), }, }) if err != nil { return err } return nil }) }
package generated_program; import com.pulumi.Context; import com.pulumi.Pulumi; import com.pulumi.core.Output; import com.pulumi.databricks.Job; import com.pulumi.databricks.JobArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.io.File; import java.nio.file.Files; import java.nio.file.Paths; public class App { public static void main(String[] args) { Pulumi.run(App::stack); } public static void stack(Context ctx) { var this_ = new Job("this", JobArgs.builder() .runAs(JobRunAsArgs.builder() .servicePrincipalName("8d23ae77-912e-4a19-81e4-b9c3f5cc9349") .build()) .build()); } }
resources: this: type: databricks:Job properties: runAs: servicePrincipalName: 8d23ae77-912e-4a19-81e4-b9c3f5cc9349
- user
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- service_
principal_ strname The application ID of an active service principal. Setting this field requires the
servicePrincipal/user
role.Example:
import * as pulumi from "@pulumi/pulumi"; import * as databricks from "@pulumi/databricks";
const _this = new databricks.Job("this", {runAs: { servicePrincipalName: "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }});
import pulumi import pulumi_databricks as databricks this = databricks.Job("this", run_as={ "service_principal_name": "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", })
using System.Collections.Generic; using System.Linq; using Pulumi; using Databricks = Pulumi.Databricks; return await Deployment.RunAsync(() => { var @this = new Databricks.Job("this", new() { RunAs = new Databricks.Inputs.JobRunAsArgs { ServicePrincipalName = "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }, }); });
package main import ( "github.com/pulumi/pulumi-databricks/sdk/go/databricks" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) func main() { pulumi.Run(func(ctx *pulumi.Context) error { _, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{ RunAs: &databricks.JobRunAsArgs{ ServicePrincipalName: pulumi.String("8d23ae77-912e-4a19-81e4-b9c3f5cc9349"), }, }) if err != nil { return err } return nil }) }
package generated_program; import com.pulumi.Context; import com.pulumi.Pulumi; import com.pulumi.core.Output; import com.pulumi.databricks.Job; import com.pulumi.databricks.JobArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.io.File; import java.nio.file.Files; import java.nio.file.Paths; public class App { public static void main(String[] args) { Pulumi.run(App::stack); } public static void stack(Context ctx) { var this_ = new Job("this", JobArgs.builder() .runAs(JobRunAsArgs.builder() .servicePrincipalName("8d23ae77-912e-4a19-81e4-b9c3f5cc9349") .build()) .build()); } }
resources: this: type: databricks:Job properties: runAs: servicePrincipalName: 8d23ae77-912e-4a19-81e4-b9c3f5cc9349
- user_
name str - The email of an active workspace user. Non-admin users can only set this field to their own email.
- service
Principal StringName The application ID of an active service principal. Setting this field requires the
servicePrincipal/user
role.Example:
import * as pulumi from "@pulumi/pulumi"; import * as databricks from "@pulumi/databricks";
const _this = new databricks.Job("this", {runAs: { servicePrincipalName: "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }});
import pulumi import pulumi_databricks as databricks this = databricks.Job("this", run_as={ "service_principal_name": "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", })
using System.Collections.Generic; using System.Linq; using Pulumi; using Databricks = Pulumi.Databricks; return await Deployment.RunAsync(() => { var @this = new Databricks.Job("this", new() { RunAs = new Databricks.Inputs.JobRunAsArgs { ServicePrincipalName = "8d23ae77-912e-4a19-81e4-b9c3f5cc9349", }, }); });
package main import ( "github.com/pulumi/pulumi-databricks/sdk/go/databricks" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) func main() { pulumi.Run(func(ctx *pulumi.Context) error { _, err := databricks.NewJob(ctx, "this", &databricks.JobArgs{ RunAs: &databricks.JobRunAsArgs{ ServicePrincipalName: pulumi.String("8d23ae77-912e-4a19-81e4-b9c3f5cc9349"), }, }) if err != nil { return err } return nil }) }
package generated_program; import com.pulumi.Context; import com.pulumi.Pulumi; import com.pulumi.core.Output; import com.pulumi.databricks.Job; import com.pulumi.databricks.JobArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.io.File; import java.nio.file.Files; import java.nio.file.Paths; public class App { public static void main(String[] args) { Pulumi.run(App::stack); } public static void stack(Context ctx) { var this_ = new Job("this", JobArgs.builder() .runAs(JobRunAsArgs.builder() .servicePrincipalName("8d23ae77-912e-4a19-81e4-b9c3f5cc9349") .build()) .build()); } }
resources: this: type: databricks:Job properties: runAs: servicePrincipalName: 8d23ae77-912e-4a19-81e4-b9c3f5cc9349
- user
Name String - The email of an active workspace user. Non-admin users can only set this field to their own email.
JobRunJobTask, JobRunJobTaskArgs
- Job
Id int - (String) ID of the job
- Job
Parameters Dictionary<string, string> - (Map) Job parameters for the task
- Job
Id int - (String) ID of the job
- Job
Parameters map[string]string - (Map) Job parameters for the task
- job
Id Integer - (String) ID of the job
- job
Parameters Map<String,String> - (Map) Job parameters for the task
- job
Id number - (String) ID of the job
- job
Parameters {[key: string]: string} - (Map) Job parameters for the task
- job_
id int - (String) ID of the job
- job_
parameters Mapping[str, str] - (Map) Job parameters for the task
- job
Id Number - (String) ID of the job
- job
Parameters Map<String> - (Map) Job parameters for the task
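One way to wire this up, assuming the child job's Pulumi resource id parses as the numeric job ID; all names and paths are illustrative:
import * as databricks from "@pulumi/databricks";
const childJob = new databricks.Job("child-job", {
    name: "Child job",
    tasks: [{
        taskKey: "child",
        notebookTask: { notebookPath: "/Shared/child-notebook" }, // placeholder path
    }],
});
const parentJob = new databricks.Job("parent-job", {
    name: "Parent job",
    tasks: [{
        taskKey: "trigger-child",
        runJobTask: {
            jobId: childJob.id.apply(id => parseInt(id, 10)), // numeric ID of the job to run
            jobParameters: { triggered_by: "parent-job" },
        },
    }],
});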
JobSchedule, JobScheduleArgs
- Quartz
Cron stringExpression - A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
- Timezone
Id string - A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
- Pause
Status string - Indicate whether this schedule is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted and a schedule is provided, the server will default to usingUNPAUSED
as a value forpause_status
.
- Quartz
Cron stringExpression - A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
- Timezone
Id string - A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
- Pause
Status string - Indicate whether this schedule is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted and a schedule is provided, the server will default to usingUNPAUSED
as a value forpause_status
.
- quartz
Cron StringExpression - A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
- timezone
Id String - A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
- pause
Status String - Indicate whether this schedule is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted and a schedule is provided, the server will default to usingUNPAUSED
as a value forpause_status
.
- quartz
Cron stringExpression - A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
- timezone
Id string - A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
- pause
Status string - Indicate whether this schedule is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted and a schedule is provided, the server will default to usingUNPAUSED
as a value forpause_status
.
- quartz_
cron_ strexpression - A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
- timezone_
id str - A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
- pause_
status str - Indicate whether this schedule is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted and a schedule is provided, the server will default to usingUNPAUSED
as a value forpause_status
.
- quartz
Cron StringExpression - A Cron expression using Quartz syntax that describes the schedule for a job. This field is required.
- timezone
Id String - A Java timezone ID. The schedule for a job will be resolved with respect to this timezone. See Java TimeZone for details. This field is required.
- pause
Status String - Indicate whether this schedule is paused or not. Either
PAUSED
orUNPAUSED
. When thepause_status
field is omitted and a schedule is provided, the server will default to usingUNPAUSED
as a value forpause_status
.
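A sketch of a nightly schedule; the cron expression and timezone are illustrative:
import * as databricks from "@pulumi/databricks";
const nightlyJob = new databricks.Job("nightly-job", {
    name: "Nightly job",
    schedule: {
        quartzCronExpression: "0 0 2 * * ?", // 02:00 every day, in Quartz syntax
        timezoneId: "UTC",
        pauseStatus: "UNPAUSED", // omit to default to UNPAUSED
    },
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Shared/example-notebook" }, // placeholder path
    }],
});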
JobSparkJarTask, JobSparkJarTaskArgs
- Jar
Uri string - Main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - Parameters List<string>
- (List) Parameters passed to the main method.
- Jar
Uri string - Main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - Parameters []string
- (List) Parameters passed to the main method.
- jar
Uri String - main
Class StringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters List<String>
- (List) Parameters passed to the main method.
- jar
Uri string - main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters string[]
- (List) Parameters passed to the main method.
- jar_
uri str - main_
class_ strname - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters Sequence[str]
- (List) Parameters passed to the main method.
- jar
Uri String - main
Class StringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters List<String>
- (List) Parameters passed to the main method.
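A sketch of a JAR task; the class name, JAR location, and cluster ID are placeholders, and the JAR is attached through the task's libraries list:
import * as databricks from "@pulumi/databricks";
const jarJob = new databricks.Job("jar-job", {
    name: "Spark JAR task example",
    tasks: [{
        taskKey: "run-jar",
        existingClusterId: "1234-567890-abcdefgh", // placeholder cluster ID
        libraries: [{ jar: "dbfs:/FileStore/jars/app.jar" }], // hypothetical JAR location
        sparkJarTask: {
            mainClassName: "com.example.Main", // the main class should call SparkContext.getOrCreate
            parameters: ["--date", "2024-01-01"],
        },
    }],
});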
JobSparkPythonTask, JobSparkPythonTaskArgs
- Python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - Parameters List<string>
- (List) Command line parameters passed to the Python file.
- Source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- Python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - Parameters []string
- (List) Command line parameters passed to the Python file.
- Source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File String - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters List<String>
- (List) Command line parameters passed to the Python file.
- source String
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters string[]
- (List) Command line parameters passed to the Python file.
- source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python_
file str - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters Sequence[str]
- (List) Command line parameters passed to the Python file.
- source str
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File String - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters List<String>
- (List) Command line parameters passed to the Python file.
- source String
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
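An illustrative sketch; the file URI and cluster ID are placeholders:
import * as databricks from "@pulumi/databricks";
const pyJob = new databricks.Job("spark-python-job", {
    name: "Spark Python task example",
    tasks: [{
        taskKey: "run-script",
        existingClusterId: "1234-567890-abcdefgh", // placeholder cluster ID
        sparkPythonTask: {
            pythonFile: "dbfs:/FileStore/scripts/etl.py", // placeholder; cloud URIs and workspace paths also work
            parameters: ["--env", "dev"],
        },
    }],
});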
JobSparkSubmitTask, JobSparkSubmitTaskArgs
- Parameters List<string>
- (List) Command-line parameters passed to spark submit.
- Parameters []string
- (List) Command-line parameters passed to spark submit.
- parameters List<String>
- (List) Command-line parameters passed to spark submit.
- parameters string[]
- (List) Command-line parameters passed to spark submit.
- parameters Sequence[str]
- (List) Command-line parameters passed to spark submit.
- parameters List<String>
- (List) Command-line parameters passed to spark submit.
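A sketch, assuming the application JAR already exists at the given (hypothetical) path; spark-submit tasks are typically run on a dedicated new cluster, so the cluster values below are placeholders too:
import * as databricks from "@pulumi/databricks";
const submitJob = new databricks.Job("spark-submit-job", {
    name: "Spark submit task example",
    tasks: [{
        taskKey: "submit",
        newCluster: {
            numWorkers: 1,
            sparkVersion: "15.4.x-scala2.12", // placeholder; usually looked up via databricks.getSparkVersion
            nodeTypeId: "i3.xlarge",          // placeholder; usually looked up via databricks.getNodeType
        },
        sparkSubmitTask: {
            parameters: [
                "--class", "com.example.Main",
                "dbfs:/FileStore/jars/app.jar", // hypothetical application JAR
                "--env", "dev",
            ],
        },
    }],
});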
JobTask, JobTaskArgs
- Task
Key string - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- Condition
Task JobTask Condition Task - Dbt
Task JobTask Dbt Task - Depends
Ons List<JobTask Depends On> - block specifying dependency(-ies) for a given task.
- Description string
- description for this task.
- Disable
Auto boolOptimization - A flag to disable auto optimization in serverless tasks.
- Email
Notifications JobTask Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- Environment
Key string - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - Existing
Cluster stringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- For
Each JobTask Task For Each Task - Health
Job
Task Health - block described below that specifies health conditions for a given task.
- Job
Cluster stringKey - Identifier of the Job cluster specified in the
job_cluster
block. - Libraries
List<Job
Task Library> - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- Max
Retries int - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - Min
Retry intInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- New
Cluster JobTask New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - Notebook
Task JobTask Notebook Task - Notification
Settings JobTask Notification Settings - An optional block controlling the notification settings on the job level documented below.
- Pipeline
Task JobTask Pipeline Task - Python
Wheel JobTask Task Python Wheel Task - Retry
On boolTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- Run
If string - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - Run
Job JobTask Task Run Job Task - Spark
Jar JobTask Task Spark Jar Task - Spark
Python JobTask Task Spark Python Task - Spark
Submit JobTask Task Spark Submit Task - Sql
Task JobTask Sql Task - Timeout
Seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- Webhook
Notifications JobTask Webhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no
job_cluster_key
,existing_cluster_id
, ornew_cluster
were specified in the task definition, then the task will be executed using serverless compute.
- Task
Key string - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- Condition
Task JobTask Condition Task - Dbt
Task JobTask Dbt Task - Depends
Ons []JobTask Depends On - block specifying dependency(-ies) for a given task.
- Description string
- description for this task.
- Disable
Auto boolOptimization - A flag to disable auto optimization in serverless tasks.
- Email
Notifications JobTask Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- Environment
Key string - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - Existing
Cluster stringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- For
Each JobTask Task For Each Task - Health
Job
Task Health - block described below that specifies health conditions for a given task.
- Job
Cluster stringKey - Identifier of the Job cluster specified in the
job_cluster
block. - Libraries
[]Job
Task Library - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- Max
Retries int - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - Min
Retry intInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- New
Cluster JobTask New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - Notebook
Task JobTask Notebook Task - Notification
Settings JobTask Notification Settings - An optional block controlling the notification settings on the job level documented below.
- Pipeline
Task JobTask Pipeline Task - Python
Wheel JobTask Task Python Wheel Task - Retry
On boolTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- Run
If string - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - Run
Job JobTask Task Run Job Task - Spark
Jar JobTask Task Spark Jar Task - Spark
Python JobTask Task Spark Python Task - Spark
Submit JobTask Task Spark Submit Task - Sql
Task JobTask Sql Task - Timeout
Seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- Webhook
Notifications JobTask Webhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no
job_cluster_key
,existing_cluster_id
, ornew_cluster
were specified in the task definition, then the task will be executed using serverless compute.
- task
Key String - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition
Task JobTask Condition Task - dbt
Task JobTask Dbt Task - depends
Ons List<JobTask Depends On> - block specifying dependency(-ies) for a given task.
- description String
- description for this task.
- disable
Auto BooleanOptimization - A flag to disable auto optimization in serverless tasks.
- email
Notifications JobTask Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment
Key String - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing
Cluster StringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- for
Each JobTask Task For Each Task - health
Job
Task Health - block described below that specifies health conditions for a given task.
- job
Cluster StringKey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries
List<Job
Task Library> - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max
Retries Integer - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min
Retry IntegerInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new
Cluster JobTask New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook
Task JobTask Notebook Task - notification
Settings JobTask Notification Settings - An optional block controlling the notification settings on the job level documented below.
- pipeline
Task JobTask Pipeline Task - python
Wheel JobTask Task Python Wheel Task - retry
On BooleanTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run
If String - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run
Job JobTask Task Run Job Task - spark
Jar JobTask Task Spark Jar Task - spark
Python JobTask Task Spark Python Task - spark
Submit JobTask Task Spark Submit Task - sql
Task JobTask Sql Task - timeout
Seconds Integer - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhook
Notifications JobTask Webhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no
job_cluster_key
,existing_cluster_id
, ornew_cluster
were specified in the task definition, then the task will be executed using serverless compute.
- task
Key string - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition
Task JobTask Condition Task - dbt
Task JobTask Dbt Task - depends
Ons JobTask Depends On[] - block specifying dependency(-ies) for a given task.
- description string
- description for this task.
- disable
Auto booleanOptimization - A flag to disable auto optimization in serverless tasks.
- email
Notifications JobTask Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment
Key string - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing
Cluster stringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- for
Each JobTask Task For Each Task - health
Job
Task Health - block described below that specifies health conditions for a given task.
- job
Cluster stringKey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries
Job
Task Library[] - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max
Retries number - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min
Retry numberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new
Cluster JobTask New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook
Task JobTask Notebook Task - notification
Settings JobTask Notification Settings - An optional block controlling the notification settings on the job level documented below.
- pipeline
Task JobTask Pipeline Task - python
Wheel JobTask Task Python Wheel Task - retry
On booleanTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run
If string - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run
Job JobTask Task Run Job Task - spark
Jar JobTask Task Spark Jar Task - spark
Python JobTask Task Spark Python Task - spark
Submit JobTask Task Spark Submit Task - sql
Task JobTask Sql Task - timeout
Seconds number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhook
Notifications JobTask Webhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no
job_cluster_key
,existing_cluster_id
, ornew_cluster
were specified in the task definition, then the task will be executed using serverless compute.
- task_
key str - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition_
task JobTask Condition Task - dbt_
task JobTask Dbt Task - depends_
ons Sequence[JobTask Depends On] - block specifying dependency(-ies) for a given task.
- description str
- description for this task.
- disable_
auto_ booloptimization - A flag to disable auto optimization in serverless tasks.
- email_
notifications JobTask Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment_
key str - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing_
cluster_ strid - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- for_
each_ Jobtask Task For Each Task - health
Job
Task Health - block described below that specifies health conditions for a given task.
- job_
cluster_ strkey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries
Sequence[Job
Task Library] - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max_
retries int - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min_
retry_ intinterval_ millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new_
cluster JobTask New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook_
task JobTask Notebook Task - notification_
settings JobTask Notification Settings - An optional block controlling the notification settings on the job level documented below.
- pipeline_
task JobTask Pipeline Task - python_
wheel_ Jobtask Task Python Wheel Task - retry_
on_ booltimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run_
if str - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run_
job_ Jobtask Task Run Job Task - spark_
jar_ Jobtask Task Spark Jar Task - spark_
python_ Jobtask Task Spark Python Task - spark_
submit_ Jobtask Task Spark Submit Task - sql_
task JobTask Sql Task - timeout_
seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhook_
notifications JobTask Webhook Notifications - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no
job_cluster_key
,existing_cluster_id
, ornew_cluster
were specified in the task definition, then the task will be executed using serverless compute.
- task
Key String - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition
Task Property Map - dbt
Task Property Map - depends
Ons List<Property Map> - block specifying dependency(-ies) for a given task.
- description String
- description for this task.
- disable
Auto BooleanOptimization - A flag to disable auto optimization in serverless tasks.
- email
Notifications Property Map - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment
Key String - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing
Cluster StringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- for
Each Property MapTask - health Property Map
- block described below that specifies health conditions for a given task.
- job
Cluster StringKey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries List<Property Map>
- (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max
Retries Number - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min
Retry NumberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new
Cluster Property Map - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook
Task Property Map - notification
Settings Property Map - An optional block controlling the notification settings on the job level documented below.
- pipeline
Task Property Map - python
Wheel Property MapTask - retry
On BooleanTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run
If String - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run
Job Property MapTask - spark
Jar Property MapTask - spark
Python Property MapTask - spark
Submit Property MapTask - sql
Task Property Map - timeout
Seconds Number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhook
Notifications Property Map - (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no
job_cluster_key
,existing_cluster_id
, ornew_cluster
were specified in the task definition, then the task will be executed using serverless compute.
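As a sketch of these task-level settings, the task below omits job_cluster_key, existing_cluster_id, and new_cluster, so it would run on serverless compute; the path and values are placeholders:
import * as databricks from "@pulumi/databricks";
const serverlessJob = new databricks.Job("serverless-job", {
    name: "Serverless task with retry settings",
    tasks: [{
        taskKey: "main",
        description: "Runs a notebook on serverless compute",
        // no cluster settings, so the task runs on serverless compute
        notebookTask: { notebookPath: "/Shared/example-notebook" }, // placeholder path
        maxRetries: 2,
        minRetryIntervalMillis: 60000,
        retryOnTimeout: true,
        timeoutSeconds: 3600,
        runIf: "ALL_SUCCESS",
    }],
});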
JobTaskConditionTask, JobTaskConditionTaskArgs
- Left string
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- Op string
The string specifying the operation used to compare operands. Currently, the following operators are supported:
EQUAL_TO
,GREATER_THAN
,GREATER_THAN_OR_EQUAL
,LESS_THAN
,LESS_THAN_OR_EQUAL
,NOT_EQUAL
. (Check the API docs for the latest information).This task does not require a cluster to execute and does not support retries or notifications.
- Right string
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- Left string
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- Op string
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- Right string
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left String
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op String
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right String
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left string
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op string
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right string
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left str
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op str
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right str
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left String
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op String
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right String
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
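A condition task only needs the left, op, and right fields shown above. The sketch below wires one into a job; it assumes a job-level parameter named run_mode defined via the parameters input, and the task key and dynamic value reference are illustrative.
import * as databricks from "@pulumi/databricks";

// Sketch of a condition task: it runs without a cluster and simply
// compares a job parameter against a literal value.
const gatedJob = new databricks.Job("condition-example", {
    name: "Job with a condition task",
    parameters: [{
        name: "run_mode",
        default: "full",
    }],
    tasks: [{
        taskKey: "check_mode",
        conditionTask: {
            left: "{{job.parameters.run_mode}}",
            op: "EQUAL_TO",
            right: "full",
        },
    }],
});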
JobTaskDbtTask, JobTaskDbtTaskArgs
- Commands List<string>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- Catalog string
- The name of the catalog to use inside Unity Catalog.
- Profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - Project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- Schema string
- The name of the schema dbt should run in. Defaults to
default
. - Source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - Warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- Commands []string
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- Catalog string
- The name of the catalog to use inside Unity Catalog.
- Profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - Project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- Schema string
- The name of the schema dbt should run in. Defaults to
default
. - Source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - Warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands List<String>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog String
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory String - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory String - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema String
- The name of the schema dbt should run in. Defaults to
default
. - source String
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id String The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands string[]
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog string
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema string
- The name of the schema dbt should run in. Defaults to
default
. - source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands Sequence[str]
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog str
- The name of the catalog to use inside Unity Catalog.
- profiles_
directory str - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project_
directory str - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema str
- The name of the schema dbt should run in. Defaults to
default
. - source str
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse_
id str The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands List<String>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog String
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory String - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory String - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema String
- The name of the schema dbt should run in. Defaults to
default
. - source String
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id String The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
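Putting the dbt task fields above together, a rough sketch might look as follows. The repository URL, cluster ID, warehouse ID, and dbt commands are placeholders, and the git_source block is included because the project source defaults to GIT when that block is present.
import * as databricks from "@pulumi/databricks";

// Sketch of a dbt task that checks the project out of Git and issues its
// SQL against a SQL warehouse. All identifiers below are placeholders.
const dbtJob = new databricks.Job("dbt-example", {
    name: "dbt job",
    gitSource: {
        url: "https://github.com/example-org/dbt-project", // placeholder repo
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "dbt",
        existingClusterId: "0923-164208-abcd1234", // placeholder cluster ID
        dbtTask: {
            commands: [
                "dbt deps",
                "dbt seed",
                "dbt run",
            ],
            schema: "analytics",
            warehouseId: "1234567890abcdef", // placeholder SQL warehouse ID
        },
    }],
});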
JobTaskDependsOn, JobTaskDependsOnArgs
- Task
Key string - The name of the task this task depends on.
- Outcome string
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- Task
Key string - The name of the task this task depends on.
- Outcome string
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task
Key String - The name of the task this task depends on.
- outcome String
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task
Key string - The name of the task this task depends on.
- outcome string
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task_
key str - The name of the task this task depends on.
- outcome str
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task
Key String - The name of the task this task depends on.
- outcome String
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
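The outcome field above is only meaningful when the upstream task is a condition task. A minimal sketch of a dependency gated on a "true" outcome follows; the task keys, parameter, and notebook path are placeholders.
import * as databricks from "@pulumi/databricks";

// Sketch: "load" only runs when the "check" condition task evaluates to true.
// Tasks (and dependencies) are listed in alphabetical order of task_key.
const gatedPipeline = new databricks.Job("depends-on-example", {
    name: "Job with a gated dependency",
    parameters: [{
        name: "run_mode",
        default: "full",
    }],
    tasks: [
        {
            taskKey: "check",
            conditionTask: {
                left: "{{job.parameters.run_mode}}",
                op: "EQUAL_TO",
                right: "full",
            },
        },
        {
            taskKey: "load",
            dependsOns: [{
                taskKey: "check",
                outcome: "true",
            }],
            notebookTask: {
                notebookPath: "/Workspace/Shared/load", // placeholder path
            },
        },
    ],
});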
JobTaskEmailNotifications, JobTaskEmailNotificationsArgs
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - On
Duration List<string>Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in thehealth
block.The following parameter is only available for the job level configuration.
- On
Failures List<string> - (List) list of emails to notify when the run fails.
- On
Starts List<string> - (List) list of emails to notify when the run starts.
- On
Streaming List<string>Backlog Exceededs - On
Successes List<string> - (List) list of emails to notify when the run completes successfully.
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - On
Duration []stringWarning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in thehealth
block.The following parameter is only available for the job level configuration.
- On
Failures []string - (List) list of emails to notify when the run fails.
- On
Starts []string - (List) list of emails to notify when the run starts.
- On
Streaming []stringBacklog Exceededs - On
Successes []string - (List) list of emails to notify when the run completes successfully.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration List<String>Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in thehealth
block.The following parameter is only available for the job level configuration.
- on
Failures List<String> - (List) list of emails to notify when the run fails.
- on
Starts List<String> - (List) list of emails to notify when the run starts.
- on
Streaming List<String>Backlog Exceededs - on
Successes List<String> - (List) list of emails to notify when the run completes successfully.
- no
Alert booleanFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration string[]Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in thehealth
block.The following parameter is only available for the job level configuration.
- on
Failures string[] - (List) list of emails to notify when the run fails.
- on
Starts string[] - (List) list of emails to notify when the run starts.
- on
Streaming string[]Backlog Exceededs - on
Successes string[] - (List) list of emails to notify when the run completes successfully.
- no_
alert_ boolfor_ skipped_ runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on_
duration_ Sequence[str]warning_ threshold_ exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in thehealth
block.The following parameter is only available for the job level configuration.
- on_
failures Sequence[str] - (List) list of emails to notify when the run fails.
- on_
starts Sequence[str] - (List) list of emails to notify when the run starts.
- on_
streaming_ Sequence[str]backlog_ exceededs - on_
successes Sequence[str] - (List) list of emails to notify when the run completes successfully.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration List<String>Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS
metric in thehealth
block.The following parameter is only available for the job level configuration.
- on
Failures List<String> - (List) list of emails to notify when the run fails.
- on
Starts List<String> - (List) list of emails to notify when the run starts.
- on
Streaming List<String>Backlog Exceededs - on
Successes List<String> - (List) list of emails to notify when the run completes successfully.
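For example, a task-level email notification block combined with a RUN_DURATION_SECONDS health rule could be sketched as below. The addresses, threshold, and notebook path are placeholders, and the health block is assumed to follow the rules/metric/op/value shape documented further down.
import * as databricks from "@pulumi/databricks";

// Sketch: email on failure and on a duration warning; the duration warning
// is driven by the RUN_DURATION_SECONDS rule in the health block.
const notifiedJob = new databricks.Job("email-notifications-example", {
    name: "Job with task email notifications",
    tasks: [{
        taskKey: "etl",
        notebookTask: {
            notebookPath: "/Workspace/Shared/etl", // placeholder path
        },
        health: {
            rules: [{
                metric: "RUN_DURATION_SECONDS",
                op: "GREATER_THAN",
                value: 3600, // placeholder threshold, in seconds
            }],
        },
        emailNotifications: {
            onDurationWarningThresholdExceededs: ["data-oncall@example.com"],
            onFailures: ["data-oncall@example.com"],
        },
    }],
});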
JobTaskForEachTask, JobTaskForEachTaskArgs
- Inputs string
- (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter.
- Task
Job
Task For Each Task Task - Task to run against the
inputs
list. - Concurrency int
- Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
- Inputs string
- (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter.
- Task
Job
Task For Each Task Task - Task to run against the
inputs
list. - Concurrency int
- Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
- inputs String
- (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter.
- task
Job
Task For Each Task Task - Task to run against the
inputs
list. - concurrency Integer
- Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
- inputs string
- (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter.
- task
Job
Task For Each Task Task - Task to run against the
inputs
list. - concurrency number
- Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
- inputs str
- (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter.
- task
Job
Task For Each Task Task - Task to run against the
inputs
list. - concurrency int
- Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
- inputs String
- (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter.
- task Property Map
- Task to run against the
inputs
list. - concurrency Number
- Controls the number of active iteration task runs. Default is 20, maximum allowed is 100.
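As a concrete sketch of the inputs/concurrency/task trio above, the job below fans a notebook out over a JSON list of values. The notebook path, parameter name, and the {{input}} reference used inside the nested task are assumptions for illustration rather than part of the schema listing.
import * as databricks from "@pulumi/databricks";

// Sketch of a for_each task: iterate over a JSON-encoded list with at most
// two concurrent iteration runs, passing each element to the nested task.
const fanOutJob = new databricks.Job("for-each-example", {
    name: "Job with a for_each task",
    tasks: [{
        taskKey: "process_regions",
        forEachTask: {
            inputs: JSON.stringify(["amer", "apac", "emea"]),
            concurrency: 2,
            task: {
                taskKey: "process_one_region",
                notebookTask: {
                    notebookPath: "/Workspace/Shared/process_region", // placeholder
                    baseParameters: {
                        region: "{{input}}", // assumed iteration-value reference
                    },
                },
            },
        },
    }],
});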
JobTaskForEachTaskTask, JobTaskForEachTaskTaskArgs
- Task
Key string - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- Condition
Task JobTask For Each Task Task Condition Task - Dbt
Task JobTask For Each Task Task Dbt Task - Depends
Ons List<JobTask For Each Task Task Depends On> - block specifying dependency(-ies) for a given task.
- Description string
- description for this task.
- Disable
Auto boolOptimization - A flag to disable auto optimization in serverless tasks.
- Email
Notifications JobTask For Each Task Task Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- Environment
Key string - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - Existing
Cluster stringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- Health
Job
Task For Each Task Task Health - block described below that specifies health conditions for a given task.
- Job
Cluster stringKey - Identifier of the Job cluster specified in the
job_cluster
block. - Libraries
List<Job
Task For Each Task Task Library> - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- Max
Retries int - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - Min
Retry intInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- New
Cluster JobTask For Each Task Task New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - Notebook
Task JobTask For Each Task Task Notebook Task - Notification
Settings JobTask For Each Task Task Notification Settings - An optional block controlling the notification settings on the job level documented below.
- Pipeline
Task JobTask For Each Task Task Pipeline Task - Python
Wheel JobTask Task For Each Task Task Python Wheel Task - Retry
On boolTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- Run
If string - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - Run
Job JobTask Task For Each Task Task Run Job Task - Spark
Jar JobTask Task For Each Task Task Spark Jar Task - Spark
Python JobTask Task For Each Task Task Spark Python Task - Spark
Submit JobTask Task For Each Task Task Spark Submit Task - Sql
Task JobTask For Each Task Task Sql Task - Timeout
Seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- WebhookNotifications JobTaskForEachTaskTaskWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.
- Task
Key string - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- Condition
Task JobTask For Each Task Task Condition Task - Dbt
Task JobTask For Each Task Task Dbt Task - Depends
Ons []JobTask For Each Task Task Depends On - block specifying dependency(-ies) for a given task.
- Description string
- description for this task.
- Disable
Auto boolOptimization - A flag to disable auto optimization in serverless tasks.
- Email
Notifications JobTask For Each Task Task Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- Environment
Key string - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - Existing
Cluster stringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- Health
Job
Task For Each Task Task Health - block described below that specifies health conditions for a given task.
- Job
Cluster stringKey - Identifier of the Job cluster specified in the
job_cluster
block. - Libraries
[]Job
Task For Each Task Task Library - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- Max
Retries int - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - Min
Retry intInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- New
Cluster JobTask For Each Task Task New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - Notebook
Task JobTask For Each Task Task Notebook Task - Notification
Settings JobTask For Each Task Task Notification Settings - An optional block controlling the notification settings on the job level documented below.
- Pipeline
Task JobTask For Each Task Task Pipeline Task - Python
Wheel JobTask Task For Each Task Task Python Wheel Task - Retry
On boolTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- Run
If string - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - Run
Job JobTask Task For Each Task Task Run Job Task - Spark
Jar JobTask Task For Each Task Task Spark Jar Task - Spark
Python JobTask Task For Each Task Task Spark Python Task - Spark
Submit JobTask Task For Each Task Task Spark Submit Task - Sql
Task JobTask For Each Task Task Sql Task - Timeout
Seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- WebhookNotifications JobTaskForEachTaskTaskWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.
- task
Key String - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition
Task JobTask For Each Task Task Condition Task - dbt
Task JobTask For Each Task Task Dbt Task - depends
Ons List<JobTask For Each Task Task Depends On> - block specifying dependency(-ies) for a given task.
- description String
- description for this task.
- disable
Auto BooleanOptimization - A flag to disable auto optimization in serverless tasks.
- email
Notifications JobTask For Each Task Task Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment
Key String - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing
Cluster StringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- health
Job
Task For Each Task Task Health - block described below that specifies health conditions for a given task.
- job
Cluster StringKey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries
List<Job
Task For Each Task Task Library> - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max
Retries Integer - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min
Retry IntegerInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new
Cluster JobTask For Each Task Task New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook
Task JobTask For Each Task Task Notebook Task - notification
Settings JobTask For Each Task Task Notification Settings - An optional block controlling the notification settings on the job level documented below.
- pipeline
Task JobTask For Each Task Task Pipeline Task - python
Wheel JobTask Task For Each Task Task Python Wheel Task - retry
On BooleanTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run
If String - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run
Job JobTask Task For Each Task Task Run Job Task - spark
Jar JobTask Task For Each Task Task Spark Jar Task - spark
Python JobTask Task For Each Task Task Spark Python Task - spark
Submit JobTask Task For Each Task Task Spark Submit Task - sql
Task JobTask For Each Task Task Sql Task - timeout
Seconds Integer - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhookNotifications JobTaskForEachTaskTaskWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.
- task
Key string - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition
Task JobTask For Each Task Task Condition Task - dbt
Task JobTask For Each Task Task Dbt Task - depends
Ons JobTask For Each Task Task Depends On[] - block specifying dependency(-ies) for a given task.
- description string
- description for this task.
- disable
Auto booleanOptimization - A flag to disable auto optimization in serverless tasks.
- email
Notifications JobTask For Each Task Task Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment
Key string - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing
Cluster stringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- health
Job
Task For Each Task Task Health - block described below that specifies health conditions for a given task.
- job
Cluster stringKey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries
Job
Task For Each Task Task Library[] - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max
Retries number - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min
Retry numberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new
Cluster JobTask For Each Task Task New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook
Task JobTask For Each Task Task Notebook Task - notification
Settings JobTask For Each Task Task Notification Settings - An optional block controlling the notification settings on the job level documented below.
- pipeline
Task JobTask For Each Task Task Pipeline Task - python
Wheel JobTask Task For Each Task Task Python Wheel Task - retry
On booleanTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run
If string - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run
Job JobTask Task For Each Task Task Run Job Task - spark
Jar JobTask Task For Each Task Task Spark Jar Task - spark
Python JobTask Task For Each Task Task Spark Python Task - spark
Submit JobTask Task For Each Task Task Spark Submit Task - sql
Task JobTask For Each Task Task Sql Task - timeout
Seconds number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhookNotifications JobTaskForEachTaskTaskWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.
- task_
key str - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition_
task JobTask For Each Task Task Condition Task - dbt_
task JobTask For Each Task Task Dbt Task - depends_
ons Sequence[JobTask For Each Task Task Depends On] - block specifying dependency(-ies) for a given task.
- description str
- description for this task.
- disable_
auto_ booloptimization - A flag to disable auto optimization in serverless tasks.
- email_
notifications JobTask For Each Task Task Email Notifications - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment_
key str - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing_
cluster_ strid - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- health
Job
Task For Each Task Task Health - block described below that specifies health conditions for a given task.
- job_
cluster_ strkey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries
Sequence[Job
Task For Each Task Task Library] - (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max_
retries int - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min_
retry_ intinterval_ millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new_
cluster JobTask For Each Task Task New Cluster - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook_
task JobTask For Each Task Task Notebook Task - notification_
settings JobTask For Each Task Task Notification Settings - An optional block controlling the notification settings on the job level documented below.
- pipeline_
task JobTask For Each Task Task Pipeline Task - python_
wheel_ Jobtask Task For Each Task Task Python Wheel Task - retry_
on_ booltimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run_
if str - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run_
job_ Jobtask Task For Each Task Task Run Job Task - spark_
jar_ Jobtask Task For Each Task Task Spark Jar Task - spark_
python_ Jobtask Task For Each Task Task Spark Python Task - spark_
submit_ Jobtask Task For Each Task Task Spark Submit Task - sql_
task JobTask For Each Task Task Sql Task - timeout_
seconds int - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhook_notifications JobTaskForEachTaskTaskWebhookNotifications
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.
- task
Key String - string specifying a unique key for a given task.
*_task
- (Required) one of the specific task blocks described below:
- condition
Task Property Map - dbt
Task Property Map - depends
Ons List<Property Map> - block specifying dependency(-ies) for a given task.
- description String
- description for this task.
- disable
Auto BooleanOptimization - A flag to disable auto optimization in serverless tasks.
- email
Notifications Property Map - An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
- environment
Key String - identifier of an
environment
block that is used to specify libraries. Required for some tasks (spark_python_task
,python_wheel_task
, ...) running on serverless compute. - existing
Cluster StringId - Identifier of the interactive cluster to run job on. Note: running tasks on interactive clusters may lead to increased costs!
- health Property Map
- block described below that specifies health conditions for a given task.
- job
Cluster StringKey - Identifier of the Job cluster specified in the
job_cluster
block. - libraries List<Property Map>
- (Set) An optional list of libraries to be installed on the cluster that will execute the job.
- max
Retries Number - (Integer) An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with a
FAILED
orINTERNAL_ERROR
lifecycle state. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry. A run can have the following lifecycle state:PENDING
,RUNNING
,TERMINATING
,TERMINATED
,SKIPPED
orINTERNAL_ERROR
. - min
Retry NumberInterval Millis - (Integer) An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried.
- new
Cluster Property Map - Task will run on a dedicated cluster. See databricks.Cluster documentation for specification. Some parameters, such as
autotermination_minutes
,is_pinned
,workload_type
aren't supported! - notebook
Task Property Map - notification
Settings Property Map - An optional block controlling the notification settings on the job level documented below.
- pipeline
Task Property Map - python
Wheel Property MapTask - retry
On BooleanTimeout - (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.
- run
If String - An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. One of
ALL_SUCCESS
,AT_LEAST_ONE_SUCCESS
,NONE_FAILED
,ALL_DONE
,AT_LEAST_ONE_FAILED
orALL_FAILED
. When omitted, defaults toALL_SUCCESS
. - run
Job Property MapTask - spark
Jar Property MapTask - spark
Python Property MapTask - spark
Submit Property MapTask - sql
Task Property Map - timeout
Seconds Number - (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
- webhookNotifications Property Map
(List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
If no job_cluster_key, existing_cluster_id, or new_cluster is specified in the task definition, then the task will be executed using serverless compute.
JobTaskForEachTaskTaskConditionTask, JobTaskForEachTaskTaskConditionTaskArgs
- Left string
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- Op string
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- Right string
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- Left string
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- Op string
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- Right string
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left String
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op String
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right String
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left string
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op string
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right string
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left str
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op str
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right str
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
- left String
- The left operand of the condition task. It could be a string value, job state, or a parameter reference.
- op String
The string specifying the operation used to compare operands. Currently, the following operators are supported: EQUAL_TO, GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUAL. (Check the API docs for the latest information.) This task does not require a cluster to execute and does not support retries or notifications.
- right String
- The right operand of the condition task. It could be a string value, job state, or parameter reference.
JobTaskForEachTaskTaskDbtTask, JobTaskForEachTaskTaskDbtTaskArgs
- Commands List<string>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- Catalog string
- The name of the catalog to use inside Unity Catalog.
- Profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - Project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- Schema string
- The name of the schema dbt should run in. Defaults to
default
. - Source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - Warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- Commands []string
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- Catalog string
- The name of the catalog to use inside Unity Catalog.
- Profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - Project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- Schema string
- The name of the schema dbt should run in. Defaults to
default
. - Source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - Warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands List<String>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog String
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory String - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory String - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema String
- The name of the schema dbt should run in. Defaults to
default
. - source String
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id String The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands string[]
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog string
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory string - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory string - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema string
- The name of the schema dbt should run in. Defaults to
default
. - source string
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id string The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands Sequence[str]
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog str
- The name of the catalog to use inside Unity Catalog.
- profiles_
directory str - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project_
directory str - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema str
- The name of the schema dbt should run in. Defaults to
default
. - source str
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse_
id str The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
- commands List<String>
- (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
- catalog String
- The name of the catalog to use inside Unity Catalog.
- profiles
Directory String - The relative path to the directory in the repository specified by
git_source
where dbt should look for the profiles.yml
file. If not specified, defaults to the repository's root directory. Equivalent to passing--profile-dir
to a dbt command. - project
Directory String - The path where dbt should look for
dbt_project.yml
. Equivalent to passing--project-dir
to the dbt CLI.- If
source
isGIT
: Relative path to the directory in the repository specified in thegit_source
block. Defaults to the repository's root directory when not specified. - If
source
isWORKSPACE
: Absolute path to the folder in the workspace.
- If
- schema String
- The name of the schema dbt should run in. Defaults to
default
. - source String
- The source of the project. Possible values are
WORKSPACE
andGIT
. Defaults toGIT
if agit_source
block is present in the job definition. - warehouse
Id String The ID of the SQL warehouse that dbt should execute against.
You also need to include a
git_source
block to configure the repository that contains the dbt project.
JobTaskForEachTaskTaskDependsOn, JobTaskForEachTaskTaskDependsOnArgs
- Task
Key string - The name of the task this task depends on.
- Outcome string
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- Task
Key string - The name of the task this task depends on.
- Outcome string
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task
Key String - The name of the task this task depends on.
- outcome String
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task
Key string - The name of the task this task depends on.
- outcome string
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task_
key str - The name of the task this task depends on.
- outcome str
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are "true" or "false".
Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
- task
Key String - The name of the task this task depends on.
- outcome String
Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are
"true"
or"false"
.Similar to the tasks themselves, each dependency inside the task need to be declared in alphabetical order with respect to task_key in order to get consistent Pulumi diffs.
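To make the ordering advice above concrete, here is a sketch of a depends_ons list declared in alphabetical order of task_key. The type path assumes the Node SDK's usual types.input namespace, and the outcome field would only be added when the upstream task is a condition task:

import * as databricks from "@pulumi/databricks";

// Dependencies listed alphabetically by taskKey to keep Pulumi diffs stable.
const dependsOns: databricks.types.input.JobTaskForEachTaskTaskDependsOn[] = [
    { taskKey: "a" },
    { taskKey: "b" },
    // { taskKey: "check", outcome: "true" },  // only valid when "check" is a condition task
];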
JobTaskForEachTaskTaskEmailNotifications, JobTaskForEachTaskTaskEmailNotificationsArgs
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - On
Duration List<string>Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS metric in the health block. The following parameter is only available for the job level configuration.
- On
Failures List<string> - (List) list of emails to notify when the run fails.
- On
Starts List<string> - (List) list of emails to notify when the run starts.
- On
Streaming List<string>Backlog Exceededs - On
Successes List<string> - (List) list of emails to notify when the run completes successfully.
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - On
Duration []stringWarning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS metric in the health block. The following parameter is only available for the job level configuration.
- On
Failures []string - (List) list of emails to notify when the run fails.
- On
Starts []string - (List) list of emails to notify when the run starts.
- On
Streaming []stringBacklog Exceededs - On
Successes []string - (List) list of emails to notify when the run completes successfully.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration List<String>Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS metric in the health block. The following parameter is only available for the job level configuration.
- on
Failures List<String> - (List) list of emails to notify when the run fails.
- on
Starts List<String> - (List) list of emails to notify when the run starts.
- on
Streaming List<String>Backlog Exceededs - on
Successes List<String> - (List) list of emails to notify when the run completes successfully.
- no
Alert booleanFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration string[]Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS metric in the health block. The following parameter is only available for the job level configuration.
- on
Failures string[] - (List) list of emails to notify when the run fails.
- on
Starts string[] - (List) list of emails to notify when the run starts.
- on
Streaming string[]Backlog Exceededs - on
Successes string[] - (List) list of emails to notify when the run completes successfully.
- no_
alert_ boolfor_ skipped_ runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on_
duration_ Sequence[str]warning_ threshold_ exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS metric in the health block. The following parameter is only available for the job level configuration.
- on_
failures Sequence[str] - (List) list of emails to notify when the run fails.
- on_
starts Sequence[str] - (List) list of emails to notify when the run starts.
- on_
streaming_ Sequence[str]backlog_ exceededs - on_
successes Sequence[str] - (List) list of emails to notify when the run completes successfully.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the
notification_settings
configuration block). - on
Duration List<String>Warning Threshold Exceededs (List) list of emails to notify when the duration of a run exceeds the threshold specified by the
RUN_DURATION_SECONDS metric in the health block. The following parameter is only available for the job level configuration.
- on
Failures List<String> - (List) list of emails to notify when the run fails.
- on
Starts List<String> - (List) list of emails to notify when the run starts.
- on
Streaming List<String>Backlog Exceededs - on
Successes List<String> - (List) list of emails to notify when the run completes successfully.
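A sketch of a task-level email_notifications block follows; the addresses are placeholders, and the type is assumed to be exported under the SDK's types.input namespace:

import * as databricks from "@pulumi/databricks";

// Notify on start and failure, but stay quiet for skipped runs.
const emailNotifications: databricks.types.input.JobTaskForEachTaskTaskEmailNotifications = {
    onStarts: ["data-eng@example.com"],   // placeholder address
    onFailures: ["oncall@example.com"],   // placeholder address
    noAlertForSkippedRuns: true,
};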
JobTaskForEachTaskTaskHealth, JobTaskForEachTaskTaskHealthArgs
- Rules
List<Job
Task For Each Task Task Health Rule> - list of rules that are represented as objects with the following attributes:
- Rules
[]Job
Task For Each Task Task Health Rule - list of rules that are represented as objects with the following attributes:
- rules
List<Job
Task For Each Task Task Health Rule> - list of rules that are represented as objects with the following attributes:
- rules
Job
Task For Each Task Task Health Rule[] - list of rules that are represented as objects with the following attributes:
- rules
Sequence[Job
Task For Each Task Task Health Rule] - list of rules that are represented as objects with the following attributes:
- rules List<Property Map>
- list of rules that are represented as objects with the following attributes:
JobTaskForEachTaskTaskHealthRule, JobTaskForEachTaskTaskHealthRuleArgs
- Metric string
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - Op string
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - Value int
- integer value used to compare to the given metric.
- Metric string
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - Op string
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - Value int
- integer value used to compare to the given metric.
- metric String
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op String
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value Integer
- integer value used to compare to the given metric.
- metric string
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op string
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value number
- integer value used to compare to the given metric.
- metric str
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op str
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value int
- integer value used to compare to the given metric.
- metric String
- string specifying the metric to check. The only supported metric is
RUN_DURATION_SECONDS
(check Jobs REST API documentation for the latest information). - op String
- string specifying the operation used to evaluate the given metric. The only supported operation is
GREATER_THAN
. - value Number
- integer value used to compare to the given metric.
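For example, a health block that flags runs lasting longer than one hour might look like the following sketch; the 3600-second threshold is an arbitrary illustration and the type path assumes the SDK's types.input namespace:

import * as databricks from "@pulumi/databricks";

// Alert when RUN_DURATION_SECONDS exceeds one hour.
const health: databricks.types.input.JobTaskForEachTaskTaskHealth = {
    rules: [{
        metric: "RUN_DURATION_SECONDS",
        op: "GREATER_THAN",
        value: 3600,
    }],
};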
JobTaskForEachTaskTaskLibrary, JobTaskForEachTaskTaskLibraryArgs
- cran Property Map
- egg String
- jar String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
JobTaskForEachTaskTaskLibraryCran, JobTaskForEachTaskTaskLibraryCranArgs
JobTaskForEachTaskTaskLibraryMaven, JobTaskForEachTaskTaskLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
JobTaskForEachTaskTaskLibraryPypi, JobTaskForEachTaskTaskLibraryPypiArgs
JobTaskForEachTaskTaskNewCluster, JobTaskForEachTaskTaskNewClusterArgs
- Spark
Version string - Apply
Policy boolDefault Values - Autoscale
Job
Task For Each Task Task New Cluster Autoscale - Aws
Attributes JobTask For Each Task Task New Cluster Aws Attributes - Azure
Attributes JobTask For Each Task Task New Cluster Azure Attributes - Cluster
Id string - Cluster
Log JobConf Task For Each Task Task New Cluster Cluster Log Conf - Cluster
Mount List<JobInfos Task For Each Task Task New Cluster Cluster Mount Info> - Cluster
Name string - Dictionary<string, string>
- Data
Security stringMode - Docker
Image JobTask For Each Task Task New Cluster Docker Image - Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Elastic boolDisk - Enable
Local boolDisk Encryption - Gcp
Attributes JobTask For Each Task Task New Cluster Gcp Attributes - Idempotency
Token string - Init
Scripts List<JobTask For Each Task Task New Cluster Init Script> - Instance
Pool stringId - Libraries
List<Job
Task For Each Task Task New Cluster Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Node
Type stringId - Num
Workers int - Policy
Id string - Runtime
Engine string - Single
User stringName - Spark
Conf Dictionary<string, string> - Spark
Env Dictionary<string, string>Vars - Ssh
Public List<string>Keys - Workload
Type JobTask For Each Task Task New Cluster Workload Type - isn't supported
- Spark
Version string - Apply
Policy boolDefault Values - Autoscale
Job
Task For Each Task Task New Cluster Autoscale - Aws
Attributes JobTask For Each Task Task New Cluster Aws Attributes - Azure
Attributes JobTask For Each Task Task New Cluster Azure Attributes - Cluster
Id string - Cluster
Log JobConf Task For Each Task Task New Cluster Cluster Log Conf - Cluster
Mount []JobInfos Task For Each Task Task New Cluster Cluster Mount Info - Cluster
Name string - map[string]string
- Data
Security stringMode - Docker
Image JobTask For Each Task Task New Cluster Docker Image - Driver
Instance stringPool Id - Driver
Node stringType Id - Enable
Elastic boolDisk - Enable
Local boolDisk Encryption - Gcp
Attributes JobTask For Each Task Task New Cluster Gcp Attributes - Idempotency
Token string - Init
Scripts []JobTask For Each Task Task New Cluster Init Script - Instance
Pool stringId - Libraries
[]Job
Task For Each Task Task New Cluster Library - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- Node
Type stringId - Num
Workers int - Policy
Id string - Runtime
Engine string - Single
User stringName - Spark
Conf map[string]string - Spark
Env map[string]stringVars - Ssh
Public []stringKeys - Workload
Type JobTask For Each Task Task New Cluster Workload Type - isn't supported
- spark
Version String - apply
Policy BooleanDefault Values - autoscale
Job
Task For Each Task Task New Cluster Autoscale - aws
Attributes JobTask For Each Task Task New Cluster Aws Attributes - azure
Attributes JobTask For Each Task Task New Cluster Azure Attributes - cluster
Id String - cluster
Log JobConf Task For Each Task Task New Cluster Cluster Log Conf - cluster
Mount List<JobInfos Task For Each Task Task New Cluster Cluster Mount Info> - cluster
Name String - Map<String,String>
- data
Security StringMode - docker
Image JobTask For Each Task Task New Cluster Docker Image - driver
Instance StringPool Id - driver
Node StringType Id - enable
Elastic BooleanDisk - enable
Local BooleanDisk Encryption - gcp
Attributes JobTask For Each Task Task New Cluster Gcp Attributes - idempotency
Token String - init
Scripts List<JobTask For Each Task Task New Cluster Init Script> - instance
Pool StringId - libraries
List<Job
Task For Each Task Task New Cluster Library> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type StringId - num
Workers Integer - policy
Id String - runtime
Engine String - single
User StringName - spark
Conf Map<String,String> - spark
Env Map<String,String>Vars - ssh
Public List<String>Keys - workload
Type JobTask For Each Task Task New Cluster Workload Type - isn't supported
- spark
Version string - apply
Policy booleanDefault Values - autoscale
Job
Task For Each Task Task New Cluster Autoscale - aws
Attributes JobTask For Each Task Task New Cluster Aws Attributes - azure
Attributes JobTask For Each Task Task New Cluster Azure Attributes - cluster
Id string - cluster
Log JobConf Task For Each Task Task New Cluster Cluster Log Conf - cluster
Mount JobInfos Task For Each Task Task New Cluster Cluster Mount Info[] - cluster
Name string - {[key: string]: string}
- data
Security stringMode - docker
Image JobTask For Each Task Task New Cluster Docker Image - driver
Instance stringPool Id - driver
Node stringType Id - enable
Elastic booleanDisk - enable
Local booleanDisk Encryption - gcp
Attributes JobTask For Each Task Task New Cluster Gcp Attributes - idempotency
Token string - init
Scripts JobTask For Each Task Task New Cluster Init Script[] - instance
Pool stringId - libraries
Job
Task For Each Task Task New Cluster Library[] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type stringId - num
Workers number - policy
Id string - runtime
Engine string - single
User stringName - spark
Conf {[key: string]: string} - spark
Env {[key: string]: string}Vars - ssh
Public string[]Keys - workload
Type JobTask For Each Task Task New Cluster Workload Type - isn't supported
- spark_
version str - apply_
policy_ booldefault_ values - autoscale
Job
Task For Each Task Task New Cluster Autoscale - aws_
attributes JobTask For Each Task Task New Cluster Aws Attributes - azure_
attributes JobTask For Each Task Task New Cluster Azure Attributes - cluster_
id str - cluster_
log_ Jobconf Task For Each Task Task New Cluster Cluster Log Conf - cluster_
mount_ Sequence[Jobinfos Task For Each Task Task New Cluster Cluster Mount Info] - cluster_
name str - Mapping[str, str]
- data_
security_ strmode - docker_
image JobTask For Each Task Task New Cluster Docker Image - driver_
instance_ strpool_ id - driver_
node_ strtype_ id - enable_
elastic_ booldisk - enable_
local_ booldisk_ encryption - gcp_
attributes JobTask For Each Task Task New Cluster Gcp Attributes - idempotency_
token str - init_
scripts Sequence[JobTask For Each Task Task New Cluster Init Script] - instance_
pool_ strid - libraries
Sequence[Job
Task For Each Task Task New Cluster Library] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node_
type_ strid - num_
workers int - policy_
id str - runtime_
engine str - single_
user_ strname - spark_
conf Mapping[str, str] - spark_
env_ Mapping[str, str]vars - ssh_
public_ Sequence[str]keys - workload_
type JobTask For Each Task Task New Cluster Workload Type - isn't supported
- spark
Version String - apply
Policy BooleanDefault Values - autoscale Property Map
- aws
Attributes Property Map - azure
Attributes Property Map - cluster
Id String - cluster
Log Property MapConf - cluster
Mount List<Property Map>Infos - cluster
Name String - Map<String>
- data
Security StringMode - docker
Image Property Map - driver
Instance StringPool Id - driver
Node StringType Id - enable
Elastic BooleanDisk - enable
Local BooleanDisk Encryption - gcp
Attributes Property Map - idempotency
Token String - init
Scripts List<Property Map> - instance
Pool StringId - libraries List<Property Map>
- (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node
Type StringId - num
Workers Number - policy
Id String - runtime
Engine String - single
User StringName - spark
Conf Map<String> - spark
Env Map<String>Vars - ssh
Public List<String>Keys - workload
Type Property Map - isn't supported
JobTaskForEachTaskTaskNewClusterAutoscale, JobTaskForEachTaskTaskNewClusterAutoscaleArgs
- Max
Workers int - Min
Workers int
- Max
Workers int - Min
Workers int
- max
Workers Integer - min
Workers Integer
- max
Workers number - min
Workers number
- max_
workers int - min_
workers int
- max
Workers Number - min
Workers Number
JobTaskForEachTaskTaskNewClusterAwsAttributes, JobTaskForEachTaskTaskNewClusterAwsAttributesArgs
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- Availability string
- Ebs
Volume intCount - Ebs
Volume intIops - Ebs
Volume intSize - Ebs
Volume intThroughput - Ebs
Volume stringType - First
On intDemand - Instance
Profile stringArn - Spot
Bid intPrice Percent - Zone
Id string
- availability String
- ebs
Volume IntegerCount - ebs
Volume IntegerIops - ebs
Volume IntegerSize - ebs
Volume IntegerThroughput - ebs
Volume StringType - first
On IntegerDemand - instance
Profile StringArn - spot
Bid IntegerPrice Percent - zone
Id String
- availability string
- ebs
Volume numberCount - ebs
Volume numberIops - ebs
Volume numberSize - ebs
Volume numberThroughput - ebs
Volume stringType - first
On numberDemand - instance
Profile stringArn - spot
Bid numberPrice Percent - zone
Id string
- availability str
- ebs_
volume_ intcount - ebs_
volume_ intiops - ebs_
volume_ intsize - ebs_
volume_ intthroughput - ebs_
volume_ strtype - first_
on_ intdemand - instance_
profile_ strarn - spot_
bid_ intprice_ percent - zone_
id str
- availability String
- ebs
Volume NumberCount - ebs
Volume NumberIops - ebs
Volume NumberSize - ebs
Volume NumberThroughput - ebs
Volume StringType - first
On NumberDemand - instance
Profile StringArn - spot
Bid NumberPrice Percent - zone
Id String
JobTaskForEachTaskTaskNewClusterAzureAttributes, JobTaskForEachTaskTaskNewClusterAzureAttributesArgs
- availability String
- first
On NumberDemand - log
Analytics Property MapInfo - spot
Bid NumberMax Price
JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo, JobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfoArgs
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- Log
Analytics stringPrimary Key - Log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
- log
Analytics stringPrimary Key - log
Analytics stringWorkspace Id
- log
Analytics StringPrimary Key - log
Analytics StringWorkspace Id
JobTaskForEachTaskTaskNewClusterClusterLogConf, JobTaskForEachTaskTaskNewClusterClusterLogConfArgs
JobTaskForEachTaskTaskNewClusterClusterLogConfDbfs, JobTaskForEachTaskTaskNewClusterClusterLogConfDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskForEachTaskTaskNewClusterClusterLogConfS3, JobTaskForEachTaskTaskNewClusterClusterLogConfS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
JobTaskForEachTaskTaskNewClusterClusterMountInfo, JobTaskForEachTaskTaskNewClusterClusterMountInfoArgs
JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo, JobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs
- Server
Address string - Mount
Options string
- Server
Address string - Mount
Options string
- server
Address String - mount
Options String
- server
Address string - mount
Options string
- server_
address str - mount_
options str
- server
Address String - mount
Options String
JobTaskForEachTaskTaskNewClusterDockerImage, JobTaskForEachTaskTaskNewClusterDockerImageArgs
- Url string
- URL of the job on the given workspace
- Basic
Auth JobTask For Each Task Task New Cluster Docker Image Basic Auth
- Url string
- URL of the job on the given workspace
- Basic
Auth JobTask For Each Task Task New Cluster Docker Image Basic Auth
- url String
- URL of the job on the given workspace
- basic
Auth JobTask For Each Task Task New Cluster Docker Image Basic Auth
- url string
- URL of the job on the given workspace
- basic
Auth JobTask For Each Task Task New Cluster Docker Image Basic Auth
- url str
- URL of the job on the given workspace
- basic_
auth JobTask For Each Task Task New Cluster Docker Image Basic Auth
- url String
- URL of the job on the given workspace
- basic
Auth Property Map
JobTaskForEachTaskTaskNewClusterDockerImageBasicAuth, JobTaskForEachTaskTaskNewClusterDockerImageBasicAuthArgs
JobTaskForEachTaskTaskNewClusterGcpAttributes, JobTaskForEachTaskTaskNewClusterGcpAttributesArgs
- Availability string
- Boot
Disk intSize - Google
Service stringAccount - Local
Ssd intCount - Use
Preemptible boolExecutors - Zone
Id string
- Availability string
- Boot
Disk intSize - Google
Service stringAccount - Local
Ssd intCount - Use
Preemptible boolExecutors - Zone
Id string
- availability String
- boot
Disk IntegerSize - google
Service StringAccount - local
Ssd IntegerCount - use
Preemptible BooleanExecutors - zone
Id String
- availability string
- boot
Disk numberSize - google
Service stringAccount - local
Ssd numberCount - use
Preemptible booleanExecutors - zone
Id string
- availability str
- boot_
disk_ intsize - google_
service_ straccount - local_
ssd_ intcount - use_
preemptible_ boolexecutors - zone_
id str
- availability String
- boot
Disk NumberSize - google
Service StringAccount - local
Ssd NumberCount - use
Preemptible BooleanExecutors - zone
Id String
JobTaskForEachTaskTaskNewClusterInitScript, JobTaskForEachTaskTaskNewClusterInitScriptArgs
- Abfss
Job
Task For Each Task Task New Cluster Init Script Abfss - Dbfs
Job
Task For Each Task Task New Cluster Init Script Dbfs - File
Job
Task For Each Task Task New Cluster Init Script File - block consisting of single string fields:
- Gcs
Job
Task For Each Task Task New Cluster Init Script Gcs - S3
Job
Task For Each Task Task New Cluster Init Script S3 - Volumes
Job
Task For Each Task Task New Cluster Init Script Volumes - Workspace
Job
Task For Each Task Task New Cluster Init Script Workspace
- Abfss
Job
Task For Each Task Task New Cluster Init Script Abfss - Dbfs
Job
Task For Each Task Task New Cluster Init Script Dbfs - File
Job
Task For Each Task Task New Cluster Init Script File - block consisting of single string fields:
- Gcs
Job
Task For Each Task Task New Cluster Init Script Gcs - S3
Job
Task For Each Task Task New Cluster Init Script S3 - Volumes
Job
Task For Each Task Task New Cluster Init Script Volumes - Workspace
Job
Task For Each Task Task New Cluster Init Script Workspace
- abfss
Job
Task For Each Task Task New Cluster Init Script Abfss - dbfs
Job
Task For Each Task Task New Cluster Init Script Dbfs - file
Job
Task For Each Task Task New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Task For Each Task Task New Cluster Init Script Gcs - s3
Job
Task For Each Task Task New Cluster Init Script S3 - volumes
Job
Task For Each Task Task New Cluster Init Script Volumes - workspace
Job
Task For Each Task Task New Cluster Init Script Workspace
- abfss
Job
Task For Each Task Task New Cluster Init Script Abfss - dbfs
Job
Task For Each Task Task New Cluster Init Script Dbfs - file
Job
Task For Each Task Task New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Task For Each Task Task New Cluster Init Script Gcs - s3
Job
Task For Each Task Task New Cluster Init Script S3 - volumes
Job
Task For Each Task Task New Cluster Init Script Volumes - workspace
Job
Task For Each Task Task New Cluster Init Script Workspace
- abfss
Job
Task For Each Task Task New Cluster Init Script Abfss - dbfs
Job
Task For Each Task Task New Cluster Init Script Dbfs - file
Job
Task For Each Task Task New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Task For Each Task Task New Cluster Init Script Gcs - s3
Job
Task For Each Task Task New Cluster Init Script S3 - volumes
Job
Task For Each Task Task New Cluster Init Script Volumes - workspace
Job
Task For Each Task Task New Cluster Init Script Workspace
- abfss Property Map
- dbfs Property Map
- file Property Map
- block consisting of single string fields:
- gcs Property Map
- s3 Property Map
- volumes Property Map
- workspace Property Map
JobTaskForEachTaskTaskNewClusterInitScriptAbfss, JobTaskForEachTaskTaskNewClusterInitScriptAbfssArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskForEachTaskTaskNewClusterInitScriptDbfs, JobTaskForEachTaskTaskNewClusterInitScriptDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskForEachTaskTaskNewClusterInitScriptFile, JobTaskForEachTaskTaskNewClusterInitScriptFileArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskForEachTaskTaskNewClusterInitScriptGcs, JobTaskForEachTaskTaskNewClusterInitScriptGcsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskForEachTaskTaskNewClusterInitScriptS3, JobTaskForEachTaskTaskNewClusterInitScriptS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
JobTaskForEachTaskTaskNewClusterInitScriptVolumes, JobTaskForEachTaskTaskNewClusterInitScriptVolumesArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskForEachTaskTaskNewClusterInitScriptWorkspace, JobTaskForEachTaskTaskNewClusterInitScriptWorkspaceArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskForEachTaskTaskNewClusterLibrary, JobTaskForEachTaskTaskNewClusterLibraryArgs
- cran Property Map
- egg String
- jar String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
JobTaskForEachTaskTaskNewClusterLibraryCran, JobTaskForEachTaskTaskNewClusterLibraryCranArgs
JobTaskForEachTaskTaskNewClusterLibraryMaven, JobTaskForEachTaskTaskNewClusterLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
JobTaskForEachTaskTaskNewClusterLibraryPypi, JobTaskForEachTaskTaskNewClusterLibraryPypiArgs
JobTaskForEachTaskTaskNewClusterWorkloadType, JobTaskForEachTaskTaskNewClusterWorkloadTypeArgs
JobTaskForEachTaskTaskNewClusterWorkloadTypeClients, JobTaskForEachTaskTaskNewClusterWorkloadTypeClientsArgs
JobTaskForEachTaskTaskNotebookTask, JobTaskForEachTaskTaskNotebookTaskArgs
- Notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- Base
Parameters Dictionary<string, string> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - Source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - Warehouse
Id string - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
- Notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- Base
Parameters map[string]string - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - Source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - Warehouse
Id string - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
- notebook
Path String - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters Map<String,String> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source String
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id String - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
- notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters {[key: string]: string} - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id string - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
- notebook_
path str - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base_
parameters Mapping[str, str] - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source str
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse_
id str - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
- notebook
Path String - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters Map<String> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source String
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id String - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task with a SQL notebook.
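A sketch of a notebook_task block with base parameters, assuming a workspace notebook; the path and parameter values are placeholders, and the type path assumes the SDK's types.input namespace:

import * as databricks from "@pulumi/databricks";

const notebookTask: databricks.types.input.JobTaskForEachTaskTaskNotebookTask = {
    notebookPath: "/Shared/etl/ingest",  // absolute path for workspace notebooks (placeholder)
    source: "WORKSPACE",
    baseParameters: {
        env: "prod",                     // read in the notebook via dbutils.widgets.get("env")
    },
};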
JobTaskForEachTaskTaskNotificationSettings, JobTaskForEachTaskTaskNotificationSettingsArgs
- Alert
On boolLast Attempt - (Bool) do not send notifications to recipients specified in
on_start
for the retried runs and do not send notifications to recipients specified inon_failure
until the last retry of the run. - No
Alert boolFor Canceled Runs (Bool) don't send alert for cancelled runs.
The following parameter is only available on task level.
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs.
- Alert
On boolLast Attempt - (Bool) do not send notifications to recipients specified in
on_start
for the retried runs and do not send notifications to recipients specified inon_failure
until the last retry of the run. - No
Alert boolFor Canceled Runs (Bool) don't send alert for cancelled runs.
The following parameter is only available on task level.
- No
Alert boolFor Skipped Runs - (Bool) don't send alert for skipped runs.
- alert
On BooleanLast Attempt - (Bool) do not send notifications to recipients specified in
on_start
for the retried runs and do not send notifications to recipients specified inon_failure
until the last retry of the run. - no
Alert BooleanFor Canceled Runs (Bool) don't send alert for cancelled runs.
The following parameter is only available on task level.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs.
- alert
On booleanLast Attempt - (Bool) do not send notifications to recipients specified in
on_start
for the retried runs and do not send notifications to recipients specified inon_failure
until the last retry of the run. - no
Alert booleanFor Canceled Runs (Bool) don't send alert for cancelled runs.
The following parameter is only available on task level.
- no
Alert booleanFor Skipped Runs - (Bool) don't send alert for skipped runs.
- alert_
on_ boollast_ attempt - (Bool) do not send notifications to recipients specified in
on_start
for the retried runs and do not send notifications to recipients specified inon_failure
until the last retry of the run. - no_
alert_ boolfor_ canceled_ runs (Bool) don't send alert for cancelled runs.
The following parameter is only available on task level.
- no_
alert_ boolfor_ skipped_ runs - (Bool) don't send alert for skipped runs.
- alert
On BooleanLast Attempt - (Bool) do not send notifications to recipients specified in
on_start
for the retried runs and do not send notifications to recipients specified inon_failure
until the last retry of the run. - no
Alert BooleanFor Canceled Runs (Bool) don't send alert for cancelled runs.
The following parameter is only available on task level.
- no
Alert BooleanFor Skipped Runs - (Bool) don't send alert for skipped runs.
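A sketch of a notification_settings block that silences noise from canceled and skipped runs and only alerts on the last retry (type path assumed from the SDK's types.input namespace):

import * as databricks from "@pulumi/databricks";

const notificationSettings: databricks.types.input.JobTaskForEachTaskTaskNotificationSettings = {
    alertOnLastAttempt: true,
    noAlertForCanceledRuns: true,
    noAlertForSkippedRuns: true,
};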
JobTaskForEachTaskTaskPipelineTask, JobTaskForEachTaskTaskPipelineTaskArgs
- Pipeline
Id string - The pipeline's unique ID.
- Full
Refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- Pipeline
Id string - The pipeline's unique ID.
- Full
Refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline
Id String - The pipeline's unique ID.
- full
Refresh Boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline
Id string - The pipeline's unique ID.
- full
Refresh boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline_
id str - The pipeline's unique ID.
- full_
refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- pipeline
Id String - The pipeline's unique ID.
- full
Refresh Boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
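A sketch of a pipeline_task block that triggers a full refresh of an existing pipeline; the referenced databricks.Pipeline resource is hypothetical and assumed to be declared elsewhere in the program:

import * as databricks from "@pulumi/databricks";

// `etlPipeline` is a hypothetical databricks.Pipeline declared elsewhere in the stack.
declare const etlPipeline: databricks.Pipeline;

const pipelineTask: databricks.types.input.JobTaskForEachTaskTaskPipelineTask = {
    pipelineId: etlPipeline.id,
    fullRefresh: true,
};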
JobTaskForEachTaskTaskPythonWheelTask, JobTaskForEachTaskTaskPythonWheelTaskArgs
- Entry
Point string - Python function as entry point for the task
- Named
Parameters Dictionary<string, string> - Named parameters for the task
- Package
Name string - Name of Python package
- Parameters List<string>
- Parameters for the task
- Entry
Point string - Python function as entry point for the task
- Named
Parameters map[string]string - Named parameters for the task
- Package
Name string - Name of Python package
- Parameters []string
- Parameters for the task
- entry
Point String - Python function as entry point for the task
- named
Parameters Map<String,String> - Named parameters for the task
- package
Name String - Name of Python package
- parameters List<String>
- Parameters for the task
- entry
Point string - Python function as entry point for the task
- named
Parameters {[key: string]: string} - Named parameters for the task
- package
Name string - Name of Python package
- parameters string[]
- Parameters for the task
- entry_
point str - Python function as entry point for the task
- named_
parameters Mapping[str, str] - Named parameters for the task
- package_
name str - Name of Python package
- parameters Sequence[str]
- Parameters for the task
- entry
Point String - Python function as entry point for the task
- named
Parameters Map<String> - Named parameters for the task
- package
Name String - Name of Python package
- parameters List<String>
- Parameters for the task
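A sketch of a python_wheel_task block; the package and entry point names are placeholders, and the wheel itself would be attached through the task's library block (type path assumed from the SDK's types.input namespace):

import * as databricks from "@pulumi/databricks";

const pythonWheelTask: databricks.types.input.JobTaskForEachTaskTaskPythonWheelTask = {
    packageName: "acme_etl",          // hypothetical package name
    entryPoint: "run",                // function invoked as the task entry point
    namedParameters: { env: "prod" }, // placeholder named parameter
};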
JobTaskForEachTaskTaskRunJobTask, JobTaskForEachTaskTaskRunJobTaskArgs
- Job
Id int - (String) ID of the job
- Dbt
Commands List<string> - Jar
Params List<string> - Job
Parameters Dictionary<string, string> - (Map) Job parameters for the task
- Notebook
Params Dictionary<string, string> - Pipeline
Params JobTask For Each Task Task Run Job Task Pipeline Params - Python
Named Dictionary<string, string>Params - Python
Params List<string> - Spark
Submit List<string>Params - Sql
Params Dictionary<string, string>
- Job
Id int - (String) ID of the job
- Dbt
Commands []string - Jar
Params []string - Job
Parameters map[string]string - (Map) Job parameters for the task
- Notebook
Params map[string]string - Pipeline
Params JobTask For Each Task Task Run Job Task Pipeline Params - Python
Named map[string]stringParams - Python
Params []string - Spark
Submit []stringParams - Sql
Params map[string]string
- job
Id Integer - (String) ID of the job
- dbt
Commands List<String> - jar
Params List<String> - job
Parameters Map<String,String> - (Map) Job parameters for the task
- notebook
Params Map<String,String> - pipeline
Params JobTask For Each Task Task Run Job Task Pipeline Params - python
Named Map<String,String>Params - python
Params List<String> - spark
Submit List<String>Params - sql
Params Map<String,String>
- job
Id number - (String) ID of the job
- dbt
Commands string[] - jar
Params string[] - job
Parameters {[key: string]: string} - (Map) Job parameters for the task
- notebook
Params {[key: string]: string} - pipeline
Params JobTask For Each Task Task Run Job Task Pipeline Params - python
Named {[key: string]: string}Params - python
Params string[] - spark
Submit string[]Params - sql
Params {[key: string]: string}
- job_
id int - (String) ID of the job
- dbt_
commands Sequence[str] - jar_
params Sequence[str] - job_
parameters Mapping[str, str] - (Map) Job parameters for the task
- notebook_
params Mapping[str, str] - pipeline_
params JobTask For Each Task Task Run Job Task Pipeline Params - python_
named_ Mapping[str, str]params - python_
params Sequence[str] - spark_
submit_ Sequence[str]params - sql_
params Mapping[str, str]
- job
Id Number - (String) ID of the job
- dbt
Commands List<String> - jar
Params List<String> - job
Parameters Map<String> - (Map) Job parameters for the task
- notebook
Params Map<String> - pipeline
Params Property Map - python
Named Map<String>Params - python
Params List<String> - spark
Submit List<String>Params - sql
Params Map<String>
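A sketch of a run_job_task block that triggers another job with job-level parameters; the referenced databricks.Job is hypothetical, and since job_id is numeric the string resource ID is converted:

import * as databricks from "@pulumi/databricks";

// `downstreamJob` is a hypothetical databricks.Job declared elsewhere in the stack.
declare const downstreamJob: databricks.Job;

const runJobTask: databricks.types.input.JobTaskForEachTaskTaskRunJobTask = {
    jobId: downstreamJob.id.apply(id => parseInt(id, 10)),
    jobParameters: { run_date: "2024-01-01" },  // placeholder parameter
};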
JobTaskForEachTaskTaskRunJobTaskPipelineParams, JobTaskForEachTaskTaskRunJobTaskPipelineParamsArgs
- Full
Refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- Full
Refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- full
Refresh Boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- full
Refresh boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- full_
refresh bool (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
- full
Refresh Boolean (Bool) Specifies if there should be full refresh of the pipeline.
The following configuration blocks are only supported inside a
task
block
JobTaskForEachTaskTaskSparkJarTask, JobTaskForEachTaskTaskSparkJarTaskArgs
- Jar
Uri string - Main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - Parameters List<string>
- (List) Parameters passed to the main method.
- Jar
Uri string - Main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - Parameters []string
- (List) Parameters passed to the main method.
- jar
Uri String - main
Class StringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters List<String>
- (List) Parameters passed to the main method.
- jar
Uri string - main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters string[]
- (List) Parameters passed to the main method.
- jar_
uri str - main_
class_ strname - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters Sequence[str]
- (List) Parameters passed to the main method.
- jar
Uri String - main
Class StringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters List<String>
- (List) Parameters passed to the main method.
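A sketch of a spark_jar_task block; the class name and arguments are placeholders, and the JAR itself would be supplied through a library block on the task or cluster (type path assumed from the SDK's types.input namespace):

import * as databricks from "@pulumi/databricks";

const sparkJarTask: databricks.types.input.JobTaskForEachTaskTaskSparkJarTask = {
    mainClassName: "com.example.etl.Main",  // must call SparkContext.getOrCreate
    parameters: ["--date", "2024-01-01"],   // passed to the main method
};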
JobTaskForEachTaskTaskSparkPythonTask, JobTaskForEachTaskTaskSparkPythonTaskArgs
- Python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required. - Parameters List<string>
- (List) Command line parameters passed to the Python file.
- Source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- Python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required. - Parameters []string
- (List) Command line parameters passed to the Python file.
- Source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File String - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required. - parameters List<String>
- (List) Command line parameters passed to the Python file.
- source String
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required. - parameters string[]
- (List) Command line parameters passed to the Python file.
- source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python_
file str - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required. - parameters Sequence[str]
- (List) Command line parameters passed to the Python file.
- source str
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File String - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/, abfss:/, gs:/), workspace paths and remote repositories are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with /Repos. For files stored in a remote repository, the path must be relative. This field is required. - parameters List<String>
- (List) Command line parameters passed to the Python file.
- source String
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
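A sketch of a spark_python_task block pointing at a file in a remote repository, i.e. a relative path together with source GIT; the path and parameters are placeholders (type path assumed from the SDK's types.input namespace):

import * as databricks from "@pulumi/databricks";

const sparkPythonTask: databricks.types.input.JobTaskForEachTaskTaskSparkPythonTask = {
    pythonFile: "jobs/etl.py",      // relative path inside the repo configured in git_source
    source: "GIT",
    parameters: ["--env", "prod"],  // placeholder command-line arguments
};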
JobTaskForEachTaskTaskSparkSubmitTask, JobTaskForEachTaskTaskSparkSubmitTaskArgs
- Parameters List<string>
- (List) Command-line parameters passed to spark submit.
- Parameters []string
- (List) Command-line parameters passed to spark submit.
- parameters List<String>
- (List) Command-line parameters passed to spark submit.
- parameters string[]
- (List) Command-line parameters passed to spark submit.
- parameters Sequence[str]
- (List) Command-line parameters passed to spark submit.
- parameters List<String>
- (List) Command-line parameters passed to spark submit.
JobTaskForEachTaskTaskSqlTask, JobTaskForEachTaskTaskSqlTaskArgs
- Warehouse
Id string - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- Alert
Job
Task For Each Task Task Sql Task Alert - block consisting of following fields:
- Dashboard
Job
Task For Each Task Task Sql Task Dashboard - block consisting of following fields:
- File
Job
Task For Each Task Task Sql Task File - block consisting of single string fields:
- Parameters Dictionary<string, string>
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- Query
Job
Task For Each Task Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- Warehouse
Id string - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- Alert
Job
Task For Each Task Task Sql Task Alert - block consisting of following fields:
- Dashboard
Job
Task For Each Task Task Sql Task Dashboard - block consisting of following fields:
- File
Job
Task For Each Task Task Sql Task File - block consisting of single string fields:
- Parameters map[string]string
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- Query
Job
Task For Each Task Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse
Id String - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert
Job
Task For Each Task Task Sql Task Alert - block consisting of following fields:
- dashboard
Job
Task For Each Task Task Sql Task Dashboard - block consisting of following fields:
- file
Job
Task For Each Task Task Sql Task File - block consisting of single string fields:
- parameters Map<String,String>
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query
Job
Task For Each Task Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse
Id string - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert
Job
Task For Each Task Task Sql Task Alert - block consisting of following fields:
- dashboard
Job
Task For Each Task Task Sql Task Dashboard - block consisting of following fields:
- file
Job
Task For Each Task Task Sql Task File - block consisting of single string fields:
- parameters {[key: string]: string}
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query
Job
Task For Each Task Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse_
id str - ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert
Job
Task For Each Task Task Sql Task Alert - block consisting of following fields:
- dashboard
Job
Task For Each Task Task Sql Task Dashboard - block consisting of following fields:
- file
Job
Task For Each Task Task Sql Task File - block consisting of single string fields:
- parameters Mapping[str, str]
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query
Job
Task For Each Task Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse
Id String - ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert Property Map
- block consisting of following fields:
- dashboard Property Map
- block consisting of following fields:
- file Property Map
- block consisting of single string fields:
- parameters Map<String>
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query Property Map
- block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
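To illustrate how the parameters map combines with a query reference, here is a minimal TypeScript sketch. The configuration values (warehouseId, queryId) and the run_date/region parameter names are placeholders, not part of this reference; in practice they would come from existing databricks_sql_endpoint and databricks_query resources.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical config values standing in for IDs of existing resources.
const cfg = new pulumi.Config();
const warehouseId = cfg.require("warehouseId");
const queryId = cfg.require("queryId");

const paramQueryJob = new databricks.Job("param_query_job", {
    name: "Parameterized SQL query",
    tasks: [{
        taskKey: "run_query",
        sqlTask: {
            warehouseId: warehouseId,
            query: { queryId: queryId },
            // Static parameters passed to every run; note that SQL alert tasks
            // do not support custom parameters.
            parameters: {
                run_date: "2024-01-01",
                region: "emea",
            },
        },
    }],
});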
JobTaskForEachTaskTaskSqlTaskAlert, JobTaskForEachTaskTaskSqlTaskAlertArgs
- AlertId string - (String) identifier of the Databricks Alert (databricks_alert).
- PauseSubscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions List<JobTaskForEachTaskTaskSqlTaskAlertSubscription> - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- AlertId string - (String) identifier of the Databricks Alert (databricks_alert).
- PauseSubscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions []JobTaskForEachTaskTaskSqlTaskAlertSubscription - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- alertId String - (String) identifier of the Databricks Alert (databricks_alert).
- pauseSubscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions List<JobTaskForEachTaskTaskSqlTaskAlertSubscription> - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- alertId string - (String) identifier of the Databricks Alert (databricks_alert).
- pauseSubscriptions boolean - flag that specifies if subscriptions are paused or not.
- subscriptions JobTaskForEachTaskTaskSqlTaskAlertSubscription[] - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- alert_id str - (String) identifier of the Databricks Alert (databricks_alert).
- pause_subscriptions bool - flag that specifies if subscriptions are paused or not.
- subscriptions Sequence[JobTaskForEachTaskTaskSqlTaskAlertSubscription] - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- alertId String - (String) identifier of the Databricks Alert (databricks_alert).
- pauseSubscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions List<Property Map> - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
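The TypeScript sketch below shows how these fields might be combined: pause_subscriptions set explicitly, one user_name subscription, and one destination_id subscription. The config values are placeholders for IDs of existing warehouse, alert, and notification destination resources, not part of this reference.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Placeholder identifiers; substitute IDs from your own resources.
const cfg = new pulumi.Config();

const alertJob = new databricks.Job("alert_job", {
    name: "SQL alert with subscriptions",
    tasks: [{
        taskKey: "check_alert",
        sqlTask: {
            warehouseId: cfg.require("warehouseId"),
            alert: {
                alertId: cfg.require("alertId"),
                pauseSubscriptions: false,
                subscriptions: [
                    // Notify a workspace user by email...
                    { userName: "user@domain.com" },
                    // ...and a notification destination by its identifier.
                    { destinationId: cfg.require("destinationId") },
                ],
            },
        },
    }],
});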
JobTaskForEachTaskTaskSqlTaskAlertSubscription, JobTaskForEachTaskTaskSqlTaskAlertSubscriptionArgs
- DestinationId string
- UserName string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- DestinationId string
- UserName string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destinationId String
- userName String - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destinationId string
- userName string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination_id str
- user_name str - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destinationId String
- userName String - The email of an active workspace user. Non-admin users can only set this field to their own email.
JobTaskForEachTaskTaskSqlTaskDashboard, JobTaskForEachTaskTaskSqlTaskDashboardArgs
- DashboardId string - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- CustomSubject string - string specifying a custom subject for the email sent.
- PauseSubscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions List<JobTaskForEachTaskTaskSqlTaskDashboardSubscription> - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- DashboardId string - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- CustomSubject string - string specifying a custom subject for the email sent.
- PauseSubscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions []JobTaskForEachTaskTaskSqlTaskDashboardSubscription - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- dashboardId String - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- customSubject String - string specifying a custom subject for the email sent.
- pauseSubscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions List<JobTaskForEachTaskTaskSqlTaskDashboardSubscription> - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- dashboardId string - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- customSubject string - string specifying a custom subject for the email sent.
- pauseSubscriptions boolean - flag that specifies if subscriptions are paused or not.
- subscriptions JobTaskForEachTaskTaskSqlTaskDashboardSubscription[] - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- dashboard_id str - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- custom_subject str - string specifying a custom subject for the email sent.
- pause_subscriptions bool - flag that specifies if subscriptions are paused or not.
- subscriptions Sequence[JobTaskForEachTaskTaskSqlTaskDashboardSubscription] - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
- dashboardId String - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- customSubject String - string specifying a custom subject for the email sent.
- pauseSubscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions List<Property Map> - a list of subscription blocks, each consisting of one of the required fields: user_name for user emails or destination_id for the alert destination's identifier.
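A possible TypeScript sketch of a dashboard refresh task that uses custom_subject and pause_subscriptions together with a user subscription; the warehouseId and dashboardId config values are placeholders for existing resources.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Placeholder identifiers for an existing SQL warehouse and legacy SQL dashboard.
const cfg = new pulumi.Config();

const dashboardJob = new databricks.Job("dashboard_job", {
    name: "Nightly dashboard refresh",
    tasks: [{
        taskKey: "refresh_dashboard",
        sqlTask: {
            warehouseId: cfg.require("warehouseId"),
            dashboard: {
                dashboardId: cfg.require("dashboardId"),
                customSubject: "Nightly dashboard refresh finished",
                pauseSubscriptions: false,
                subscriptions: [{ userName: "user@domain.com" }],
            },
        },
    }],
});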
JobTaskForEachTaskTaskSqlTaskDashboardSubscription, JobTaskForEachTaskTaskSqlTaskDashboardSubscriptionArgs
- DestinationId string
- UserName string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- DestinationId string
- UserName string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destinationId String
- userName String - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destinationId string
- userName string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination_id str
- user_name str - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destinationId String
- userName String - The email of an active workspace user. Non-admin users can only set this field to their own email.
JobTaskForEachTaskTaskSqlTaskFile, JobTaskForEachTaskTaskSqlTaskFileArgs
- Path string - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.
Example
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const sqlAggregationJob = new databricks.Job("sql_aggregation_job", {
    name: "Example SQL Job",
    tasks: [
        {
            taskKey: "run_agg_query",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                query: {
                    queryId: aggQuery.id,
                },
            },
        },
        {
            taskKey: "run_dashboard",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                dashboard: {
                    dashboardId: dash.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
        {
            taskKey: "run_alert",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                alert: {
                    alertId: alert.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
    ],
});
import pulumi
import pulumi_databricks as databricks

sql_aggregation_job = databricks.Job("sql_aggregation_job",
    name="Example SQL Job",
    tasks=[
        {
            "task_key": "run_agg_query",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "query": {
                    "query_id": agg_query["id"],
                },
            },
        },
        {
            "task_key": "run_dashboard",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "dashboard": {
                    "dashboard_id": dash["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
        {
            "task_key": "run_alert",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "alert": {
                    "alert_id": alert["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
    ])
using System.Collections.Generic; using System.Linq; using Pulumi; using Databricks = Pulumi.Databricks; return await Deployment.RunAsync(() => { var sqlAggregationJob = new Databricks.Job("sql_aggregation_job", new() { Name = "Example SQL Job", Tasks = new[] { new Databricks.Inputs.JobTaskArgs { TaskKey = "run_agg_query", SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs { WarehouseId = sqlJobWarehouse.Id, Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs { QueryId = aggQuery.Id, }, }, }, new Databricks.Inputs.JobTaskArgs { TaskKey = "run_dashboard", SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs { WarehouseId = sqlJobWarehouse.Id, Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs { DashboardId = dash.Id, Subscriptions = new[] { new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs { UserName = "user@domain.com", }, }, }, }, }, new Databricks.Inputs.JobTaskArgs { TaskKey = "run_alert", SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs { WarehouseId = sqlJobWarehouse.Id, Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs { AlertId = alert.Id, Subscriptions = new[] { new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs { UserName = "user@domain.com", }, }, }, }, }, }, }); });
package main

import (
    "github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        _, err := databricks.NewJob(ctx, "sql_aggregation_job", &databricks.JobArgs{
            Name: pulumi.String("Example SQL Job"),
            Tasks: databricks.JobTaskArray{
                &databricks.JobTaskArgs{
                    TaskKey: pulumi.String("run_agg_query"),
                    SqlTask: &databricks.JobTaskSqlTaskArgs{
                        WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
                        Query: &databricks.JobTaskSqlTaskQueryArgs{
                            QueryId: pulumi.Any(aggQuery.Id),
                        },
                    },
                },
                &databricks.JobTaskArgs{
                    TaskKey: pulumi.String("run_dashboard"),
                    SqlTask: &databricks.JobTaskSqlTaskArgs{
                        WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
                        Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
                            DashboardId: pulumi.Any(dash.Id),
                            Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
                                &databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
                                    UserName: pulumi.String("user@domain.com"),
                                },
                            },
                        },
                    },
                },
                &databricks.JobTaskArgs{
                    TaskKey: pulumi.String("run_alert"),
                    SqlTask: &databricks.JobTaskSqlTaskArgs{
                        WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
                        Alert: &databricks.JobTaskSqlTaskAlertArgs{
                            AlertId: pulumi.Any(alert.Id),
                            Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
                                &databricks.JobTaskSqlTaskAlertSubscriptionArgs{
                                    UserName: pulumi.String("user@domain.com"),
                                },
                            },
                        },
                    },
                },
            },
        })
        if err != nil {
            return err
        }
        return nil
    })
}
package generated_program; import com.pulumi.Context; import com.pulumi.Pulumi; import com.pulumi.core.Output; import com.pulumi.databricks.Job; import com.pulumi.databricks.JobArgs; import com.pulumi.databricks.inputs.JobTaskArgs; import com.pulumi.databricks.inputs.JobTaskSqlTaskArgs; import com.pulumi.databricks.inputs.JobTaskSqlTaskQueryArgs; import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardArgs; import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertArgs; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.io.File; import java.nio.file.Files; import java.nio.file.Paths; public class App { public static void main(String[] args) { Pulumi.run(App::stack); } public static void stack(Context ctx) { var sqlAggregationJob = new Job("sqlAggregationJob", JobArgs.builder() .name("Example SQL Job") .tasks( JobTaskArgs.builder() .taskKey("run_agg_query") .sqlTask(JobTaskSqlTaskArgs.builder() .warehouseId(sqlJobWarehouse.id()) .query(JobTaskSqlTaskQueryArgs.builder() .queryId(aggQuery.id()) .build()) .build()) .build(), JobTaskArgs.builder() .taskKey("run_dashboard") .sqlTask(JobTaskSqlTaskArgs.builder() .warehouseId(sqlJobWarehouse.id()) .dashboard(JobTaskSqlTaskDashboardArgs.builder() .dashboardId(dash.id()) .subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder() .userName("user@domain.com") .build()) .build()) .build()) .build(), JobTaskArgs.builder() .taskKey("run_alert") .sqlTask(JobTaskSqlTaskArgs.builder() .warehouseId(sqlJobWarehouse.id()) .alert(JobTaskSqlTaskAlertArgs.builder() .alertId(alert.id()) .subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder() .userName("user@domain.com") .build()) .build()) .build()) .build()) .build()); } }
resources:
  sqlAggregationJob:
    type: databricks:Job
    name: sql_aggregation_job
    properties:
      name: Example SQL Job
      tasks:
        - taskKey: run_agg_query
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            query:
              queryId: ${aggQuery.id}
        - taskKey: run_dashboard
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            dashboard:
              dashboardId: ${dash.id}
              subscriptions:
                - userName: user@domain.com
        - taskKey: run_alert
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            alert:
              alertId: ${alert.id}
              subscriptions:
                - userName: user@domain.com
- Source string - The source of the project. Possible values are WORKSPACE and GIT.
- Path string - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute. See the example above.
- Source string - The source of the project. Possible values are WORKSPACE and GIT.
- path String - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute. See the example above.
- source String - The source of the project. Possible values are WORKSPACE and GIT.
- path string - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute. See the example above.
- source string - The source of the project. Possible values are WORKSPACE and GIT.
- path str - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute. See the example above.
- source str - The source of the project. Possible values are WORKSPACE and GIT.
- path String - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute. See the example above.
- source String - The source of the project. Possible values are WORKSPACE and GIT.
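To show the GIT variant of this block in context, here is a TypeScript sketch that pairs a sql_task file with a git_source block on the job. The repository URL, branch, relative path, and warehouse ID are placeholders, not values prescribed by this reference.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Placeholder warehouse ID; the repository below is hypothetical.
const cfg = new pulumi.Config();

const gitSqlFileJob = new databricks.Job("git_sql_file_job", {
    name: "SQL file from Git",
    gitSource: {
        url: "https://github.com/acme/sql-jobs",
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "run_sql_file",
        sqlTask: {
            warehouseId: cfg.require("warehouseId"),
            file: {
                // Path is relative to the repository root because source is GIT.
                source: "GIT",
                path: "queries/daily_aggregation.sql",
            },
        },
    }],
});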
JobTaskForEachTaskTaskSqlTaskQuery, JobTaskForEachTaskTaskSqlTaskQueryArgs
- QueryId string
- QueryId string
- queryId String
- queryId string
- query_id str
- queryId String
JobTaskForEachTaskTaskWebhookNotifications, JobTaskForEachTaskTaskWebhookNotificationsArgs
- OnDurationWarningThresholdExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- OnFailures List<JobTaskForEachTaskTaskWebhookNotificationsOnFailure> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- OnStarts List<JobTaskForEachTaskTaskWebhookNotificationsOnStart> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- OnStreamingBacklogExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded>
- OnSuccesses List<JobTaskForEachTaskTaskWebhookNotificationsOnSuccess> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- OnDurationWarningThresholdExceededs []JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- OnFailures []JobTaskForEachTaskTaskWebhookNotificationsOnFailure - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- OnStarts []JobTaskForEachTaskTaskWebhookNotificationsOnStart - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- OnStreamingBacklogExceededs []JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded
- OnSuccesses []JobTaskForEachTaskTaskWebhookNotificationsOnSuccess - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures List<JobTaskForEachTaskTaskWebhookNotificationsOnFailure> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts List<JobTaskForEachTaskTaskWebhookNotificationsOnStart> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs List<JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded>
- onSuccesses List<JobTaskForEachTaskTaskWebhookNotificationsOnSuccess> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded[] - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures JobTaskForEachTaskTaskWebhookNotificationsOnFailure[] - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts JobTaskForEachTaskTaskWebhookNotificationsOnStart[] - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded[]
- onSuccesses JobTaskForEachTaskTaskWebhookNotificationsOnSuccess[] - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- on_duration_warning_threshold_exceededs Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded] - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- on_failures Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnFailure] - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- on_starts Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnStart] - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- on_streaming_backlog_exceededs Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded]
- on_successes Sequence[JobTaskForEachTaskTaskWebhookNotificationsOnSuccess] - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs List<Property Map> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures List<Property Map> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts List<Property Map> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs List<Property Map>
- onSuccesses List<Property Map> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
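A minimal TypeScript sketch of task-level webhook notifications, assuming an existing cluster, notebook, and notification destination; all config values are placeholders. The id fields take the destination's UUID (visible in the UI URL), not its display name.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Placeholder cluster ID, notebook path, and notification destination ID.
const cfg = new pulumi.Config();

const notifiedJob = new databricks.Job("notified_job", {
    name: "Job with webhook notifications",
    tasks: [{
        taskKey: "main",
        existingClusterId: cfg.require("clusterId"),
        notebookTask: { notebookPath: cfg.require("notebookPath") },
        webhookNotifications: {
            onStarts: [{ id: cfg.require("destinationId") }],
            onFailures: [{ id: cfg.require("destinationId") }],
            onSuccesses: [{ id: cfg.require("destinationId") }],
        },
    }],
});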
JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded, JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskForEachTaskTaskWebhookNotificationsOnFailure, JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskForEachTaskTaskWebhookNotificationsOnStart, JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded, JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskForEachTaskTaskWebhookNotificationsOnSuccess, JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskHealth, JobTaskHealthArgs
- Rules List<JobTaskHealthRule> - list of rules that are represented as objects with the following attributes:
- Rules []JobTaskHealthRule - list of rules that are represented as objects with the following attributes:
- rules List<JobTaskHealthRule> - list of rules that are represented as objects with the following attributes:
- rules JobTaskHealthRule[] - list of rules that are represented as objects with the following attributes:
- rules Sequence[JobTaskHealthRule] - list of rules that are represented as objects with the following attributes:
- rules List<Property Map> - list of rules that are represented as objects with the following attributes:
JobTaskHealthRule, JobTaskHealthRuleArgs
- Metric string - string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
- Op string - string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
- Value int - integer value used to compare to the given metric.
- Metric string - string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
- Op string - string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
- Value int - integer value used to compare to the given metric.
- metric String - string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
- op String - string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
- value Integer - integer value used to compare to the given metric.
- metric string - string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
- op string - string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
- value number - integer value used to compare to the given metric.
- metric str - string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
- op str - string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
- value int - integer value used to compare to the given metric.
- metric String - string specifying the metric to check. The only supported metric is RUN_DURATION_SECONDS (check Jobs REST API documentation for the latest information).
- op String - string specifying the operation used to evaluate the given metric. The only supported operation is GREATER_THAN.
- value Number - integer value used to compare to the given metric.
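As an illustration, a health rule with the RUN_DURATION_SECONDS metric can be attached to a task so that duration-warning notifications fire when a run exceeds the threshold; a minimal sketch, with the cluster ID and notebook path as placeholders:
import * as databricks from "@pulumi/databricks";

// Sketch: trigger duration-warning notifications when a run exceeds one hour.
const monitoredJob = new databricks.Job("monitored-job", {
    name: "Job with a health rule",
    tasks: [{
        taskKey: "main",
        existingClusterId: "0000-000000-example", // placeholder
        notebookTask: { notebookPath: "/Workspace/Shared/main" }, // placeholder
        health: {
            rules: [{
                metric: "RUN_DURATION_SECONDS",
                op: "GREATER_THAN",
                value: 3600, // threshold in seconds
            }],
        },
    }],
});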
JobTaskLibrary, JobTaskLibraryArgs
- Cran
Job
Task Library Cran - Egg string
- Jar string
- Maven
Job
Task Library Maven - Pypi
Job
Task Library Pypi - Requirements string
- Whl string
- Cran
Job
Task Library Cran - Egg string
- Jar string
- Maven
Job
Task Library Maven - Pypi
Job
Task Library Pypi - Requirements string
- Whl string
- cran
Job
Task Library Cran - egg String
- jar String
- maven
Job
Task Library Maven - pypi
Job
Task Library Pypi - requirements String
- whl String
- cran
Job
Task Library Cran - egg string
- jar string
- maven
Job
Task Library Maven - pypi
Job
Task Library Pypi - requirements string
- whl string
- cran Property Map
- egg String
- jar String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
JobTaskLibraryCran, JobTaskLibraryCranArgs
JobTaskLibraryMaven, JobTaskLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
JobTaskLibraryPypi, JobTaskLibraryPypiArgs
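As a sketch, task-level libraries can combine PyPI, Maven, and wheel entries. The package names, coordinates, and wheel path below are illustrative, and the pypi block's package field is assumed from the library schema rather than shown in the listing above.
import * as databricks from "@pulumi/databricks";

// Sketch: install libraries on the cluster that executes the task.
const libraryJob = new databricks.Job("library-job", {
    name: "Job with task libraries",
    tasks: [{
        taskKey: "etl",
        existingClusterId: "0000-000000-example", // placeholder
        notebookTask: { notebookPath: "/Workspace/Shared/etl" }, // placeholder
        libraries: [
            { pypi: { package: "requests==2.31.0" } },                                   // illustrative pin
            { maven: { coordinates: "com.amazon.deequ:deequ:2.0.7-spark-3.5" } },        // illustrative coordinates
            { whl: "/Volumes/main/default/wheels/my_lib-0.1.0-py3-none-any.whl" },       // placeholder path
        ],
    }],
});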
JobTaskNewCluster, JobTaskNewClusterArgs
- SparkVersion string
- ApplyPolicyDefaultValues bool
- Autoscale JobTaskNewClusterAutoscale
- AwsAttributes JobTaskNewClusterAwsAttributes
- AzureAttributes JobTaskNewClusterAzureAttributes
- ClusterId string
- ClusterLogConf JobTaskNewClusterClusterLogConf
- ClusterMountInfos List<JobTaskNewClusterClusterMountInfo>
- ClusterName string
- CustomTags Dictionary<string, string>
- DataSecurityMode string
- DockerImage JobTaskNewClusterDockerImage
- DriverInstancePoolId string
- DriverNodeTypeId string
- EnableElasticDisk bool
- EnableLocalDiskEncryption bool
- GcpAttributes JobTaskNewClusterGcpAttributes
- IdempotencyToken string
- InitScripts List<JobTaskNewClusterInitScript>
- InstancePoolId string
- Libraries List<JobTaskNewClusterLibrary> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- NodeTypeId string
- NumWorkers int
- PolicyId string
- RuntimeEngine string
- SingleUserName string
- SparkConf Dictionary<string, string>
- SparkEnvVars Dictionary<string, string>
- SshPublicKeys List<string>
- WorkloadType JobTaskNewClusterWorkloadType - isn't supported
- SparkVersion string
- ApplyPolicyDefaultValues bool
- Autoscale JobTaskNewClusterAutoscale
- AwsAttributes JobTaskNewClusterAwsAttributes
- AzureAttributes JobTaskNewClusterAzureAttributes
- ClusterId string
- ClusterLogConf JobTaskNewClusterClusterLogConf
- ClusterMountInfos []JobTaskNewClusterClusterMountInfo
- ClusterName string
- CustomTags map[string]string
- DataSecurityMode string
- DockerImage JobTaskNewClusterDockerImage
- DriverInstancePoolId string
- DriverNodeTypeId string
- EnableElasticDisk bool
- EnableLocalDiskEncryption bool
- GcpAttributes JobTaskNewClusterGcpAttributes
- IdempotencyToken string
- InitScripts []JobTaskNewClusterInitScript
- InstancePoolId string
- Libraries []JobTaskNewClusterLibrary - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- NodeTypeId string
- NumWorkers int
- PolicyId string
- RuntimeEngine string
- SingleUserName string
- SparkConf map[string]string
- SparkEnvVars map[string]string
- SshPublicKeys []string
- WorkloadType JobTaskNewClusterWorkloadType - isn't supported
- sparkVersion String
- applyPolicyDefaultValues Boolean
- autoscale JobTaskNewClusterAutoscale
- awsAttributes JobTaskNewClusterAwsAttributes
- azureAttributes JobTaskNewClusterAzureAttributes
- clusterId String
- clusterLogConf JobTaskNewClusterClusterLogConf
- clusterMountInfos List<JobTaskNewClusterClusterMountInfo>
- clusterName String
- customTags Map<String,String>
- dataSecurityMode String
- dockerImage JobTaskNewClusterDockerImage
- driverInstancePoolId String
- driverNodeTypeId String
- enableElasticDisk Boolean
- enableLocalDiskEncryption Boolean
- gcpAttributes JobTaskNewClusterGcpAttributes
- idempotencyToken String
- initScripts List<JobTaskNewClusterInitScript>
- instancePoolId String
- libraries List<JobTaskNewClusterLibrary> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- nodeTypeId String
- numWorkers Integer
- policyId String
- runtimeEngine String
- singleUserName String
- sparkConf Map<String,String>
- sparkEnvVars Map<String,String>
- sshPublicKeys List<String>
- workloadType JobTaskNewClusterWorkloadType - isn't supported
- sparkVersion string
- applyPolicyDefaultValues boolean
- autoscale JobTaskNewClusterAutoscale
- awsAttributes JobTaskNewClusterAwsAttributes
- azureAttributes JobTaskNewClusterAzureAttributes
- clusterId string
- clusterLogConf JobTaskNewClusterClusterLogConf
- clusterMountInfos JobTaskNewClusterClusterMountInfo[]
- clusterName string
- customTags {[key: string]: string}
- dataSecurityMode string
- dockerImage JobTaskNewClusterDockerImage
- driverInstancePoolId string
- driverNodeTypeId string
- enableElasticDisk boolean
- enableLocalDiskEncryption boolean
- gcpAttributes JobTaskNewClusterGcpAttributes
- idempotencyToken string
- initScripts JobTaskNewClusterInitScript[]
- instancePoolId string
- libraries JobTaskNewClusterLibrary[] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- nodeTypeId string
- numWorkers number
- policyId string
- runtimeEngine string
- singleUserName string
- sparkConf {[key: string]: string}
- sparkEnvVars {[key: string]: string}
- sshPublicKeys string[]
- workloadType JobTaskNewClusterWorkloadType - isn't supported
- spark_version str
- apply_policy_default_values bool
- autoscale JobTaskNewClusterAutoscale
- aws_attributes JobTaskNewClusterAwsAttributes
- azure_attributes JobTaskNewClusterAzureAttributes
- cluster_id str
- cluster_log_conf JobTaskNewClusterClusterLogConf
- cluster_mount_infos Sequence[JobTaskNewClusterClusterMountInfo]
- cluster_name str
- custom_tags Mapping[str, str]
- data_security_mode str
- docker_image JobTaskNewClusterDockerImage
- driver_instance_pool_id str
- driver_node_type_id str
- enable_elastic_disk bool
- enable_local_disk_encryption bool
- gcp_attributes JobTaskNewClusterGcpAttributes
- idempotency_token str
- init_scripts Sequence[JobTaskNewClusterInitScript]
- instance_pool_id str
- libraries Sequence[JobTaskNewClusterLibrary] - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- node_type_id str
- num_workers int
- policy_id str
- runtime_engine str
- single_user_name str
- spark_conf Mapping[str, str]
- spark_env_vars Mapping[str, str]
- ssh_public_keys Sequence[str]
- workload_type JobTaskNewClusterWorkloadType - isn't supported
- sparkVersion String
- applyPolicyDefaultValues Boolean
- autoscale Property Map
- awsAttributes Property Map
- azureAttributes Property Map
- clusterId String
- clusterLogConf Property Map
- clusterMountInfos List<Property Map>
- clusterName String
- customTags Map<String>
- dataSecurityMode String
- dockerImage Property Map
- driverInstancePoolId String
- driverNodeTypeId String
- enableElasticDisk Boolean
- enableLocalDiskEncryption Boolean
- gcpAttributes Property Map
- idempotencyToken String
- initScripts List<Property Map>
- instancePoolId String
- libraries List<Property Map> - (List) An optional list of libraries to be installed on the cluster that will execute the job. See library Configuration Block below.
- nodeTypeId String
- numWorkers Number
- policyId String
- runtimeEngine String
- singleUserName String
- sparkConf Map<String>
- sparkEnvVars Map<String>
- sshPublicKeys List<String>
- workloadType Property Map - isn't supported
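A minimal new cluster sketch for a task follows; the Spark version and node type are hard-coded placeholders that would normally come from the databricks.getSparkVersion and databricks.getNodeType data sources.
import * as databricks from "@pulumi/databricks";

// Sketch: per-task jobs cluster with autoscaling and a couple of common knobs.
const clusterJob = new databricks.Job("cluster-job", {
    name: "Job with a per-task cluster",
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Workspace/Shared/main" }, // placeholder
        newCluster: {
            sparkVersion: "15.4.x-scala2.12",  // placeholder; see databricks.getSparkVersion
            nodeTypeId: "i3.xlarge",           // placeholder; see databricks.getNodeType
            autoscale: { minWorkers: 1, maxWorkers: 4 },
            sparkConf: { "spark.speculation": "true" },
            customTags: { team: "data-eng" },  // illustrative tag
        },
    }],
});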
JobTaskNewClusterAutoscale, JobTaskNewClusterAutoscaleArgs
- MaxWorkers int
- MinWorkers int
- MaxWorkers int
- MinWorkers int
- maxWorkers Integer
- minWorkers Integer
- maxWorkers number
- minWorkers number
- max_workers int
- min_workers int
- maxWorkers Number
- minWorkers Number
JobTaskNewClusterAwsAttributes, JobTaskNewClusterAwsAttributesArgs
- Availability string
- EbsVolumeCount int
- EbsVolumeIops int
- EbsVolumeSize int
- EbsVolumeThroughput int
- EbsVolumeType string
- FirstOnDemand int
- InstanceProfileArn string
- SpotBidPricePercent int
- ZoneId string
- Availability string
- EbsVolumeCount int
- EbsVolumeIops int
- EbsVolumeSize int
- EbsVolumeThroughput int
- EbsVolumeType string
- FirstOnDemand int
- InstanceProfileArn string
- SpotBidPricePercent int
- ZoneId string
- availability String
- ebsVolumeCount Integer
- ebsVolumeIops Integer
- ebsVolumeSize Integer
- ebsVolumeThroughput Integer
- ebsVolumeType String
- firstOnDemand Integer
- instanceProfileArn String
- spotBidPricePercent Integer
- zoneId String
- availability string
- ebsVolumeCount number
- ebsVolumeIops number
- ebsVolumeSize number
- ebsVolumeThroughput number
- ebsVolumeType string
- firstOnDemand number
- instanceProfileArn string
- spotBidPricePercent number
- zoneId string
- availability str
- ebs_volume_count int
- ebs_volume_iops int
- ebs_volume_size int
- ebs_volume_throughput int
- ebs_volume_type str
- first_on_demand int
- instance_profile_arn str
- spot_bid_price_percent int
- zone_id str
- availability String
- ebsVolumeCount Number
- ebsVolumeIops Number
- ebsVolumeSize Number
- ebsVolumeThroughput Number
- ebsVolumeType String
- firstOnDemand Number
- instanceProfileArn String
- spotBidPricePercent Number
- zoneId String
JobTaskNewClusterAzureAttributes, JobTaskNewClusterAzureAttributesArgs
- availability String
- firstOnDemand Number
- logAnalyticsInfo Property Map
- spotBidMaxPrice Number
JobTaskNewClusterAzureAttributesLogAnalyticsInfo, JobTaskNewClusterAzureAttributesLogAnalyticsInfoArgs
- LogAnalyticsPrimaryKey string
- LogAnalyticsWorkspaceId string
- LogAnalyticsPrimaryKey string
- LogAnalyticsWorkspaceId string
- logAnalyticsPrimaryKey String
- logAnalyticsWorkspaceId String
- logAnalyticsPrimaryKey string
- logAnalyticsWorkspaceId string
- logAnalyticsPrimaryKey String
- logAnalyticsWorkspaceId String
JobTaskNewClusterClusterLogConf, JobTaskNewClusterClusterLogConfArgs
JobTaskNewClusterClusterLogConfDbfs, JobTaskNewClusterClusterLogConfDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskNewClusterClusterLogConfS3, JobTaskNewClusterClusterLogConfS3Args
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- Destination string
- CannedAcl string
- EnableEncryption bool
- EncryptionType string
- Endpoint string
- KmsKey string
- Region string
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
- destination string
- cannedAcl string
- enableEncryption boolean
- encryptionType string
- endpoint string
- kmsKey string
- region string
- destination str
- canned_acl str
- enable_encryption bool
- encryption_type str
- endpoint str
- kms_key str
- region str
- destination String
- cannedAcl String
- enableEncryption Boolean
- encryptionType String
- endpoint String
- kmsKey String
- region String
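A sketch of shipping cluster logs to S3 from a per-task cluster; the bucket, region, and ACL values are placeholders, and a DBFS destination works the same way through the dbfs block instead of s3.
import * as databricks from "@pulumi/databricks";

// Sketch: cluster log delivery to S3. Replace the bucket and region with real values.
const loggedJob = new databricks.Job("logged-job", {
    name: "Job with cluster log delivery",
    tasks: [{
        taskKey: "main",
        sparkPythonTask: { pythonFile: "dbfs:/FileStore/scripts/main.py" }, // placeholder
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // placeholder
            nodeTypeId: "i3.xlarge",          // placeholder
            numWorkers: 2,
            clusterLogConf: {
                s3: {
                    destination: "s3://my-log-bucket/job-cluster-logs", // placeholder bucket
                    region: "us-east-1",
                    cannedAcl: "bucket-owner-full-control",
                },
            },
        },
    }],
});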
JobTaskNewClusterClusterMountInfo, JobTaskNewClusterClusterMountInfoArgs
JobTaskNewClusterClusterMountInfoNetworkFilesystemInfo, JobTaskNewClusterClusterMountInfoNetworkFilesystemInfoArgs
- ServerAddress string
- MountOptions string
- ServerAddress string
- MountOptions string
- serverAddress String
- mountOptions String
- serverAddress string
- mountOptions string
- server_address str
- mount_options str
- serverAddress String
- mountOptions String
JobTaskNewClusterDockerImage, JobTaskNewClusterDockerImageArgs
- Url string
- URL of the job on the given workspace
- Basic
Auth JobTask New Cluster Docker Image Basic Auth
- Url string
- URL of the job on the given workspace
- Basic
Auth JobTask New Cluster Docker Image Basic Auth
- url String
- URL of the job on the given workspace
- basic
Auth JobTask New Cluster Docker Image Basic Auth
- url string
- URL of the job on the given workspace
- basic
Auth JobTask New Cluster Docker Image Basic Auth
- url str
- URL of the job on the given workspace
- basic_
auth JobTask New Cluster Docker Image Basic Auth
- url String
- URL of the job on the given workspace
- basic
Auth Property Map
JobTaskNewClusterDockerImageBasicAuth, JobTaskNewClusterDockerImageBasicAuthArgs
JobTaskNewClusterGcpAttributes, JobTaskNewClusterGcpAttributesArgs
- Availability string
- BootDiskSize int
- GoogleServiceAccount string
- LocalSsdCount int
- UsePreemptibleExecutors bool
- ZoneId string
- Availability string
- BootDiskSize int
- GoogleServiceAccount string
- LocalSsdCount int
- UsePreemptibleExecutors bool
- ZoneId string
- availability String
- bootDiskSize Integer
- googleServiceAccount String
- localSsdCount Integer
- usePreemptibleExecutors Boolean
- zoneId String
- availability string
- bootDiskSize number
- googleServiceAccount string
- localSsdCount number
- usePreemptibleExecutors boolean
- zoneId string
- availability str
- boot_disk_size int
- google_service_account str
- local_ssd_count int
- use_preemptible_executors bool
- zone_id str
- availability String
- bootDiskSize Number
- googleServiceAccount String
- localSsdCount Number
- usePreemptibleExecutors Boolean
- zoneId String
JobTaskNewClusterInitScript, JobTaskNewClusterInitScriptArgs
- Abfss
Job
Task New Cluster Init Script Abfss - Dbfs
Job
Task New Cluster Init Script Dbfs - File
Job
Task New Cluster Init Script File - block consisting of single string fields:
- Gcs
Job
Task New Cluster Init Script Gcs - S3
Job
Task New Cluster Init Script S3 - Volumes
Job
Task New Cluster Init Script Volumes - Workspace
Job
Task New Cluster Init Script Workspace
- Abfss
Job
Task New Cluster Init Script Abfss - Dbfs
Job
Task New Cluster Init Script Dbfs - File
Job
Task New Cluster Init Script File - block consisting of single string fields:
- Gcs
Job
Task New Cluster Init Script Gcs - S3
Job
Task New Cluster Init Script S3 - Volumes
Job
Task New Cluster Init Script Volumes - Workspace
Job
Task New Cluster Init Script Workspace
- abfss
Job
Task New Cluster Init Script Abfss - dbfs
Job
Task New Cluster Init Script Dbfs - file
Job
Task New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Task New Cluster Init Script Gcs - s3
Job
Task New Cluster Init Script S3 - volumes
Job
Task New Cluster Init Script Volumes - workspace
Job
Task New Cluster Init Script Workspace
- abfss
Job
Task New Cluster Init Script Abfss - dbfs
Job
Task New Cluster Init Script Dbfs - file
Job
Task New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Task New Cluster Init Script Gcs - s3
Job
Task New Cluster Init Script S3 - volumes
Job
Task New Cluster Init Script Volumes - workspace
Job
Task New Cluster Init Script Workspace
- abfss
Job
Task New Cluster Init Script Abfss - dbfs
Job
Task New Cluster Init Script Dbfs - file
Job
Task New Cluster Init Script File - block consisting of single string fields:
- gcs
Job
Task New Cluster Init Script Gcs - s3
Job
Task New Cluster Init Script S3 - volumes
Job
Task New Cluster Init Script Volumes - workspace
Job
Task New Cluster Init Script Workspace
- abfss Property Map
- dbfs Property Map
- file Property Map
- block consisting of single string fields:
- gcs Property Map
- s3 Property Map
- volumes Property Map
- workspace Property Map
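Init scripts attach to the per-task cluster as a list of single-destination blocks; a sketch with workspace and Unity Catalog volume destinations (the paths are placeholders):
import * as databricks from "@pulumi/databricks";

// Sketch: two init scripts, one from the workspace and one from a volume.
const initScriptJob = new databricks.Job("init-script-job", {
    name: "Job with init scripts",
    tasks: [{
        taskKey: "main",
        notebookTask: { notebookPath: "/Workspace/Shared/main" }, // placeholder
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // placeholder
            nodeTypeId: "i3.xlarge",          // placeholder
            numWorkers: 1,
            initScripts: [
                { workspace: { destination: "/Shared/init/install-deps.sh" } },          // placeholder
                { volumes: { destination: "/Volumes/main/default/scripts/extra.sh" } },  // placeholder
            ],
        },
    }],
});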
JobTaskNewClusterInitScriptAbfss, JobTaskNewClusterInitScriptAbfssArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskNewClusterInitScriptDbfs, JobTaskNewClusterInitScriptDbfsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskNewClusterInitScriptFile, JobTaskNewClusterInitScriptFileArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskNewClusterInitScriptGcs, JobTaskNewClusterInitScriptGcsArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskNewClusterInitScriptS3, JobTaskNewClusterInitScriptS3Args
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- Destination string
- Canned
Acl string - Enable
Encryption bool - Encryption
Type string - Endpoint string
- Kms
Key string - Region string
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
- destination string
- canned
Acl string - enable
Encryption boolean - encryption
Type string - endpoint string
- kms
Key string - region string
- destination str
- canned_
acl str - enable_
encryption bool - encryption_
type str - endpoint str
- kms_
key str - region str
- destination String
- canned
Acl String - enable
Encryption Boolean - encryption
Type String - endpoint String
- kms
Key String - region String
JobTaskNewClusterInitScriptVolumes, JobTaskNewClusterInitScriptVolumesArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskNewClusterInitScriptWorkspace, JobTaskNewClusterInitScriptWorkspaceArgs
- Destination string
- Destination string
- destination String
- destination string
- destination str
- destination String
JobTaskNewClusterLibrary, JobTaskNewClusterLibraryArgs
- Cran
Job
Task New Cluster Library Cran - Egg string
- Jar string
- Maven
Job
Task New Cluster Library Maven - Pypi
Job
Task New Cluster Library Pypi - Requirements string
- Whl string
- Cran
Job
Task New Cluster Library Cran - Egg string
- Jar string
- Maven
Job
Task New Cluster Library Maven - Pypi
Job
Task New Cluster Library Pypi - Requirements string
- Whl string
- cran
Job
Task New Cluster Library Cran - egg String
- jar String
- maven
Job
Task New Cluster Library Maven - pypi
Job
Task New Cluster Library Pypi - requirements String
- whl String
- cran
Job
Task New Cluster Library Cran - egg string
- jar string
- maven
Job
Task New Cluster Library Maven - pypi
Job
Task New Cluster Library Pypi - requirements string
- whl string
- cran Property Map
- egg String
- jar String
- maven Property Map
- pypi Property Map
- requirements String
- whl String
JobTaskNewClusterLibraryCran, JobTaskNewClusterLibraryCranArgs
JobTaskNewClusterLibraryMaven, JobTaskNewClusterLibraryMavenArgs
- Coordinates string
- Exclusions List<string>
- Repo string
- Coordinates string
- Exclusions []string
- Repo string
- coordinates String
- exclusions List<String>
- repo String
- coordinates string
- exclusions string[]
- repo string
- coordinates str
- exclusions Sequence[str]
- repo str
- coordinates String
- exclusions List<String>
- repo String
JobTaskNewClusterLibraryPypi, JobTaskNewClusterLibraryPypiArgs
JobTaskNewClusterWorkloadType, JobTaskNewClusterWorkloadTypeArgs
JobTaskNewClusterWorkloadTypeClients, JobTaskNewClusterWorkloadTypeClientsArgs
JobTaskNotebookTask, JobTaskNotebookTaskArgs
- Notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- Base
Parameters Dictionary<string, string> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - Source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - Warehouse
Id string - ID of the (the databricks_sql_endpoint) that will be used to execute the task with SQL notebook.
- Notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- Base
Parameters map[string]string - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - Source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - Warehouse
Id string - ID of the (the databricks_sql_endpoint) that will be used to execute the task with SQL notebook.
- notebook
Path String - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters Map<String,String> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source String
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id String - ID of the (the databricks_sql_endpoint) that will be used to execute the task with SQL notebook.
- notebook
Path string - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters {[key: string]: string} - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source string
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id string - ID of the (the databricks_sql_endpoint) that will be used to execute the task with SQL notebook.
- notebook_
path str - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base_
parameters Mapping[str, str] - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source str
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse_
id str - ID of the (the databricks_sql_endpoint) that will be used to execute the task with SQL notebook.
- notebook
Path String - The path of the databricks.Notebook to be run in the Databricks workspace or remote repository. For notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash. For notebooks stored in a remote repository, the path must be relative. This field is required.
- base
Parameters Map<String> - (Map) Base parameters to be used for each run of this job. If the run is initiated by a call to run-now with parameters specified, the two parameters maps will be merged. If the same key is specified in base_parameters and in run-now, the value from run-now will be used. If the notebook takes a parameter that is not specified in the job’s base_parameters or the run-now override parameters, the default value from the notebook will be used. Retrieve these parameters in a notebook using
dbutils.widgets.get
. - source String
- Location type of the notebook, can only be
WORKSPACE
orGIT
. When set toWORKSPACE
, the notebook will be retrieved from the local Databricks workspace. When set toGIT
, the notebook will be retrieved from a Git repository defined ingit_source
. If the value is empty, the task will useGIT
ifgit_source
is defined andWORKSPACE
otherwise. - warehouse
Id String - ID of the (the databricks_sql_endpoint) that will be used to execute the task with SQL notebook.
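A notebook task sketch showing base parameters, which the notebook reads back with dbutils.widgets.get; the path and parameter values are placeholders.
import * as databricks from "@pulumi/databricks";

// Sketch: notebook task with base parameters.
// Inside the notebook, dbutils.widgets.get("env") returns "prod" unless overridden by run-now.
const notebookJob = new databricks.Job("notebook-job", {
    name: "Job with a notebook task",
    tasks: [{
        taskKey: "report",
        existingClusterId: "0000-000000-example", // placeholder
        notebookTask: {
            notebookPath: "/Workspace/Shared/daily_report", // placeholder absolute workspace path
            baseParameters: {
                env: "prod",
                table: "main.analytics.daily", // placeholder
            },
        },
    }],
});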
JobTaskNotificationSettings, JobTaskNotificationSettingsArgs
- AlertOnLastAttempt bool - (Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
- NoAlertForCanceledRuns bool - (Bool) don't send alert for cancelled runs. The following parameter is only available on task level.
- NoAlertForSkippedRuns bool - (Bool) don't send alert for skipped runs.
- AlertOnLastAttempt bool - (Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
- NoAlertForCanceledRuns bool - (Bool) don't send alert for cancelled runs. The following parameter is only available on task level.
- NoAlertForSkippedRuns bool - (Bool) don't send alert for skipped runs.
- alertOnLastAttempt Boolean - (Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
- noAlertForCanceledRuns Boolean - (Bool) don't send alert for cancelled runs. The following parameter is only available on task level.
- noAlertForSkippedRuns Boolean - (Bool) don't send alert for skipped runs.
- alertOnLastAttempt boolean - (Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
- noAlertForCanceledRuns boolean - (Bool) don't send alert for cancelled runs. The following parameter is only available on task level.
- noAlertForSkippedRuns boolean - (Bool) don't send alert for skipped runs.
- alert_on_last_attempt bool - (Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
- no_alert_for_canceled_runs bool - (Bool) don't send alert for cancelled runs. The following parameter is only available on task level.
- no_alert_for_skipped_runs bool - (Bool) don't send alert for skipped runs.
- alertOnLastAttempt Boolean - (Bool) do not send notifications to recipients specified in on_start for the retried runs and do not send notifications to recipients specified in on_failure until the last retry of the run.
- noAlertForCanceledRuns Boolean - (Bool) don't send alert for cancelled runs. The following parameter is only available on task level.
- noAlertForSkippedRuns Boolean - (Bool) don't send alert for skipped runs.
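These settings only modulate how the email and webhook notifications configured elsewhere behave; a short sketch with placeholder cluster and notebook references:
import * as databricks from "@pulumi/databricks";

// Sketch: suppress noise from canceled/skipped runs and only alert on the last retry.
const quietJob = new databricks.Job("quiet-job", {
    name: "Job with tuned task notifications",
    tasks: [{
        taskKey: "main",
        existingClusterId: "0000-000000-example", // placeholder
        notebookTask: { notebookPath: "/Workspace/Shared/main" }, // placeholder
        notificationSettings: {
            alertOnLastAttempt: true,
            noAlertForCanceledRuns: true,
            noAlertForSkippedRuns: true, // task-level only
        },
    }],
});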
JobTaskPipelineTask, JobTaskPipelineTaskArgs
- PipelineId string - The pipeline's unique ID.
- FullRefresh bool - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- PipelineId string - The pipeline's unique ID.
- FullRefresh bool - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- pipelineId String - The pipeline's unique ID.
- fullRefresh Boolean - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- pipelineId string - The pipeline's unique ID.
- fullRefresh boolean - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- pipeline_id str - The pipeline's unique ID.
- full_refresh bool - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- pipelineId String - The pipeline's unique ID.
- fullRefresh Boolean - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
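A pipeline task simply points at a pipeline; in this sketch the pipeline ID is a placeholder that would usually be the id output of a databricks.Pipeline resource.
import * as databricks from "@pulumi/databricks";

// Sketch: trigger a pipeline update as a job task.
const pipelineJob = new databricks.Job("pipeline-job", {
    name: "Job that runs a pipeline",
    tasks: [{
        taskKey: "refresh_pipeline",
        pipelineTask: {
            pipelineId: "<pipeline-id>", // placeholder; typically myPipeline.id
            fullRefresh: false,
        },
    }],
});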
JobTaskPythonWheelTask, JobTaskPythonWheelTaskArgs
- Entry
Point string - Python function as entry point for the task
- Named
Parameters Dictionary<string, string> - Named parameters for the task
- Package
Name string - Name of Python package
- Parameters List<string>
- Parameters for the task
- Entry
Point string - Python function as entry point for the task
- Named
Parameters map[string]string - Named parameters for the task
- Package
Name string - Name of Python package
- Parameters []string
- Parameters for the task
- entry
Point String - Python function as entry point for the task
- named
Parameters Map<String,String> - Named parameters for the task
- package
Name String - Name of Python package
- parameters List<String>
- Parameters for the task
- entry
Point string - Python function as entry point for the task
- named
Parameters {[key: string]: string} - Named parameters for the task
- package
Name string - Name of Python package
- parameters string[]
- Parameters for the task
- entry_
point str - Python function as entry point for the task
- named_
parameters Mapping[str, str] - Named parameters for the task
- package_
name str - Name of Python package
- parameters Sequence[str]
- Parameters for the task
- entry
Point String - Python function as entry point for the task
- named
Parameters Map<String> - Named parameters for the task
- package
Name String - Name of Python package
- parameters List<String>
- Parameters for the task
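A python_wheel_task sketch; the package name, entry point, and wheel path are illustrative, and the entry point is assumed to be exposed by the wheel (for example via console_scripts).
import * as databricks from "@pulumi/databricks";

// Sketch: run an entry point from a wheel installed as a task library.
const wheelJob = new databricks.Job("wheel-job", {
    name: "Job with a Python wheel task",
    tasks: [{
        taskKey: "etl",
        existingClusterId: "0000-000000-example", // placeholder
        pythonWheelTask: {
            packageName: "my_etl",             // placeholder package
            entryPoint: "run",                 // placeholder entry point
            namedParameters: { env: "prod" },
        },
        libraries: [{
            whl: "/Volumes/main/default/wheels/my_etl-0.1.0-py3-none-any.whl", // placeholder
        }],
    }],
});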
JobTaskRunJobTask, JobTaskRunJobTaskArgs
- JobId int - (String) ID of the job
- DbtCommands List<string>
- JarParams List<string>
- JobParameters Dictionary<string, string> - (Map) Job parameters for the task
- NotebookParams Dictionary<string, string>
- PipelineParams JobTaskRunJobTaskPipelineParams
- PythonNamedParams Dictionary<string, string>
- PythonParams List<string>
- SparkSubmitParams List<string>
- SqlParams Dictionary<string, string>
- JobId int - (String) ID of the job
- DbtCommands []string
- JarParams []string
- JobParameters map[string]string - (Map) Job parameters for the task
- NotebookParams map[string]string
- PipelineParams JobTaskRunJobTaskPipelineParams
- PythonNamedParams map[string]string
- PythonParams []string
- SparkSubmitParams []string
- SqlParams map[string]string
- jobId Integer - (String) ID of the job
- dbtCommands List<String>
- jarParams List<String>
- jobParameters Map<String,String> - (Map) Job parameters for the task
- notebookParams Map<String,String>
- pipelineParams JobTaskRunJobTaskPipelineParams
- pythonNamedParams Map<String,String>
- pythonParams List<String>
- sparkSubmitParams List<String>
- sqlParams Map<String,String>
- jobId number - (String) ID of the job
- dbtCommands string[]
- jarParams string[]
- jobParameters {[key: string]: string} - (Map) Job parameters for the task
- notebookParams {[key: string]: string}
- pipelineParams JobTaskRunJobTaskPipelineParams
- pythonNamedParams {[key: string]: string}
- pythonParams string[]
- sparkSubmitParams string[]
- sqlParams {[key: string]: string}
- job_id int - (String) ID of the job
- dbt_commands Sequence[str]
- jar_params Sequence[str]
- job_parameters Mapping[str, str] - (Map) Job parameters for the task
- notebook_params Mapping[str, str]
- pipeline_params JobTaskRunJobTaskPipelineParams
- python_named_params Mapping[str, str]
- python_params Sequence[str]
- spark_submit_params Sequence[str]
- sql_params Mapping[str, str]
- jobId Number - (String) ID of the job
- dbtCommands List<String>
- jarParams List<String>
- jobParameters Map<String> - (Map) Job parameters for the task
- notebookParams Map<String>
- pipelineParams Property Map
- pythonNamedParams Map<String>
- pythonParams List<String>
- sparkSubmitParams List<String>
- sqlParams Map<String>
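A run_job_task sketch; the numeric job ID is a placeholder and would normally be derived from another databricks.Job resource's id output.
import * as databricks from "@pulumi/databricks";

// Sketch: chain another job as a task and pass job parameters to it.
const orchestratorJob = new databricks.Job("orchestrator-job", {
    name: "Job that triggers another job",
    tasks: [{
        taskKey: "trigger_downstream",
        runJobTask: {
            jobId: 123456789, // placeholder; e.g. downstream.id.apply(id => Number(id))
            jobParameters: { run_date: "2024-01-01" },
        },
    }],
});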
JobTaskRunJobTaskPipelineParams, JobTaskRunJobTaskPipelineParamsArgs
- FullRefresh bool - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- FullRefresh bool - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- fullRefresh Boolean - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- fullRefresh boolean - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- full_refresh bool - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
- fullRefresh Boolean - (Bool) Specifies if there should be full refresh of the pipeline. The following configuration blocks are only supported inside a task block.
JobTaskSparkJarTask, JobTaskSparkJarTaskArgs
- Jar
Uri string - Main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - Parameters List<string>
- (List) Parameters passed to the main method.
- Jar
Uri string - Main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - Parameters []string
- (List) Parameters passed to the main method.
- jar
Uri String - main
Class StringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters List<String>
- (List) Parameters passed to the main method.
- jar
Uri string - main
Class stringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters string[]
- (List) Parameters passed to the main method.
- jar_
uri str - main_
class_ strname - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters Sequence[str]
- (List) Parameters passed to the main method.
- jar
Uri String - main
Class StringName - The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use
SparkContext.getOrCreate
to obtain a Spark context; otherwise, runs of the job will fail. - parameters List<String>
- (List) Parameters passed to the main method.
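A spark_jar_task sketch; the JAR path is a placeholder, and the class name reuses the example class from earlier on this page.
import * as databricks from "@pulumi/databricks";

// Sketch: run a main class from a JAR attached as a task library.
const jarJob = new databricks.Job("jar-job", {
    name: "Job with a Spark JAR task",
    tasks: [{
        taskKey: "jar_task",
        existingClusterId: "0000-000000-example", // placeholder
        sparkJarTask: {
            mainClassName: "com.acme.data.Main", // class should use SparkContext.getOrCreate
            parameters: ["--date", "2024-01-01"],
        },
        libraries: [{ jar: "dbfs:/FileStore/jars/acme-data-assembly.jar" }], // placeholder
    }],
});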
JobTaskSparkPythonTask, JobTaskSparkPythonTaskArgs
- Python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - Parameters List<string>
- (List) Command line parameters passed to the Python file.
- Source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- Python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - Parameters []string
- (List) Command line parameters passed to the Python file.
- Source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File String - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters List<String>
- (List) Command line parameters passed to the Python file.
- source String
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File string - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters string[]
- (List) Command line parameters passed to the Python file.
- source string
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python_
file str - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters Sequence[str]
- (List) Command line parameters passed to the Python file.
- source str
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
- python
File String - The URI of the Python file to be executed. databricks_dbfs_file, cloud file URIs (e.g.
s3:/
,abfss:/
,gs:/
), workspace paths and remote repository are supported. For Python files stored in the Databricks workspace, the path must be absolute and begin with/Repos
. For files stored in a remote repository, the path must be relative. This field is required. - parameters List<String>
- (List) Command line parameters passed to the Python file.
- source String
- Location type of the Python file, can only be
GIT
. When set toGIT
, the Python file will be retrieved from a Git repository defined ingit_source
.
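A spark_python_task sketch; the DBFS URI below is a placeholder (cloud URIs such as s3:/, abfss:/, gs:/ and workspace paths work as well, per the schema above).
import * as databricks from "@pulumi/databricks";

// Sketch: run a Python file with command-line parameters.
const pyFileJob = new databricks.Job("py-file-job", {
    name: "Job with a Spark Python task",
    tasks: [{
        taskKey: "py_task",
        existingClusterId: "0000-000000-example", // placeholder
        sparkPythonTask: {
            pythonFile: "dbfs:/FileStore/scripts/etl_main.py", // placeholder URI
            parameters: ["--env", "prod"],
        },
    }],
});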
JobTaskSparkSubmitTask, JobTaskSparkSubmitTaskArgs
- Parameters List<string>
- (List) Command-line parameters passed to spark submit.
- Parameters []string
- (List) Command-line parameters passed to spark submit.
- parameters List<String>
- (List) Command-line parameters passed to spark submit.
- parameters string[]
- (List) Command-line parameters passed to spark submit.
- parameters Sequence[str]
- (List) Command-line parameters passed to spark submit.
- parameters List<String>
- (List) Command-line parameters passed to spark submit.
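A spark_submit_task sketch; everything in parameters is passed straight to spark-submit, so the class and JAR path below are placeholders. Spark submit tasks typically run on a dedicated jobs cluster rather than an existing all-purpose cluster.
import * as databricks from "@pulumi/databricks";

// Sketch: raw spark-submit invocation on a dedicated jobs cluster.
const submitJob = new databricks.Job("submit-job", {
    name: "Job with a spark-submit task",
    tasks: [{
        taskKey: "submit",
        newCluster: {
            sparkVersion: "15.4.x-scala2.12", // placeholder
            nodeTypeId: "i3.xlarge",          // placeholder
            numWorkers: 2,
        },
        sparkSubmitTask: {
            parameters: [
                "--class", "com.acme.data.Main",               // placeholder class
                "dbfs:/FileStore/jars/acme-data-assembly.jar", // placeholder JAR
                "--env", "prod",
            ],
        },
    }],
});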
JobTaskSqlTask, JobTaskSqlTaskArgs
- Warehouse
Id string - ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- Alert
Job
Task Sql Task Alert - block consisting of following fields:
- Dashboard
Job
Task Sql Task Dashboard - block consisting of following fields:
- File
Job
Task Sql Task File - block consisting of single string fields:
- Parameters Dictionary<string, string>
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- Query
Job
Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- Warehouse
Id string - ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- Alert
Job
Task Sql Task Alert - block consisting of following fields:
- Dashboard
Job
Task Sql Task Dashboard - block consisting of following fields:
- File
Job
Task Sql Task File - block consisting of single string fields:
- Parameters map[string]string
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- Query
Job
Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse
Id String - ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert
Job
Task Sql Task Alert - block consisting of following fields:
- dashboard
Job
Task Sql Task Dashboard - block consisting of following fields:
- file
Job
Task Sql Task File - block consisting of single string fields:
- parameters Map<String,String>
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query
Job
Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse
Id string - ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert
Job
Task Sql Task Alert - block consisting of following fields:
- dashboard
Job
Task Sql Task Dashboard - block consisting of following fields:
- file
Job
Task Sql Task File - block consisting of single string fields:
- parameters {[key: string]: string}
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query
Job
Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse_
id str - ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert
Job
Task Sql Task Alert - block consisting of following fields:
- dashboard
Job
Task Sql Task Dashboard - block consisting of following fields:
- file
Job
Task Sql Task File - block consisting of single string fields:
- parameters Mapping[str, str]
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query
Job
Task Sql Task Query - block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
- warehouse
Id String - ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
- alert Property Map
- block consisting of following fields:
- dashboard Property Map
- block consisting of following fields:
- file Property Map
- block consisting of single string fields:
- parameters Map<String>
- (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
- query Property Map
- block consisting of single string field:
query_id
- identifier of the Databricks Query (databricks_query).
JobTaskSqlTaskAlert, JobTaskSqlTaskAlertArgs
- Alert
Id string - (String) identifier of the Databricks Alert (databricks_alert).
- Pause
Subscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions
List<Job
Task Sql Task Alert Subscription> - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- Alert
Id string - (String) identifier of the Databricks Alert (databricks_alert).
- Pause
Subscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions
[]Job
Task Sql Task Alert Subscription - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- alert
Id String - (String) identifier of the Databricks Alert (databricks_alert).
- pause
Subscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions
List<Job
Task Sql Task Alert Subscription> - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- alert
Id string - (String) identifier of the Databricks Alert (databricks_alert).
- pause
Subscriptions boolean - flag that specifies if subscriptions are paused or not.
- subscriptions
Job
Task Sql Task Alert Subscription[] - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- alert_
id str - (String) identifier of the Databricks Alert (databricks_alert).
- pause_
subscriptions bool - flag that specifies if subscriptions are paused or not.
- subscriptions
Sequence[Job
Task Sql Task Alert Subscription] - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- alert
Id String - (String) identifier of the Databricks Alert (databricks_alert).
- pause
Subscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions List<Property Map>
- a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
JobTaskSqlTaskAlertSubscription, JobTaskSqlTaskAlertSubscriptionArgs
- Destination
Id string - User
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- Destination
Id string - User
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination
Id String - user
Name String - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination
Id string - user
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination_
id str - user_
name str - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination
Id String - user
Name String - The email of an active workspace user. Non-admin users can only set this field to their own email.
JobTaskSqlTaskDashboard, JobTaskSqlTaskDashboardArgs
- Dashboard
Id string - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- Custom
Subject string - string specifying a custom subject of email sent.
- Pause
Subscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions
List<Job
Task Sql Task Dashboard Subscription> - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- Dashboard
Id string - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- Custom
Subject string - string specifying a custom subject of email sent.
- Pause
Subscriptions bool - flag that specifies if subscriptions are paused or not.
- Subscriptions
[]Job
Task Sql Task Dashboard Subscription - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- dashboard
Id String - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- custom
Subject String - string specifying a custom subject of email sent.
- pause
Subscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions
List<Job
Task Sql Task Dashboard Subscription> - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- dashboard
Id string - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- custom
Subject string - string specifying a custom subject of email sent.
- pause
Subscriptions boolean - flag that specifies if subscriptions are paused or not.
- subscriptions
Job
Task Sql Task Dashboard Subscription[] - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- dashboard_
id str - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- custom_
subject str - string specifying a custom subject of email sent.
- pause_
subscriptions bool - flag that specifies if subscriptions are paused or not.
- subscriptions
Sequence[Job
Task Sql Task Dashboard Subscription] - a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
- dashboard
Id String - (String) identifier of the Databricks SQL Dashboard databricks_sql_dashboard.
- custom
Subject String - string specifying a custom subject of email sent.
- pause
Subscriptions Boolean - flag that specifies if subscriptions are paused or not.
- subscriptions List<Property Map>
- a list of subscription blocks consisting out of one of the required fields:
user_name
for user emails ordestination_id
- for Alert destination's identifier.
JobTaskSqlTaskDashboardSubscription, JobTaskSqlTaskDashboardSubscriptionArgs
- Destination
Id string - User
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- Destination
Id string - User
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination
Id String - user
Name String - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination
Id string - user
Name string - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination_
id str - user_
name str - The email of an active workspace user. Non-admin users can only set this field to their own email.
- destination
Id String - user
Name String - The email of an active workspace user. Non-admin users can only set this field to their own email.
JobTaskSqlTaskFile, JobTaskSqlTaskFileArgs
- Path string - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.
Example
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const sqlAggregationJob = new databricks.Job("sql_aggregation_job", {
    name: "Example SQL Job",
    tasks: [
        {
            taskKey: "run_agg_query",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                query: {
                    queryId: aggQuery.id,
                },
            },
        },
        {
            taskKey: "run_dashboard",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                dashboard: {
                    dashboardId: dash.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
        {
            taskKey: "run_alert",
            sqlTask: {
                warehouseId: sqlJobWarehouse.id,
                alert: {
                    alertId: alert.id,
                    subscriptions: [{
                        userName: "user@domain.com",
                    }],
                },
            },
        },
    ],
});
import pulumi
import pulumi_databricks as databricks

sql_aggregation_job = databricks.Job("sql_aggregation_job",
    name="Example SQL Job",
    tasks=[
        {
            "task_key": "run_agg_query",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "query": {
                    "query_id": agg_query["id"],
                },
            },
        },
        {
            "task_key": "run_dashboard",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "dashboard": {
                    "dashboard_id": dash["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
        {
            "task_key": "run_alert",
            "sql_task": {
                "warehouse_id": sql_job_warehouse["id"],
                "alert": {
                    "alert_id": alert["id"],
                    "subscriptions": [{
                        "user_name": "user@domain.com",
                    }],
                },
            },
        },
    ])
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var sqlAggregationJob = new Databricks.Job("sql_aggregation_job", new()
    {
        Name = "Example SQL Job",
        Tasks = new[]
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_agg_query",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Query = new Databricks.Inputs.JobTaskSqlTaskQueryArgs
                    {
                        QueryId = aggQuery.Id,
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_dashboard",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Dashboard = new Databricks.Inputs.JobTaskSqlTaskDashboardArgs
                    {
                        DashboardId = dash.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskDashboardSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "run_alert",
                SqlTask = new Databricks.Inputs.JobTaskSqlTaskArgs
                {
                    WarehouseId = sqlJobWarehouse.Id,
                    Alert = new Databricks.Inputs.JobTaskSqlTaskAlertArgs
                    {
                        AlertId = alert.Id,
                        Subscriptions = new[]
                        {
                            new Databricks.Inputs.JobTaskSqlTaskAlertSubscriptionArgs
                            {
                                UserName = "user@domain.com",
                            },
                        },
                    },
                },
            },
        },
    });
});
package main

import (
    "github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        _, err := databricks.NewJob(ctx, "sql_aggregation_job", &databricks.JobArgs{
            Name: pulumi.String("Example SQL Job"),
            Tasks: databricks.JobTaskArray{
                &databricks.JobTaskArgs{
                    TaskKey: pulumi.String("run_agg_query"),
                    SqlTask: &databricks.JobTaskSqlTaskArgs{
                        WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
                        Query: &databricks.JobTaskSqlTaskQueryArgs{
                            QueryId: pulumi.Any(aggQuery.Id),
                        },
                    },
                },
                &databricks.JobTaskArgs{
                    TaskKey: pulumi.String("run_dashboard"),
                    SqlTask: &databricks.JobTaskSqlTaskArgs{
                        WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
                        Dashboard: &databricks.JobTaskSqlTaskDashboardArgs{
                            DashboardId: pulumi.Any(dash.Id),
                            Subscriptions: databricks.JobTaskSqlTaskDashboardSubscriptionArray{
                                &databricks.JobTaskSqlTaskDashboardSubscriptionArgs{
                                    UserName: pulumi.String("user@domain.com"),
                                },
                            },
                        },
                    },
                },
                &databricks.JobTaskArgs{
                    TaskKey: pulumi.String("run_alert"),
                    SqlTask: &databricks.JobTaskSqlTaskArgs{
                        WarehouseId: pulumi.Any(sqlJobWarehouse.Id),
                        Alert: &databricks.JobTaskSqlTaskAlertArgs{
                            AlertId: pulumi.Any(alert.Id),
                            Subscriptions: databricks.JobTaskSqlTaskAlertSubscriptionArray{
                                &databricks.JobTaskSqlTaskAlertSubscriptionArgs{
                                    UserName: pulumi.String("user@domain.com"),
                                },
                            },
                        },
                    },
                },
            },
        })
        if err != nil {
            return err
        }
        return nil
    })
}
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskQueryArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskDashboardArgs;
import com.pulumi.databricks.inputs.JobTaskSqlTaskAlertArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var sqlAggregationJob = new Job("sqlAggregationJob", JobArgs.builder()
            .name("Example SQL Job")
            .tasks(
                JobTaskArgs.builder()
                    .taskKey("run_agg_query")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .query(JobTaskSqlTaskQueryArgs.builder()
                            .queryId(aggQuery.id())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_dashboard")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .dashboard(JobTaskSqlTaskDashboardArgs.builder()
                            .dashboardId(dash.id())
                            .subscriptions(JobTaskSqlTaskDashboardSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build(),
                JobTaskArgs.builder()
                    .taskKey("run_alert")
                    .sqlTask(JobTaskSqlTaskArgs.builder()
                        .warehouseId(sqlJobWarehouse.id())
                        .alert(JobTaskSqlTaskAlertArgs.builder()
                            .alertId(alert.id())
                            .subscriptions(JobTaskSqlTaskAlertSubscriptionArgs.builder()
                                .userName("user@domain.com")
                                .build())
                            .build())
                        .build())
                    .build())
            .build());
    }
}
resources:
  sqlAggregationJob:
    type: databricks:Job
    name: sql_aggregation_job
    properties:
      name: Example SQL Job
      tasks:
        - taskKey: run_agg_query
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            query:
              queryId: ${aggQuery.id}
        - taskKey: run_dashboard
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            dashboard:
              dashboardId: ${dash.id}
              subscriptions:
                - userName: user@domain.com
        - taskKey: run_alert
          sqlTask:
            warehouseId: ${sqlJobWarehouse.id}
            alert:
              alertId: ${alert.id}
              subscriptions:
                - userName: user@domain.com
- Source string - The source of the project. Possible values are WORKSPACE and GIT.
- Path string - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.
- Source string - The source of the project. Possible values are WORKSPACE and GIT.
- path String - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.
- source String - The source of the project. Possible values are WORKSPACE and GIT.
- path string - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.
- source string - The source of the project. Possible values are WORKSPACE and GIT.
- path str - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.
- source str - The source of the project. Possible values are WORKSPACE and GIT.
- path String - If source is GIT: Relative path to the file in the repository specified in the git_source block with SQL commands to execute. If source is WORKSPACE: Absolute path to the file in the workspace with SQL commands to execute.
- source String - The source of the project. Possible values are WORKSPACE and GIT.
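The file variant of sql_task runs SQL from a file rather than a saved query. The TypeScript snippet below is a minimal sketch of that shape, assuming the job declares a git_source block; the repository URL, branch, and file path are illustrative placeholders, and sqlJobWarehouse is assumed to be defined elsewhere as in the examples above.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Minimal sketch: execute a SQL file stored in a Git repository on a SQL warehouse.
// The repository URL, branch, and file path are placeholders.
const sqlFileJob = new databricks.Job("sql_file_job", {
    name: "Example SQL file job",
    gitSource: {
        url: "https://github.com/example-org/sql-queries", // hypothetical repository
        provider: "gitHub",
        branch: "main",
    },
    tasks: [{
        taskKey: "run_sql_file",
        sqlTask: {
            warehouseId: sqlJobWarehouse.id, // assumed SQL warehouse defined elsewhere
            file: {
                source: "GIT",
                path: "queries/agg.sql", // relative to the repository root
            },
        },
    }],
});

With source set to WORKSPACE instead, path would be an absolute workspace path and the git_source block would not be needed.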
JobTaskSqlTaskQuery, JobTaskSqlTaskQueryArgs
- QueryId string
- QueryId string
- queryId String
- queryId string
- query_id str
- queryId String
JobTaskWebhookNotifications, JobTaskWebhookNotificationsArgs
- OnDurationWarningThresholdExceededs List<JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>. A configuration sketch is shown after this listing.
- OnFailures List<JobTaskWebhookNotificationsOnFailure> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- OnStarts List<JobTaskWebhookNotificationsOnStart> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- OnStreamingBacklogExceededs List<JobTaskWebhookNotificationsOnStreamingBacklogExceeded>
- OnSuccesses List<JobTaskWebhookNotificationsOnSuccess> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- OnDurationWarningThresholdExceededs []JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- OnFailures []JobTaskWebhookNotificationsOnFailure - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- OnStarts []JobTaskWebhookNotificationsOnStart - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- OnStreamingBacklogExceededs []JobTaskWebhookNotificationsOnStreamingBacklogExceeded
- OnSuccesses []JobTaskWebhookNotificationsOnSuccess - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs List<JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures List<JobTaskWebhookNotificationsOnFailure> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts List<JobTaskWebhookNotificationsOnStart> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs List<JobTaskWebhookNotificationsOnStreamingBacklogExceeded>
- onSuccesses List<JobTaskWebhookNotificationsOnSuccess> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded[] - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures JobTaskWebhookNotificationsOnFailure[] - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts JobTaskWebhookNotificationsOnStart[] - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs JobTaskWebhookNotificationsOnStreamingBacklogExceeded[]
- onSuccesses JobTaskWebhookNotificationsOnSuccess[] - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- on_duration_warning_threshold_exceededs Sequence[JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded] - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- on_failures Sequence[JobTaskWebhookNotificationsOnFailure] - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- on_starts Sequence[JobTaskWebhookNotificationsOnStart] - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- on_streaming_backlog_exceededs Sequence[JobTaskWebhookNotificationsOnStreamingBacklogExceeded]
- on_successes Sequence[JobTaskWebhookNotificationsOnSuccess] - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs List<Property Map> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination. The id can be retrieved through the API or from the URL of the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures List<Property Map> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts List<Property Map> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs List<Property Map>
- onSuccesses List<Property Map> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
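As a rough illustration of how these blocks fit together, the TypeScript sketch below routes a task's start, failure, and success events to a single notification destination. The destination ID is a placeholder, and shared and thisDatabricksNotebook are assumed to be resources defined elsewhere, as in the earlier examples.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Minimal sketch: notify the same destination when the task's run starts,
// fails, or succeeds. The UUID is a placeholder for a real notification
// destination ID taken from the Databricks UI or API.
const notificationId = "00000000-0000-0000-0000-000000000000";

const notifiedJob = new databricks.Job("notified_job", {
    name: "Job with webhook notifications",
    tasks: [{
        taskKey: "main",
        existingClusterId: shared.id, // assumed databricks.Cluster defined elsewhere
        notebookTask: {
            notebookPath: thisDatabricksNotebook.path, // assumed databricks.Notebook defined elsewhere
        },
        webhookNotifications: {
            onStarts: [{ id: notificationId }],
            onFailures: [{ id: notificationId }],
            onSuccesses: [{ id: notificationId }],
        },
    }],
});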
JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded, JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskWebhookNotificationsOnFailure, JobTaskWebhookNotificationsOnFailureArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskWebhookNotificationsOnStart, JobTaskWebhookNotificationsOnStartArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskWebhookNotificationsOnStreamingBacklogExceeded, JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTaskWebhookNotificationsOnSuccess, JobTaskWebhookNotificationsOnSuccessArgs
- Id string
- ID of the job
- Id string
- ID of the job
- id String
- ID of the job
- id string
- ID of the job
- id str
- ID of the job
- id String
- ID of the job
JobTrigger, JobTriggerArgs
- FileArrival JobTriggerFileArrival - configuration block to define a trigger for File Arrival events, consisting of the following attributes:
- PauseStatus string - Indicates whether this trigger is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to UNPAUSED as a value for pause_status.
- Periodic JobTriggerPeriodic - configuration block to define a trigger for Periodic Triggers, consisting of the following attributes:
- Table JobTriggerTable
- TableUpdate JobTriggerTableUpdate
- FileArrival JobTriggerFileArrival - configuration block to define a trigger for File Arrival events, consisting of the following attributes:
- PauseStatus string - Indicates whether this trigger is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to UNPAUSED as a value for pause_status.
- Periodic JobTriggerPeriodic - configuration block to define a trigger for Periodic Triggers, consisting of the following attributes:
- Table JobTriggerTable
- TableUpdate JobTriggerTableUpdate
- fileArrival JobTriggerFileArrival - configuration block to define a trigger for File Arrival events, consisting of the following attributes:
- pauseStatus String - Indicates whether this trigger is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to UNPAUSED as a value for pause_status.
- periodic JobTriggerPeriodic - configuration block to define a trigger for Periodic Triggers, consisting of the following attributes:
- table JobTriggerTable
- tableUpdate JobTriggerTableUpdate
- fileArrival JobTriggerFileArrival - configuration block to define a trigger for File Arrival events, consisting of the following attributes:
- pauseStatus string - Indicates whether this trigger is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to UNPAUSED as a value for pause_status.
- periodic JobTriggerPeriodic - configuration block to define a trigger for Periodic Triggers, consisting of the following attributes:
- table JobTriggerTable
- tableUpdate JobTriggerTableUpdate
- file_arrival JobTriggerFileArrival - configuration block to define a trigger for File Arrival events, consisting of the following attributes:
- pause_status str - Indicates whether this trigger is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to UNPAUSED as a value for pause_status.
- periodic JobTriggerPeriodic - configuration block to define a trigger for Periodic Triggers, consisting of the following attributes:
- table JobTriggerTable
- table_update JobTriggerTableUpdate
- fileArrival Property Map - configuration block to define a trigger for File Arrival events, consisting of the following attributes:
- pauseStatus String - Indicates whether this trigger is paused or not. Either PAUSED or UNPAUSED. When the pause_status field is omitted in the block, the server will default to UNPAUSED as a value for pause_status.
- periodic Property Map - configuration block to define a trigger for Periodic Triggers, consisting of the following attributes:
- table Property Map
- tableUpdate Property Map
JobTriggerFileArrival, JobTriggerFileArrivalArgs
- Url string - URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
- MinTimeBetweenTriggersSeconds int - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- WaitAfterLastChangeSeconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- Url string - URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
- MinTimeBetweenTriggersSeconds int - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- WaitAfterLastChangeSeconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- url String - URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
- minTimeBetweenTriggersSeconds Integer - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- waitAfterLastChangeSeconds Integer - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- url string - URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
- minTimeBetweenTriggersSeconds number - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- waitAfterLastChangeSeconds number - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- url str - URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
- min_time_between_triggers_seconds int - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- wait_after_last_change_seconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- url String - URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (/).
- minTimeBetweenTriggersSeconds Number - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- waitAfterLastChangeSeconds Number - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
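As a rough sketch of how a file arrival trigger attaches to a job, the TypeScript snippet below watches an external location for new files. The storage URL is a placeholder (note the required trailing slash), and shared and thisDatabricksNotebook are assumed to be resources defined elsewhere, as in the earlier examples.

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Minimal sketch: start a run when new files land under a monitored path.
// The ABFSS URL is a placeholder for a real external location path and
// must keep its trailing slash.
const fileTriggeredJob = new databricks.Job("file_triggered_job", {
    name: "File arrival triggered job",
    trigger: {
        pauseStatus: "UNPAUSED",
        fileArrival: {
            url: "abfss://landing@exampleaccount.dfs.core.windows.net/incoming/", // placeholder location
            minTimeBetweenTriggersSeconds: 60,
            waitAfterLastChangeSeconds: 120,
        },
    },
    tasks: [{
        taskKey: "ingest",
        existingClusterId: shared.id, // assumed databricks.Cluster defined elsewhere
        notebookTask: {
            notebookPath: thisDatabricksNotebook.path, // assumed databricks.Notebook defined elsewhere
        },
    }],
});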
JobTriggerPeriodic, JobTriggerPeriodicArgs
JobTriggerTable, JobTriggerTableArgs
- Condition string
- MinTimeBetweenTriggersSeconds int - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- TableNames List<string>
- WaitAfterLastChangeSeconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- Condition string
- MinTimeBetweenTriggersSeconds int - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- TableNames []string
- WaitAfterLastChangeSeconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- condition String
- minTimeBetweenTriggersSeconds Integer - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- tableNames List<String>
- waitAfterLastChangeSeconds Integer - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- condition string
- minTimeBetweenTriggersSeconds number - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- tableNames string[]
- waitAfterLastChangeSeconds number - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- condition str
- min_time_between_triggers_seconds int - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- table_names Sequence[str]
- wait_after_last_change_seconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- condition String
- minTimeBetweenTriggersSeconds Number - If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- tableNames List<String>
- waitAfterLastChangeSeconds Number - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
JobTriggerTableUpdate, JobTriggerTableUpdateArgs
- TableNames List<string>
- Condition string
- MinTimeBetweenTriggersSeconds int - If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- WaitAfterLastChangeSeconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- TableNames []string
- Condition string
- MinTimeBetweenTriggersSeconds int - If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- WaitAfterLastChangeSeconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- tableNames List<String>
- condition String
- minTimeBetweenTriggersSeconds Integer - If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- waitAfterLastChangeSeconds Integer - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- tableNames string[]
- condition string
- minTimeBetweenTriggersSeconds number - If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- waitAfterLastChangeSeconds number - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- table_names Sequence[str]
- condition str
- min_time_between_triggers_seconds int - If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- wait_after_last_change_seconds int - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- tableNames List<String>
- condition String
- minTimeBetweenTriggersSeconds Number - If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- waitAfterLastChangeSeconds Number - If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
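As a hedged sketch of the table-update variant: the example below assumes that the provider exposes JobTriggerTableUpdate through a tableUpdate block on trigger, and that the ANY_UPDATED condition value and the listed Unity Catalog table names are valid in the target workspace. All names are placeholders.

```typescript
import * as databricks from "@pulumi/databricks";

// Sketch: start a run when any of the listed Unity Catalog tables receives an update.
// Table names, cluster ID, notebook path, and the condition value are illustrative assumptions.
const tableDrivenJob = new databricks.Job("table-driven-job", {
    name: "Refresh downstream aggregates",
    tasks: [{
        taskKey: "refresh",
        existingClusterId: "0000-000000-example", // hypothetical cluster ID
        notebookTask: {
            notebookPath: "/Shared/refresh-aggregates",
        },
    }],
    trigger: {
        tableUpdate: {
            tableNames: ["main.sales.orders", "main.sales.refunds"],
            condition: "ANY_UPDATED",           // assumed condition value
            minTimeBetweenTriggersSeconds: 300, // at most one run every 5 minutes
            waitAfterLastChangeSeconds: 120,    // settle period after the last change
        },
    },
});
```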
JobWebhookNotifications, JobWebhookNotificationsArgs
- OnDurationWarningThresholdExceededs List<JobWebhookNotificationsOnDurationWarningThresholdExceeded> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the destination's URL in the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- OnFailures List<JobWebhookNotificationsOnFailure> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- OnStarts List<JobWebhookNotificationsOnStart> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- OnStreamingBacklogExceededs List<JobWebhookNotificationsOnStreamingBacklogExceeded>
- OnSuccesses List<JobWebhookNotificationsOnSuccess> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- OnDurationWarningThresholdExceededs []JobWebhookNotificationsOnDurationWarningThresholdExceeded - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the destination's URL in the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- OnFailures []JobWebhookNotificationsOnFailure - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- OnStarts []JobWebhookNotificationsOnStart - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- OnStreamingBacklogExceededs []JobWebhookNotificationsOnStreamingBacklogExceeded
- OnSuccesses []JobWebhookNotificationsOnSuccess - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs List<JobWebhookNotificationsOnDurationWarningThresholdExceeded> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the destination's URL in the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures List<JobWebhookNotificationsOnFailure> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts List<JobWebhookNotificationsOnStart> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs List<JobWebhookNotificationsOnStreamingBacklogExceeded>
- onSuccesses List<JobWebhookNotificationsOnSuccess> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs JobWebhookNotificationsOnDurationWarningThresholdExceeded[] - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the destination's URL in the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures JobWebhookNotificationsOnFailure[] - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts JobWebhookNotificationsOnStart[] - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs JobWebhookNotificationsOnStreamingBacklogExceeded[]
- onSuccesses JobWebhookNotificationsOnSuccess[] - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- on_duration_warning_threshold_exceededs Sequence[JobWebhookNotificationsOnDurationWarningThresholdExceeded] - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the destination's URL in the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- on_failures Sequence[JobWebhookNotificationsOnFailure] - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- on_starts Sequence[JobWebhookNotificationsOnStart] - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- on_streaming_backlog_exceededs Sequence[JobWebhookNotificationsOnStreamingBacklogExceeded]
- on_successes Sequence[JobWebhookNotificationsOnSuccess] - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
- onDurationWarningThresholdExceededs List<Property Map> - (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the RUN_DURATION_SECONDS metric in the health block. Note that the id is not to be confused with the name of the alert destination; the id can be retrieved through the API or from the destination's URL in the Databricks UI: https://<workspace host>/sql/destinations/<notification id>?o=<workspace id>
- onFailures List<Property Map> - (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified.
- onStarts List<Property Map> - (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.
- onStreamingBacklogExceededs List<Property Map>
- onSuccesses List<Property Map> - (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified.
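To show how these notification lists are typically wired up, here is a minimal sketch. It assumes a notification destination already exists in the workspace; the destination ID (a UUID retrievable as described above), cluster ID, and notebook path are placeholders, and the health rule fields are assumed from the Databricks Jobs API to match the RUN_DURATION_SECONDS note.

```typescript
import * as databricks from "@pulumi/databricks";

// Placeholder ID of an existing notification destination
// (retrievable via the API or from the destination's URL in the Databricks UI).
const destinationId = "00000000-0000-0000-0000-000000000001";

const monitoredJob = new databricks.Job("monitored-job", {
    name: "Job with webhook notifications",
    tasks: [{
        taskKey: "main",
        existingClusterId: "0000-000000-example", // hypothetical cluster ID
        notebookTask: {
            notebookPath: "/Shared/main",
        },
    }],
    // Health rule that the duration-warning webhook reacts to
    // (metric/op/value names assumed from the Databricks Jobs API).
    health: {
        rules: [{
            metric: "RUN_DURATION_SECONDS",
            op: "GREATER_THAN",
            value: 3600,
        }],
    },
    webhookNotifications: {
        onFailures: [{ id: destinationId }],
        onSuccesses: [{ id: destinationId }],
        onDurationWarningThresholdExceededs: [{ id: destinationId }],
    },
});
```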
JobWebhookNotificationsOnDurationWarningThresholdExceeded, JobWebhookNotificationsOnDurationWarningThresholdExceededArgs
- Id string
- ID of the notification destination
- Id string
- ID of the notification destination
- id String
- ID of the notification destination
- id string
- ID of the notification destination
- id str
- ID of the notification destination
- id String
- ID of the notification destination
JobWebhookNotificationsOnFailure, JobWebhookNotificationsOnFailureArgs
- Id string
- ID of the notification destination
- Id string
- ID of the notification destination
- id String
- ID of the notification destination
- id string
- ID of the notification destination
- id str
- ID of the notification destination
- id String
- ID of the notification destination
JobWebhookNotificationsOnStart, JobWebhookNotificationsOnStartArgs
- Id string
- ID of the notification destination
- Id string
- ID of the notification destination
- id String
- ID of the notification destination
- id string
- ID of the notification destination
- id str
- ID of the notification destination
- id String
- ID of the notification destination
JobWebhookNotificationsOnStreamingBacklogExceeded, JobWebhookNotificationsOnStreamingBacklogExceededArgs
- Id string
- ID of the notification destination
- Id string
- ID of the notification destination
- id String
- ID of the notification destination
- id string
- ID of the notification destination
- id str
- ID of the notification destination
- id String
- ID of the notification destination
JobWebhookNotificationsOnSuccess, JobWebhookNotificationsOnSuccessArgs
- Id string
- ID of the notification destination
- Id string
- ID of the notification destination
- id String
- ID of the notification destination
- id string
- ID of the notification destination
- id str
- ID of the notification destination
- id String
- ID of the notification destination
Import
The resource job can be imported using the ID of the job:
bash
$ pulumi import databricks:index/job:Job this <job-id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
databricks
Terraform Provider.