databricks.InstancePool
This resource allows you to manage instance pools, which reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use cloud instances. When a cluster attached to a pool needs an instance, it first attempts to allocate one of the pool’s idle instances. If the pool has no idle instances, it expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a cluster releases an instance, it returns to the pool and is free for another cluster to use. Only clusters attached to a pool can use that pool’s idle instances.
It is important to know that different cloud service providers have different node_type_id, disk_specs, and potentially other configurations.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const smallest = databricks.getNodeType({});
const smallestNodes = new databricks.InstancePool("smallest_nodes", {
instancePoolName: "Smallest Nodes",
minIdleInstances: 0,
maxCapacity: 300,
nodeTypeId: smallest.then(smallest => smallest.id),
awsAttributes: {
availability: "ON_DEMAND",
zoneId: "us-east-1a",
spotBidPricePercent: 100,
},
idleInstanceAutoterminationMinutes: 10,
diskSpec: {
diskType: {
ebsVolumeType: "GENERAL_PURPOSE_SSD",
},
diskSize: 80,
diskCount: 1,
},
});
import pulumi
import pulumi_databricks as databricks
smallest = databricks.get_node_type()
smallest_nodes = databricks.InstancePool("smallest_nodes",
instance_pool_name="Smallest Nodes",
min_idle_instances=0,
max_capacity=300,
node_type_id=smallest.id,
aws_attributes={
"availability": "ON_DEMAND",
"zone_id": "us-east-1a",
"spot_bid_price_percent": 100,
},
idle_instance_autotermination_minutes=10,
disk_spec={
"disk_type": {
"ebs_volume_type": "GENERAL_PURPOSE_SSD",
},
"disk_size": 80,
"disk_count": 1,
})
package main
import (
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
smallest, err := databricks.GetNodeType(ctx, &databricks.GetNodeTypeArgs{}, nil)
if err != nil {
return err
}
_, err = databricks.NewInstancePool(ctx, "smallest_nodes", &databricks.InstancePoolArgs{
InstancePoolName: pulumi.String("Smallest Nodes"),
MinIdleInstances: pulumi.Int(0),
MaxCapacity: pulumi.Int(300),
NodeTypeId: pulumi.String(smallest.Id),
AwsAttributes: &databricks.InstancePoolAwsAttributesArgs{
Availability: pulumi.String("ON_DEMAND"),
ZoneId: pulumi.String("us-east-1a"),
SpotBidPricePercent: pulumi.Int(100),
},
IdleInstanceAutoterminationMinutes: pulumi.Int(10),
DiskSpec: &databricks.InstancePoolDiskSpecArgs{
DiskType: &databricks.InstancePoolDiskSpecDiskTypeArgs{
EbsVolumeType: pulumi.String("GENERAL_PURPOSE_SSD"),
},
DiskSize: pulumi.Int(80),
DiskCount: pulumi.Int(1),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var smallest = Databricks.GetNodeType.Invoke();
var smallestNodes = new Databricks.InstancePool("smallest_nodes", new()
{
InstancePoolName = "Smallest Nodes",
MinIdleInstances = 0,
MaxCapacity = 300,
NodeTypeId = smallest.Apply(getNodeTypeResult => getNodeTypeResult.Id),
AwsAttributes = new Databricks.Inputs.InstancePoolAwsAttributesArgs
{
Availability = "ON_DEMAND",
ZoneId = "us-east-1a",
SpotBidPricePercent = 100,
},
IdleInstanceAutoterminationMinutes = 10,
DiskSpec = new Databricks.Inputs.InstancePoolDiskSpecArgs
{
DiskType = new Databricks.Inputs.InstancePoolDiskSpecDiskTypeArgs
{
EbsVolumeType = "GENERAL_PURPOSE_SSD",
},
DiskSize = 80,
DiskCount = 1,
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetNodeTypeArgs;
import com.pulumi.databricks.InstancePool;
import com.pulumi.databricks.InstancePoolArgs;
import com.pulumi.databricks.inputs.InstancePoolAwsAttributesArgs;
import com.pulumi.databricks.inputs.InstancePoolDiskSpecArgs;
import com.pulumi.databricks.inputs.InstancePoolDiskSpecDiskTypeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var smallest = DatabricksFunctions.getNodeType();
var smallestNodes = new InstancePool("smallestNodes", InstancePoolArgs.builder()
.instancePoolName("Smallest Nodes")
.minIdleInstances(0)
.maxCapacity(300)
.nodeTypeId(smallest.applyValue(getNodeTypeResult -> getNodeTypeResult.id()))
.awsAttributes(InstancePoolAwsAttributesArgs.builder()
.availability("ON_DEMAND")
.zoneId("us-east-1a")
.spotBidPricePercent("100")
.build())
.idleInstanceAutoterminationMinutes(10)
.diskSpec(InstancePoolDiskSpecArgs.builder()
.diskType(InstancePoolDiskSpecDiskTypeArgs.builder()
.ebsVolumeType("GENERAL_PURPOSE_SSD")
.build())
.diskSize(80)
.diskCount(1)
.build())
.build());
}
}
resources:
  smallestNodes:
    type: databricks:InstancePool
    name: smallest_nodes
    properties:
      instancePoolName: Smallest Nodes
      minIdleInstances: 0
      maxCapacity: 300
      nodeTypeId: ${smallest.id}
      awsAttributes:
        availability: ON_DEMAND
        zoneId: us-east-1a
        spotBidPricePercent: 100
      idleInstanceAutoterminationMinutes: 10
      diskSpec:
        diskType:
          ebsVolumeType: GENERAL_PURPOSE_SSD
        diskSize: 80
        diskCount: 1
variables:
  smallest:
    fn::invoke:
      Function: databricks:getNodeType
      Arguments: {}
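Clusters consume a pool by referencing its ID through their instance_pool_id argument. The following is a minimal Python sketch that attaches a cluster to the pool defined in the Python example above; the cluster name, runtime string, and autoscale bounds are illustrative placeholders, not values prescribed by this resource.
import pulumi
import pulumi_databricks as databricks

# Attach a cluster to the pool; idle pool instances are used instead of
# provisioning fresh cloud instances, so the cluster starts faster.
pooled_cluster = databricks.Cluster("pooled_cluster",
    cluster_name="Shared Pool Cluster",  # illustrative name
    spark_version="14.3.x-scala2.12",  # illustrative runtime; prefer databricks.get_spark_version()
    instance_pool_id=smallest_nodes.id,
    autotermination_minutes=20,
    autoscale={
        "min_workers": 1,
        "max_workers": 10,
    })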
Access Control
- databricks.Group and databricks.User can control which groups or individual users can create instance pools.
- databricks.Permissions can control which groups or individual users can Manage or Attach to individual instance pools, as sketched below.
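For example, the following Python sketch grants a group attach-only access to the pool from the example above; the group name is an illustrative placeholder.
import pulumi_databricks as databricks

# Allow the group to attach clusters to the pool without managing it.
pool_usage = databricks.Permissions("pool_usage",
    instance_pool_id=smallest_nodes.id,
    access_controls=[{
        "group_name": "Data Engineers",  # illustrative group name
        "permission_level": "CAN_ATTACH_TO",
    }])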
Create InstancePool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new InstancePool(name: string, args: InstancePoolArgs, opts?: CustomResourceOptions);
@overload
def InstancePool(resource_name: str,
args: InstancePoolArgs,
opts: Optional[ResourceOptions] = None)
@overload
def InstancePool(resource_name: str,
opts: Optional[ResourceOptions] = None,
instance_pool_name: Optional[str] = None,
idle_instance_autotermination_minutes: Optional[int] = None,
instance_pool_id: Optional[str] = None,
disk_spec: Optional[InstancePoolDiskSpecArgs] = None,
enable_elastic_disk: Optional[bool] = None,
gcp_attributes: Optional[InstancePoolGcpAttributesArgs] = None,
azure_attributes: Optional[InstancePoolAzureAttributesArgs] = None,
instance_pool_fleet_attributes: Optional[InstancePoolInstancePoolFleetAttributesArgs] = None,
custom_tags: Optional[Mapping[str, str]] = None,
aws_attributes: Optional[InstancePoolAwsAttributesArgs] = None,
max_capacity: Optional[int] = None,
min_idle_instances: Optional[int] = None,
node_type_id: Optional[str] = None,
preloaded_docker_images: Optional[Sequence[InstancePoolPreloadedDockerImageArgs]] = None,
preloaded_spark_versions: Optional[Sequence[str]] = None)
func NewInstancePool(ctx *Context, name string, args InstancePoolArgs, opts ...ResourceOption) (*InstancePool, error)
public InstancePool(string name, InstancePoolArgs args, CustomResourceOptions? opts = null)
public InstancePool(String name, InstancePoolArgs args)
public InstancePool(String name, InstancePoolArgs args, CustomResourceOptions options)
type: databricks:InstancePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args InstancePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args InstancePoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args InstancePoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args InstancePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args InstancePoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var instancePoolResource = new Databricks.InstancePool("instancePoolResource", new()
{
InstancePoolName = "string",
IdleInstanceAutoterminationMinutes = 0,
InstancePoolId = "string",
DiskSpec = new Databricks.Inputs.InstancePoolDiskSpecArgs
{
DiskCount = 0,
DiskSize = 0,
DiskType = new Databricks.Inputs.InstancePoolDiskSpecDiskTypeArgs
{
AzureDiskVolumeType = "string",
EbsVolumeType = "string",
},
},
EnableElasticDisk = false,
GcpAttributes = new Databricks.Inputs.InstancePoolGcpAttributesArgs
{
GcpAvailability = "string",
LocalSsdCount = 0,
ZoneId = "string",
},
AzureAttributes = new Databricks.Inputs.InstancePoolAzureAttributesArgs
{
Availability = "string",
SpotBidMaxPrice = 0,
},
InstancePoolFleetAttributes = new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesArgs
{
LaunchTemplateOverrides = new[]
{
new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs
{
AvailabilityZone = "string",
InstanceType = "string",
},
},
FleetOnDemandOption = new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs
{
AllocationStrategy = "string",
InstancePoolsToUseCount = 0,
},
FleetSpotOption = new Databricks.Inputs.InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs
{
AllocationStrategy = "string",
InstancePoolsToUseCount = 0,
},
},
CustomTags =
{
{ "string", "string" },
},
AwsAttributes = new Databricks.Inputs.InstancePoolAwsAttributesArgs
{
Availability = "string",
SpotBidPricePercent = 0,
ZoneId = "string",
},
MaxCapacity = 0,
MinIdleInstances = 0,
NodeTypeId = "string",
PreloadedDockerImages = new[]
{
new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
{
Url = "string",
BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
{
Password = "string",
Username = "string",
},
},
},
PreloadedSparkVersions = new[]
{
"string",
},
});
example, err := databricks.NewInstancePool(ctx, "instancePoolResource", &databricks.InstancePoolArgs{
InstancePoolName: pulumi.String("string"),
IdleInstanceAutoterminationMinutes: pulumi.Int(0),
InstancePoolId: pulumi.String("string"),
DiskSpec: &databricks.InstancePoolDiskSpecArgs{
DiskCount: pulumi.Int(0),
DiskSize: pulumi.Int(0),
DiskType: &databricks.InstancePoolDiskSpecDiskTypeArgs{
AzureDiskVolumeType: pulumi.String("string"),
EbsVolumeType: pulumi.String("string"),
},
},
EnableElasticDisk: pulumi.Bool(false),
GcpAttributes: &databricks.InstancePoolGcpAttributesArgs{
GcpAvailability: pulumi.String("string"),
LocalSsdCount: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
AzureAttributes: &databricks.InstancePoolAzureAttributesArgs{
Availability: pulumi.String("string"),
SpotBidMaxPrice: pulumi.Float64(0),
},
InstancePoolFleetAttributes: &databricks.InstancePoolInstancePoolFleetAttributesArgs{
LaunchTemplateOverrides: databricks.InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArray{
&databricks.InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs{
AvailabilityZone: pulumi.String("string"),
InstanceType: pulumi.String("string"),
},
},
FleetOnDemandOption: &databricks.InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs{
AllocationStrategy: pulumi.String("string"),
InstancePoolsToUseCount: pulumi.Int(0),
},
FleetSpotOption: &databricks.InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs{
AllocationStrategy: pulumi.String("string"),
InstancePoolsToUseCount: pulumi.Int(0),
},
},
CustomTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
AwsAttributes: &databricks.InstancePoolAwsAttributesArgs{
Availability: pulumi.String("string"),
SpotBidPricePercent: pulumi.Int(0),
ZoneId: pulumi.String("string"),
},
MaxCapacity: pulumi.Int(0),
MinIdleInstances: pulumi.Int(0),
NodeTypeId: pulumi.String("string"),
PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
&databricks.InstancePoolPreloadedDockerImageArgs{
Url: pulumi.String("string"),
BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
Password: pulumi.String("string"),
Username: pulumi.String("string"),
},
},
},
PreloadedSparkVersions: pulumi.StringArray{
pulumi.String("string"),
},
})
var instancePoolResource = new InstancePool("instancePoolResource", InstancePoolArgs.builder()
.instancePoolName("string")
.idleInstanceAutoterminationMinutes(0)
.instancePoolId("string")
.diskSpec(InstancePoolDiskSpecArgs.builder()
.diskCount(0)
.diskSize(0)
.diskType(InstancePoolDiskSpecDiskTypeArgs.builder()
.azureDiskVolumeType("string")
.ebsVolumeType("string")
.build())
.build())
.enableElasticDisk(false)
.gcpAttributes(InstancePoolGcpAttributesArgs.builder()
.gcpAvailability("string")
.localSsdCount(0)
.zoneId("string")
.build())
.azureAttributes(InstancePoolAzureAttributesArgs.builder()
.availability("string")
.spotBidMaxPrice(0)
.build())
.instancePoolFleetAttributes(InstancePoolInstancePoolFleetAttributesArgs.builder()
.launchTemplateOverrides(InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs.builder()
.availabilityZone("string")
.instanceType("string")
.build())
.fleetOnDemandOption(InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs.builder()
.allocationStrategy("string")
.instancePoolsToUseCount(0)
.build())
.fleetSpotOption(InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs.builder()
.allocationStrategy("string")
.instancePoolsToUseCount(0)
.build())
.build())
.customTags(Map.of("string", "string"))
.awsAttributes(InstancePoolAwsAttributesArgs.builder()
.availability("string")
.spotBidPricePercent(0)
.zoneId("string")
.build())
.maxCapacity(0)
.minIdleInstances(0)
.nodeTypeId("string")
.preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
.url("string")
.basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
.password("string")
.username("string")
.build())
.build())
.preloadedSparkVersions("string")
.build());
instance_pool_resource = databricks.InstancePool("instancePoolResource",
instance_pool_name="string",
idle_instance_autotermination_minutes=0,
instance_pool_id="string",
disk_spec={
"disk_count": 0,
"disk_size": 0,
"disk_type": {
"azure_disk_volume_type": "string",
"ebs_volume_type": "string",
},
},
enable_elastic_disk=False,
gcp_attributes={
"gcp_availability": "string",
"local_ssd_count": 0,
"zone_id": "string",
},
azure_attributes={
"availability": "string",
"spot_bid_max_price": 0,
},
instance_pool_fleet_attributes={
"launch_template_overrides": [{
"availability_zone": "string",
"instance_type": "string",
}],
"fleet_on_demand_option": {
"allocation_strategy": "string",
"instance_pools_to_use_count": 0,
},
"fleet_spot_option": {
"allocation_strategy": "string",
"instance_pools_to_use_count": 0,
},
},
custom_tags={
"string": "string",
},
aws_attributes={
"availability": "string",
"spot_bid_price_percent": 0,
"zone_id": "string",
},
max_capacity=0,
min_idle_instances=0,
node_type_id="string",
preloaded_docker_images=[{
"url": "string",
"basic_auth": {
"password": "string",
"username": "string",
},
}],
preloaded_spark_versions=["string"])
const instancePoolResource = new databricks.InstancePool("instancePoolResource", {
instancePoolName: "string",
idleInstanceAutoterminationMinutes: 0,
instancePoolId: "string",
diskSpec: {
diskCount: 0,
diskSize: 0,
diskType: {
azureDiskVolumeType: "string",
ebsVolumeType: "string",
},
},
enableElasticDisk: false,
gcpAttributes: {
gcpAvailability: "string",
localSsdCount: 0,
zoneId: "string",
},
azureAttributes: {
availability: "string",
spotBidMaxPrice: 0,
},
instancePoolFleetAttributes: {
launchTemplateOverrides: [{
availabilityZone: "string",
instanceType: "string",
}],
fleetOnDemandOption: {
allocationStrategy: "string",
instancePoolsToUseCount: 0,
},
fleetSpotOption: {
allocationStrategy: "string",
instancePoolsToUseCount: 0,
},
},
customTags: {
string: "string",
},
awsAttributes: {
availability: "string",
spotBidPricePercent: 0,
zoneId: "string",
},
maxCapacity: 0,
minIdleInstances: 0,
nodeTypeId: "string",
preloadedDockerImages: [{
url: "string",
basicAuth: {
password: "string",
username: "string",
},
}],
preloadedSparkVersions: ["string"],
});
type: databricks:InstancePool
properties:
  awsAttributes:
    availability: string
    spotBidPricePercent: 0
    zoneId: string
  azureAttributes:
    availability: string
    spotBidMaxPrice: 0
  customTags:
    string: string
  diskSpec:
    diskCount: 0
    diskSize: 0
    diskType:
      azureDiskVolumeType: string
      ebsVolumeType: string
  enableElasticDisk: false
  gcpAttributes:
    gcpAvailability: string
    localSsdCount: 0
    zoneId: string
  idleInstanceAutoterminationMinutes: 0
  instancePoolFleetAttributes:
    fleetOnDemandOption:
      allocationStrategy: string
      instancePoolsToUseCount: 0
    fleetSpotOption:
      allocationStrategy: string
      instancePoolsToUseCount: 0
    launchTemplateOverrides:
      - availabilityZone: string
        instanceType: string
  instancePoolId: string
  instancePoolName: string
  maxCapacity: 0
  minIdleInstances: 0
  nodeTypeId: string
  preloadedDockerImages:
    - basicAuth:
        password: string
        username: string
      url: string
  preloadedSparkVersions:
    - string
InstancePool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
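For example, the disk_spec input can be provided in either form; this is a minimal sketch with illustrative values.
import pulumi_databricks as databricks

# Typed argument class ...
disk_spec_args = databricks.InstancePoolDiskSpecArgs(
    disk_count=1,
    disk_size=80,
    disk_type=databricks.InstancePoolDiskSpecDiskTypeArgs(
        ebs_volume_type="GENERAL_PURPOSE_SSD",
    ),
)

# ... or an equivalent dictionary literal with the same keys.
disk_spec_dict = {
    "disk_count": 1,
    "disk_size": 80,
    "disk_type": {"ebs_volume_type": "GENERAL_PURPOSE_SSD"},
}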
The InstancePool resource accepts the following input properties:
- IdleInstanceAutoterminationMinutes int
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- InstancePoolName string
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- AwsAttributes InstancePoolAwsAttributes
- AzureAttributes InstancePoolAzureAttributes
- CustomTags Dictionary<string, string>
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- DiskSpec InstancePoolDiskSpec
- EnableElasticDisk bool
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- GcpAttributes InstancePoolGcpAttributes
- InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
- InstancePoolId string
- MaxCapacity int
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- MinIdleInstances int
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- NodeTypeId string
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- PreloadedDockerImages List<InstancePoolPreloadedDockerImage>
- PreloadedSparkVersions List<string>
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- IdleInstanceAutoterminationMinutes int
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- InstancePoolName string
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- AwsAttributes InstancePoolAwsAttributesArgs
- AzureAttributes InstancePoolAzureAttributesArgs
- CustomTags map[string]string
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- DiskSpec InstancePoolDiskSpecArgs
- EnableElasticDisk bool
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- GcpAttributes InstancePoolGcpAttributesArgs
- InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
- InstancePoolId string
- MaxCapacity int
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- MinIdleInstances int
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- NodeTypeId string
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- PreloadedDockerImages []InstancePoolPreloadedDockerImageArgs
- PreloadedSparkVersions []string
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- idleInstanceAutoterminationMinutes Integer
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instancePoolName String
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- awsAttributes InstancePoolAwsAttributes
- azureAttributes InstancePoolAzureAttributes
- customTags Map<String,String>
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- diskSpec InstancePoolDiskSpec
- enableElasticDisk Boolean
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcpAttributes InstancePoolGcpAttributes
- instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
- instancePoolId String
- maxCapacity Integer
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- minIdleInstances Integer
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- nodeTypeId String
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloadedDockerImages List<InstancePoolPreloadedDockerImage>
- preloadedSparkVersions List<String>
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- idleInstanceAutoterminationMinutes number
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instancePoolName string
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- awsAttributes InstancePoolAwsAttributes
- azureAttributes InstancePoolAzureAttributes
- customTags {[key: string]: string}
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- diskSpec InstancePoolDiskSpec
- enableElasticDisk boolean
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcpAttributes InstancePoolGcpAttributes
- instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
- instancePoolId string
- maxCapacity number
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- minIdleInstances number
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- nodeTypeId string
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloadedDockerImages InstancePoolPreloadedDockerImage[]
- preloadedSparkVersions string[]
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- idle_instance_autotermination_minutes int
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instance_pool_name str
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- aws_attributes InstancePoolAwsAttributesArgs
- azure_attributes InstancePoolAzureAttributesArgs
- custom_tags Mapping[str, str]
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- disk_spec InstancePoolDiskSpecArgs
- enable_elastic_disk bool
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcp_attributes InstancePoolGcpAttributesArgs
- instance_pool_fleet_attributes InstancePoolInstancePoolFleetAttributesArgs
- instance_pool_id str
- max_capacity int
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- min_idle_instances int
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- node_type_id str
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloaded_docker_images Sequence[InstancePoolPreloadedDockerImageArgs]
- preloaded_spark_versions Sequence[str]
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- idleInstanceAutoterminationMinutes Number
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instancePoolName String
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- awsAttributes Property Map
- azureAttributes Property Map
- customTags Map<String>
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- diskSpec Property Map
- enableElasticDisk Boolean
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcpAttributes Property Map
- instancePoolFleetAttributes Property Map
- instancePoolId String
- maxCapacity Number
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- minIdleInstances Number
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- nodeTypeId String
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloadedDockerImages List<Property Map>
- preloadedSparkVersions List<String>
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
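For example, preloaded_spark_versions can be populated from the databricks.getSparkVersion data source; the following is a minimal Python sketch in which the pool name and sizing values are illustrative.
import pulumi_databricks as databricks

# Preload the latest LTS runtime so clusters created from the pool skip the image download.
lts = databricks.get_spark_version(long_term_support=True)
node = databricks.get_node_type(local_disk=True)

preloaded_pool = databricks.InstancePool("preloaded_pool",
    instance_pool_name="Preloaded LTS Pool",
    node_type_id=node.id,
    min_idle_instances=1,
    max_capacity=50,
    idle_instance_autotermination_minutes=15,
    preloaded_spark_versions=[lts.id])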
Outputs
All input properties are implicitly available as output properties. Additionally, the InstancePool resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing InstancePool Resource
Get an existing InstancePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: InstancePoolState, opts?: CustomResourceOptions): InstancePool
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
aws_attributes: Optional[InstancePoolAwsAttributesArgs] = None,
azure_attributes: Optional[InstancePoolAzureAttributesArgs] = None,
custom_tags: Optional[Mapping[str, str]] = None,
disk_spec: Optional[InstancePoolDiskSpecArgs] = None,
enable_elastic_disk: Optional[bool] = None,
gcp_attributes: Optional[InstancePoolGcpAttributesArgs] = None,
idle_instance_autotermination_minutes: Optional[int] = None,
instance_pool_fleet_attributes: Optional[InstancePoolInstancePoolFleetAttributesArgs] = None,
instance_pool_id: Optional[str] = None,
instance_pool_name: Optional[str] = None,
max_capacity: Optional[int] = None,
min_idle_instances: Optional[int] = None,
node_type_id: Optional[str] = None,
preloaded_docker_images: Optional[Sequence[InstancePoolPreloadedDockerImageArgs]] = None,
preloaded_spark_versions: Optional[Sequence[str]] = None) -> InstancePool
func GetInstancePool(ctx *Context, name string, id IDInput, state *InstancePoolState, opts ...ResourceOption) (*InstancePool, error)
public static InstancePool Get(string name, Input<string> id, InstancePoolState? state, CustomResourceOptions? opts = null)
public static InstancePool get(String name, Output<String> id, InstancePoolState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
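For example, the following minimal Python sketch adopts an existing pool into a program; the pool ID shown is a placeholder for a workspace-assigned ID.
import pulumi
import pulumi_databricks as databricks

# Look up an existing pool by its instance pool ID (placeholder value).
existing = databricks.InstancePool.get("existing_pool", "1234-567890-pool12-pool-abcdefgh")

pulumi.export("existing_pool_name", existing.instance_pool_name)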
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AwsAttributes InstancePoolAwsAttributes
- AzureAttributes InstancePoolAzureAttributes
- CustomTags Dictionary<string, string>
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- DiskSpec InstancePoolDiskSpec
- EnableElasticDisk bool
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- GcpAttributes InstancePoolGcpAttributes
- IdleInstanceAutoterminationMinutes int
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
- InstancePoolId string
- InstancePoolName string
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- MaxCapacity int
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- MinIdleInstances int
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- NodeTypeId string
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- PreloadedDockerImages List<InstancePoolPreloadedDockerImage>
- PreloadedSparkVersions List<string>
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- AwsAttributes InstancePoolAwsAttributesArgs
- AzureAttributes InstancePoolAzureAttributesArgs
- CustomTags map[string]string
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- DiskSpec InstancePoolDiskSpecArgs
- EnableElasticDisk bool
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- GcpAttributes InstancePoolGcpAttributesArgs
- IdleInstanceAutoterminationMinutes int
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- InstancePoolFleetAttributes InstancePoolInstancePoolFleetAttributesArgs
- InstancePoolId string
- InstancePoolName string
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- MaxCapacity int
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- MinIdleInstances int
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- NodeTypeId string
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- PreloadedDockerImages []InstancePoolPreloadedDockerImageArgs
- PreloadedSparkVersions []string
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- awsAttributes InstancePoolAwsAttributes
- azureAttributes InstancePoolAzureAttributes
- customTags Map<String,String>
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- diskSpec InstancePoolDiskSpec
- enableElasticDisk Boolean
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcpAttributes InstancePoolGcpAttributes
- idleInstanceAutoterminationMinutes Integer
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
- instancePoolId String
- instancePoolName String
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- maxCapacity Integer
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- minIdleInstances Integer
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- nodeTypeId String
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloadedDockerImages List<InstancePoolPreloadedDockerImage>
- preloadedSparkVersions List<String>
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- awsAttributes InstancePoolAwsAttributes
- azureAttributes InstancePoolAzureAttributes
- customTags {[key: string]: string}
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- diskSpec InstancePoolDiskSpec
- enableElasticDisk boolean
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcpAttributes InstancePoolGcpAttributes
- idleInstanceAutoterminationMinutes number
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instancePoolFleetAttributes InstancePoolInstancePoolFleetAttributes
- instancePoolId string
- instancePoolName string
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- maxCapacity number
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- minIdleInstances number
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- nodeTypeId string
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloadedDockerImages InstancePoolPreloadedDockerImage[]
- preloadedSparkVersions string[]
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- aws_attributes InstancePoolAwsAttributesArgs
- azure_attributes InstancePoolAzureAttributesArgs
- custom_tags Mapping[str, str]
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- disk_spec InstancePoolDiskSpecArgs
- enable_elastic_disk bool
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcp_attributes InstancePoolGcpAttributesArgs
- idle_instance_autotermination_minutes int
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instance_pool_fleet_attributes InstancePoolInstancePoolFleetAttributesArgs
- instance_pool_id str
- instance_pool_name str
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- max_capacity int
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- min_idle_instances int
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- node_type_id str
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloaded_docker_images Sequence[InstancePoolPreloadedDockerImageArgs]
- preloaded_spark_versions Sequence[str]
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
- awsAttributes Property Map
- azureAttributes Property Map
- customTags Map<String>
- (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the official documentation). Attempting to set the same tags in both cluster and instance pool will raise an error. Databricks allows at most 43 custom tags.
- diskSpec Property Map
- enableElasticDisk Boolean
- (Bool) Autoscaling Local Storage: when enabled, the instances in the pool dynamically acquire additional disk space when they are running low on disk space.
- gcpAttributes Property Map
- idleInstanceAutoterminationMinutes Number
- (Integer) The number of minutes that idle instances in excess of the min_idle_instances are maintained by the pool before being terminated. If not specified, excess idle instances are terminated automatically after a default timeout period. If specified, the time must be between 0 and 10000 minutes. If you specify 0, excess idle instances are removed as soon as possible.
- instancePoolFleetAttributes Property Map
- instancePoolId String
- instancePoolName String
- (String) The name of the instance pool. This is required for create and edit operations. It must be unique, non-empty, and less than 100 characters.
- maxCapacity Number
- (Integer) The maximum number of instances the pool can contain, including both idle instances and ones in use by clusters. Once the maximum capacity is reached, you cannot create new clusters from the pool and existing clusters cannot autoscale up until some instances are made idle in the pool via cluster termination or down-scaling. There is no default limit, but as a best practice, this should be set based on anticipated usage.
- minIdleInstances Number
- (Integer) The minimum number of idle instances maintained by the pool. This is in addition to any instances in use by active clusters.
- nodeTypeId String
- (String) The node type for the instances in the pool. All clusters attached to the pool inherit this node type and the pool’s idle instances are allocated based on this type. You can retrieve a list of available node types by using the List Node Types API call.
- preloadedDockerImages List<Property Map>
- preloadedSparkVersions List<String>
- (List) A list with at most one runtime version the pool installs on each instance. Pool clusters that use a preloaded runtime version start faster as they do not have to wait for the image to download. You can retrieve them via databricks.getSparkVersion data source or via Runtime Versions API call.
Supporting Types
InstancePoolAwsAttributes, InstancePoolAwsAttributesArgs
- Availability string
- (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
- SpotBidPricePercent int
- (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
- ZoneId string
- (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
- Availability string
- (String) Availability type used for all instances in the pool. Only ON_DEMAND and SPOT are supported.
- SpotBidPricePercent int
- (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
- ZoneId string
- (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone ID if the Databricks deployment resides in the "us-east-1" region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
- availability String
- (String) Availability type used for all instances in the pool. Only
ON_DEMAND
andSPOT
are supported. - spot
Bid IntegerPrice Percent - (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
- zone
Id String - (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like
"us-west-2a"
. The provided availability zone must be in the same region as the Databricks deployment. For example,"us-west-2a"
is not a valid zone ID if the Databricks deployment resides in the"us-east-1"
region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
- availability string
- (String) Availability type used for all instances in the pool. Only
ON_DEMAND
andSPOT
are supported. - spot
Bid numberPrice Percent - (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
- zone
Id string - (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like
"us-west-2a"
. The provided availability zone must be in the same region as the Databricks deployment. For example,"us-west-2a"
is not a valid zone ID if the Databricks deployment resides in the"us-east-1"
region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
- availability str
- (String) Availability type used for all instances in the pool. Only
ON_DEMAND
andSPOT
are supported. - spot_
bid_ intprice_ percent - (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
- zone_
id str - (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like
"us-west-2a"
. The provided availability zone must be in the same region as the Databricks deployment. For example,"us-west-2a"
is not a valid zone ID if the Databricks deployment resides in the"us-east-1"
region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
- availability String
- (String) Availability type used for all instances in the pool. Only
ON_DEMAND
andSPOT
are supported. - spot
Bid NumberPrice Percent - (Integer) The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the instance pool needs a new i3.xlarge spot instance, then the max price is half of the price of on-demand i3.xlarge instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand i3.xlarge instances. If not specified, the default value is 100. When spot instances are requested for this instance pool, only spot instances whose max price percentage matches this field are considered. For safety, this field cannot be greater than 10000.
- zone
Id String - (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like
"us-west-2a"
. The provided availability zone must be in the same region as the Databricks deployment. For example,"us-west-2a"
is not a valid zone ID if the Databricks deployment resides in the"us-east-1"
region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the List Zones API.
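To bid on spot capacity instead of on-demand instances, the pool's awsAttributes can request SPOT availability together with a bid percentage. A minimal TypeScript sketch under those assumptions; the pool name, node type, and capacity values are illustrative.
import * as databricks from "@pulumi/databricks";
// Pool that allocates spot instances in us-east-1a, bidding up to the on-demand price.
const spotPool = new databricks.InstancePool("spot_pool", {
    instancePoolName: "Spot Pool",
    minIdleInstances: 0,
    maxCapacity: 100,
    nodeTypeId: "i3.xlarge",
    idleInstanceAutoterminationMinutes: 10,
    awsAttributes: {
        availability: "SPOT",
        spotBidPricePercent: 100,
        zoneId: "us-east-1a",
    },
});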
InstancePoolAzureAttributes, InstancePoolAzureAttributesArgs
- availability String - Availability type used for all nodes. Valid values are SPOT_AZURE and ON_DEMAND_AZURE (see the sketch below).
- spotBidMaxPrice Double (Python: spot_bid_max_price) - The max bid price used for Azure spot instances. You can set this to a value greater than or equal to the current spot price. You can also set this to -1, which specifies that the instance cannot be evicted on the basis of price; in that case the price for the instance will be the current price for spot instances or the price for a standard instance.
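On Azure, the equivalent configuration uses SPOT_AZURE availability with an optional max bid. A minimal TypeScript sketch; the pool name, node type, and capacity values are illustrative, and -1 is used here to mean the instances are never evicted on price.
import * as databricks from "@pulumi/databricks";
// Azure pool using spot VMs that are not evicted because of price.
const azureSpotPool = new databricks.InstancePool("azure_spot_pool", {
    instancePoolName: "Azure Spot Pool",
    minIdleInstances: 0,
    maxCapacity: 50,
    nodeTypeId: "Standard_DS3_v2",
    idleInstanceAutoterminationMinutes: 10,
    azureAttributes: {
        availability: "SPOT_AZURE",
        spotBidMaxPrice: -1,
    },
});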
InstancePoolDiskSpec, InstancePoolDiskSpecArgs
- diskCount Integer (Python: disk_count) - (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified.
- diskSize Integer (Python: disk_size) - (Integer) The size of each disk (in GiB) to attach.
- diskType InstancePoolDiskSpecDiskType (Python: disk_type) - see the disk type fields and the sketch below.
InstancePoolDiskSpecDiskType, InstancePoolDiskSpecDiskTypeArgs
- azureDiskVolumeType String (Python: azure_disk_volume_type)
- ebsVolumeType String (Python: ebs_volume_type)
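Putting diskSpec and its nested diskType together, a pool can attach a storage volume to every instance it manages. A brief TypeScript sketch consistent with the example earlier in this page; the pool name, node type, and disk size are illustrative.
import * as databricks from "@pulumi/databricks";
// Pool whose instances each get one 100 GiB general-purpose SSD.
const diskPool = new databricks.InstancePool("disk_pool", {
    instancePoolName: "Pool With Disks",
    minIdleInstances: 0,
    maxCapacity: 20,
    nodeTypeId: "i3.xlarge",
    idleInstanceAutoterminationMinutes: 10,
    diskSpec: {
        diskCount: 1,
        diskSize: 100,
        diskType: {
            ebsVolumeType: "GENERAL_PURPOSE_SSD",
        },
    },
});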
InstancePoolGcpAttributes, InstancePoolGcpAttributesArgs
- gcpAvailability String (Python: gcp_availability) - Availability type used for all nodes. Valid values are PREEMPTIBLE_GCP, PREEMPTIBLE_WITH_FALLBACK_GCP and ON_DEMAND_GCP; default: ON_DEMAND_GCP (see the sketch below).
- localSsdCount Integer (Python: local_ssd_count) - Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
- zoneId String (Python: zone_id) - Identifier for the availability zone/datacenter in which the cluster resides. This string is of the form us-central1-a. The provided availability zone must be in the same region as the Databricks workspace.
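On GCP, the pool can prefer preemptible capacity while falling back to on-demand, and can attach local SSDs. A minimal TypeScript sketch; the pool name, machine type, zone, and capacity values are illustrative.
import * as databricks from "@pulumi/databricks";
// GCP pool that prefers preemptible VMs but falls back to on-demand if needed.
const gcpPool = new databricks.InstancePool("gcp_pool", {
    instancePoolName: "GCP Preemptible Pool",
    minIdleInstances: 0,
    maxCapacity: 30,
    nodeTypeId: "n1-standard-4",
    idleInstanceAutoterminationMinutes: 10,
    gcpAttributes: {
        gcpAvailability: "PREEMPTIBLE_WITH_FALLBACK_GCP",
        localSsdCount: 1,
        zoneId: "us-central1-a",
    },
});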
InstancePoolInstancePoolFleetAttributes, InstancePoolInstancePoolFleetAttributesArgs
InstancePoolInstancePoolFleetAttributesFleetOnDemandOption, InstancePoolInstancePoolFleetAttributesFleetOnDemandOptionArgs
- allocationStrategy String (Python: allocation_strategy)
- instancePoolsToUseCount Integer (Python: instance_pools_to_use_count)
InstancePoolInstancePoolFleetAttributesFleetSpotOption, InstancePoolInstancePoolFleetAttributesFleetSpotOptionArgs
- allocationStrategy String (Python: allocation_strategy)
- instancePoolsToUseCount Integer (Python: instance_pools_to_use_count)
InstancePoolInstancePoolFleetAttributesLaunchTemplateOverride, InstancePoolInstancePoolFleetAttributesLaunchTemplateOverrideArgs
- availabilityZone String (Python: availability_zone)
- instanceType String (Python: instance_type)
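This page does not spell out how the fleet option and launch-template-override types above are composed under the pool's instancePoolFleetAttributes block, so the following TypeScript sketch is an assumption about that composition: the property names launchTemplateOverrides and fleetSpotOption, the LOWEST_PRICE allocation strategy value, and the instance types are illustrative and not confirmed by this page.
import * as databricks from "@pulumi/databricks";
// Assumed shape: a fleet pool that may launch either of two instance types in one AZ,
// filling spot capacity with a lowest-price allocation strategy.
const fleetPool = new databricks.InstancePool("fleet_pool", {
    instancePoolName: "Fleet Pool",
    minIdleInstances: 0,
    maxCapacity: 40,
    idleInstanceAutoterminationMinutes: 10,
    instancePoolFleetAttributes: {
        launchTemplateOverrides: [
            { availabilityZone: "us-east-1a", instanceType: "i3.xlarge" },
            { availabilityZone: "us-east-1a", instanceType: "i3.2xlarge" },
        ],
        fleetSpotOption: {
            allocationStrategy: "LOWEST_PRICE",
            instancePoolsToUseCount: 1,
        },
    },
});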
InstancePoolPreloadedDockerImage, InstancePoolPreloadedDockerImageArgs
- url String - URL for the Docker image.
- basicAuth InstancePoolPreloadedDockerImageBasicAuth (Python: basic_auth) - basic_auth.username and basic_auth.password for the Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.
Example usage with azurerm_container_registry and docker_registry_image, which you can adapt to your specific use case:
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
import * as docker from "@pulumi/docker";
const _this = new docker.index.RegistryImage("this", {
    build: [{}],
    name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
});
const thisInstancePool = new databricks.InstancePool("this", {preloadedDockerImages: [{
    url: _this.name,
    basicAuth: {
        username: thisAzurermContainerRegistry.adminUsername,
        password: thisAzurermContainerRegistry.adminPassword,
    },
}]});
import pulumi
import pulumi_databricks as databricks
import pulumi_docker as docker
this = docker.index.RegistryImage("this",
    build=[{}],
    name=f"{this_azurerm_container_registry.login_server}/sample:latest")
this_instance_pool = databricks.InstancePool("this", preloaded_docker_images=[{
    "url": this.name,
    "basic_auth": {
        "username": this_azurerm_container_registry["adminUsername"],
        "password": this_azurerm_container_registry["adminPassword"],
    },
}])
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
using Docker = Pulumi.Docker;
return await Deployment.RunAsync(() =>
{
    var @this = new Docker.Index.RegistryImage("this", new()
    {
        Build = new[]
        {
            null,
        },
        Name = $"{thisAzurermContainerRegistry.LoginServer}/sample:latest",
    });
    var thisInstancePool = new Databricks.InstancePool("this", new()
    {
        PreloadedDockerImages = new[]
        {
            new Databricks.Inputs.InstancePoolPreloadedDockerImageArgs
            {
                Url = @this.Name,
                BasicAuth = new Databricks.Inputs.InstancePoolPreloadedDockerImageBasicAuthArgs
                {
                    Username = thisAzurermContainerRegistry.AdminUsername,
                    Password = thisAzurermContainerRegistry.AdminPassword,
                },
            },
        },
    });
});
package main
import (
    "fmt"
    "github.com/pulumi/pulumi-databricks/sdk/go/databricks"
    "github.com/pulumi/pulumi-docker/sdk/v4/go/docker"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        this, err := docker.NewRegistryImage(ctx, "this", &docker.RegistryImageArgs{
            Build: []map[string]interface{}{
                map[string]interface{}{},
            },
            Name: fmt.Sprintf("%v/sample:latest", thisAzurermContainerRegistry.LoginServer),
        })
        if err != nil {
            return err
        }
        _, err = databricks.NewInstancePool(ctx, "this", &databricks.InstancePoolArgs{
            PreloadedDockerImages: databricks.InstancePoolPreloadedDockerImageArray{
                &databricks.InstancePoolPreloadedDockerImageArgs{
                    Url: this.Name,
                    BasicAuth: &databricks.InstancePoolPreloadedDockerImageBasicAuthArgs{
                        Username: pulumi.Any(thisAzurermContainerRegistry.AdminUsername),
                        Password: pulumi.Any(thisAzurermContainerRegistry.AdminPassword),
                    },
                },
            },
        })
        if err != nil {
            return err
        }
        return nil
    })
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.docker.RegistryImage;
import com.pulumi.docker.RegistryImageArgs;
import com.pulumi.databricks.InstancePool;
import com.pulumi.databricks.InstancePoolArgs;
import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageArgs;
import com.pulumi.databricks.inputs.InstancePoolPreloadedDockerImageBasicAuthArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var this_ = new RegistryImage("this", RegistryImageArgs.builder()
            .build()
            .name(String.format("%s/sample:latest", thisAzurermContainerRegistry.loginServer()))
            .build());
        var thisInstancePool = new InstancePool("thisInstancePool", InstancePoolArgs.builder()
            .preloadedDockerImages(InstancePoolPreloadedDockerImageArgs.builder()
                .url(this_.name())
                .basicAuth(InstancePoolPreloadedDockerImageBasicAuthArgs.builder()
                    .username(thisAzurermContainerRegistry.adminUsername())
                    .password(thisAzurermContainerRegistry.adminPassword())
                    .build())
                .build())
            .build());
    }
}
resources:
  this:
    type: docker:registryImage
    properties:
      build:
        - {}
      name: ${thisAzurermContainerRegistry.loginServer}/sample:latest
  thisInstancePool:
    type: databricks:InstancePool
    name: this
    properties:
      preloadedDockerImages:
        - url: ${this.name}
          basicAuth:
            username: ${thisAzurermContainerRegistry.adminUsername}
            password: ${thisAzurermContainerRegistry.adminPassword}
InstancePoolPreloadedDockerImageBasicAuth, InstancePoolPreloadedDockerImageBasicAuthArgs
- username String
- password String
Import
The instance pool resource can be imported using its id:
bash
$ pulumi import databricks:index/instancePool:InstancePool this <instance-pool-id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the databricks Terraform Provider.