databricks.SqlEndpoint
Explore with Pulumi AI
This resource is used to manage Databricks SQL warehouses. To create SQL warehouses you must have databricks_sql_access
on your databricks.Group or databricks_user.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const me = databricks.getCurrentUser({});
const _this = new databricks.SqlEndpoint("this", {
name: me.then(me => `Endpoint of ${me.alphanumeric}`),
clusterSize: "Small",
maxNumClusters: 1,
tags: {
customTags: [{
key: "City",
value: "Amsterdam",
}],
},
});
import pulumi
import pulumi_databricks as databricks
me = databricks.get_current_user()
this = databricks.SqlEndpoint("this",
name=f"Endpoint of {me.alphanumeric}",
cluster_size="Small",
max_num_clusters=1,
tags={
"custom_tags": [{
"key": "City",
"value": "Amsterdam",
}],
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
me, err := databricks.GetCurrentUser(ctx, map[string]interface{}{}, nil)
if err != nil {
return err
}
_, err = databricks.NewSqlEndpoint(ctx, "this", &databricks.SqlEndpointArgs{
Name: pulumi.Sprintf("Endpoint of %v", me.Alphanumeric),
ClusterSize: pulumi.String("Small"),
MaxNumClusters: pulumi.Int(1),
Tags: &databricks.SqlEndpointTagsArgs{
CustomTags: databricks.SqlEndpointTagsCustomTagArray{
&databricks.SqlEndpointTagsCustomTagArgs{
Key: pulumi.String("City"),
Value: pulumi.String("Amsterdam"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() =>
{
var me = Databricks.GetCurrentUser.Invoke();
var @this = new Databricks.SqlEndpoint("this", new()
{
Name = $"Endpoint of {me.Apply(getCurrentUserResult => getCurrentUserResult.Alphanumeric)}",
ClusterSize = "Small",
MaxNumClusters = 1,
Tags = new Databricks.Inputs.SqlEndpointTagsArgs
{
CustomTags = new[]
{
new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
{
Key = "City",
Value = "Amsterdam",
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.SqlEndpoint;
import com.pulumi.databricks.SqlEndpointArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsCustomTagArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var me = DatabricksFunctions.getCurrentUser();
var this_ = new SqlEndpoint("this", SqlEndpointArgs.builder()
.name(String.format("Endpoint of %s", me.applyValue(getCurrentUserResult -> getCurrentUserResult.alphanumeric())))
.clusterSize("Small")
.maxNumClusters(1)
.tags(SqlEndpointTagsArgs.builder()
.customTags(SqlEndpointTagsCustomTagArgs.builder()
.key("City")
.value("Amsterdam")
.build())
.build())
.build());
}
}
resources:
this:
type: databricks:SqlEndpoint
properties:
name: Endpoint of ${me.alphanumeric}
clusterSize: Small
maxNumClusters: 1
tags:
customTags:
- key: City
value: Amsterdam
variables:
me:
fn::invoke:
Function: databricks:getCurrentUser
Arguments: {}
Access control
- databricks.Permissions can control which groups or individual users are granted the Can Use or Can Manage permission on SQL warehouses.
- databricks_sql_access on databricks.Group or databricks_user.
Related resources
The following resources are often used in the same context:
- End to end workspace management guide.
- databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount.
- databricks.SqlDashboard to manage Databricks SQL Dashboards.
- databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and data access properties for all databricks.SqlEndpoint of workspace.
- databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and more.
Create SqlEndpoint Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new SqlEndpoint(name: string, args: SqlEndpointArgs, opts?: CustomResourceOptions);
@overload
def SqlEndpoint(resource_name: str,
args: SqlEndpointArgs,
opts: Optional[ResourceOptions] = None)
@overload
def SqlEndpoint(resource_name: str,
opts: Optional[ResourceOptions] = None,
cluster_size: Optional[str] = None,
instance_profile_arn: Optional[str] = None,
channel: Optional[SqlEndpointChannelArgs] = None,
data_source_id: Optional[str] = None,
enable_photon: Optional[bool] = None,
enable_serverless_compute: Optional[bool] = None,
auto_stop_mins: Optional[int] = None,
max_num_clusters: Optional[int] = None,
min_num_clusters: Optional[int] = None,
name: Optional[str] = None,
spot_instance_policy: Optional[str] = None,
tags: Optional[SqlEndpointTagsArgs] = None,
warehouse_type: Optional[str] = None)
func NewSqlEndpoint(ctx *Context, name string, args SqlEndpointArgs, opts ...ResourceOption) (*SqlEndpoint, error)
public SqlEndpoint(string name, SqlEndpointArgs args, CustomResourceOptions? opts = null)
public SqlEndpoint(String name, SqlEndpointArgs args)
public SqlEndpoint(String name, SqlEndpointArgs args, CustomResourceOptions options)
type: databricks:SqlEndpoint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var sqlEndpointResource = new Databricks.SqlEndpoint("sqlEndpointResource", new()
{
ClusterSize = "string",
InstanceProfileArn = "string",
Channel = new Databricks.Inputs.SqlEndpointChannelArgs
{
DbsqlVersion = "string",
Name = "string",
},
DataSourceId = "string",
EnablePhoton = false,
EnableServerlessCompute = false,
AutoStopMins = 0,
MaxNumClusters = 0,
MinNumClusters = 0,
Name = "string",
SpotInstancePolicy = "string",
Tags = new Databricks.Inputs.SqlEndpointTagsArgs
{
CustomTags = new[]
{
new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
{
Key = "string",
Value = "string",
},
},
},
WarehouseType = "string",
});
example, err := databricks.NewSqlEndpoint(ctx, "sqlEndpointResource", &databricks.SqlEndpointArgs{
ClusterSize: pulumi.String("string"),
InstanceProfileArn: pulumi.String("string"),
Channel: &databricks.SqlEndpointChannelArgs{
DbsqlVersion: pulumi.String("string"),
Name: pulumi.String("string"),
},
DataSourceId: pulumi.String("string"),
EnablePhoton: pulumi.Bool(false),
EnableServerlessCompute: pulumi.Bool(false),
AutoStopMins: pulumi.Int(0),
MaxNumClusters: pulumi.Int(0),
MinNumClusters: pulumi.Int(0),
Name: pulumi.String("string"),
SpotInstancePolicy: pulumi.String("string"),
Tags: &databricks.SqlEndpointTagsArgs{
CustomTags: databricks.SqlEndpointTagsCustomTagArray{
&databricks.SqlEndpointTagsCustomTagArgs{
Key: pulumi.String("string"),
Value: pulumi.String("string"),
},
},
},
WarehouseType: pulumi.String("string"),
})
var sqlEndpointResource = new SqlEndpoint("sqlEndpointResource", SqlEndpointArgs.builder()
.clusterSize("string")
.instanceProfileArn("string")
.channel(SqlEndpointChannelArgs.builder()
.dbsqlVersion("string")
.name("string")
.build())
.dataSourceId("string")
.enablePhoton(false)
.enableServerlessCompute(false)
.autoStopMins(0)
.maxNumClusters(0)
.minNumClusters(0)
.name("string")
.spotInstancePolicy("string")
.tags(SqlEndpointTagsArgs.builder()
.customTags(SqlEndpointTagsCustomTagArgs.builder()
.key("string")
.value("string")
.build())
.build())
.warehouseType("string")
.build());
sql_endpoint_resource = databricks.SqlEndpoint("sqlEndpointResource",
cluster_size="string",
instance_profile_arn="string",
channel={
"dbsql_version": "string",
"name": "string",
},
data_source_id="string",
enable_photon=False,
enable_serverless_compute=False,
auto_stop_mins=0,
max_num_clusters=0,
min_num_clusters=0,
name="string",
spot_instance_policy="string",
tags={
"custom_tags": [{
"key": "string",
"value": "string",
}],
},
warehouse_type="string")
const sqlEndpointResource = new databricks.SqlEndpoint("sqlEndpointResource", {
clusterSize: "string",
instanceProfileArn: "string",
channel: {
dbsqlVersion: "string",
name: "string",
},
dataSourceId: "string",
enablePhoton: false,
enableServerlessCompute: false,
autoStopMins: 0,
maxNumClusters: 0,
minNumClusters: 0,
name: "string",
spotInstancePolicy: "string",
tags: {
customTags: [{
key: "string",
value: "string",
}],
},
warehouseType: "string",
});
type: databricks:SqlEndpoint
properties:
autoStopMins: 0
channel:
dbsqlVersion: string
name: string
clusterSize: string
dataSourceId: string
enablePhoton: false
enableServerlessCompute: false
instanceProfileArn: string
maxNumClusters: 0
minNumClusters: 0
name: string
spotInstancePolicy: string
tags:
customTags:
- key: string
value: string
warehouseType: string
SqlEndpoint Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The SqlEndpoint resource accepts the following input properties:
- ClusterSize string - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- AutoStopMins int - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120; set to 0 to disable the auto stop.
- Channel SqlEndpointChannel - block, consisting of following fields:
- DataSourceId string - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- EnablePhoton bool - Whether to enable Photon. This field is optional and is enabled by default.
- EnableServerlessCompute bool - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
  For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
  For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- InstanceProfileArn string
- MaxNumClusters int - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- MinNumClusters int - Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- Name string - Name of the SQL warehouse. Must be unique.
- SpotInstancePolicy string - The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- Tags SqlEndpointTags - Databricks tags all endpoint resources with these tags.
- WarehouseType string - SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true, either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- Cluster
Size string - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- Auto
Stop intMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
Sql
Endpoint Channel Args - block, consisting of following fields:
- DataSourceId string - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- Enable
Photon bool - Whether to enable Photon. This field is optional and is enabled by default.
- Enable
Serverless boolCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- Instance
Profile stringArn - Max
Num intClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - Min
Num intClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - Name string
- Name of the SQL warehouse. Must be unique.
- Spot
Instance stringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - Sql
Endpoint Tags Args - Databricks tags all endpoint resources with these tags.
- Warehouse
Type string - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster
Size String - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto
Stop IntegerMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel - block, consisting of following fields:
- dataSourceId String - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon Boolean - Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless BooleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile StringArn - max
Num IntegerClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min
Num IntegerClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name String
- Name of the SQL warehouse. Must be unique.
- spot
Instance StringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - Sql
Endpoint Tags - Databricks tags all endpoint resources with these tags.
- warehouse
Type String - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster
Size string - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto
Stop numberMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel - block, consisting of following fields:
- dataSourceId string - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon boolean - Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless booleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile stringArn - max
Num numberClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min
Num numberClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name string
- Name of the SQL warehouse. Must be unique.
- spot
Instance stringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - Sql
Endpoint Tags - Databricks tags all endpoint resources with these tags.
- warehouse
Type string - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster_
size str - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto_
stop_ intmins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args - block, consisting of following fields:
- data_source_id str - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable_
photon bool - Whether to enable Photon. This field is optional and is enabled by default.
- enable_
serverless_ boolcompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance_
profile_ strarn - max_
num_ intclusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min_
num_ intclusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name str
- Name of the SQL warehouse. Must be unique.
- spot_
instance_ strpolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - Sql
Endpoint Tags Args - Databricks tags all endpoint resources with these tags.
- warehouse_
type str - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- cluster
Size String - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto
Stop NumberMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel Property Map
- block, consisting of following fields:
- dataSourceId String - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon Boolean - Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless BooleanCompute Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.For Azure, If omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default totrue
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance
Profile StringArn - max
Num NumberClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min
Num NumberClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name String
- Name of the SQL warehouse. Must be unique.
- spot
Instance StringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - Property Map
- Databricks tags all endpoint resources with these tags.
- warehouse
Type String - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
Outputs
All input properties are implicitly available as output properties. Additionally, the SqlEndpoint resource produces the following output properties:
- Creator
Name string - The username of the user who created the endpoint.
- Healths
List<Sql
Endpoint Health> - Health status of the endpoint.
- Id string
- The provider-assigned unique ID for this managed resource.
- Jdbc
Url string - JDBC connection string.
- Num
Active intSessions - The current number of clusters used by the endpoint.
- Num
Clusters int - The current number of clusters used by the endpoint.
- Odbc
Params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
. - State string
- The current state of the endpoint.
- Creator
Name string - The username of the user who created the endpoint.
- Healths
[]Sql
Endpoint Health - Health status of the endpoint.
- Id string
- The provider-assigned unique ID for this managed resource.
- Jdbc
Url string - JDBC connection string.
- Num
Active intSessions - The current number of clusters used by the endpoint.
- Num
Clusters int - The current number of clusters used by the endpoint.
- Odbc
Params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
. - State string
- The current state of the endpoint.
- creator
Name String - The username of the user who created the endpoint.
- healths
List<Sql
Endpoint Health> - Health status of the endpoint.
- id String
- The provider-assigned unique ID for this managed resource.
- jdbc
Url String - JDBC connection string.
- num
Active IntegerSessions - The current number of clusters used by the endpoint.
- num
Clusters Integer - The current number of clusters used by the endpoint.
- odbc
Params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
. - state String
- The current state of the endpoint.
- creator
Name string - The username of the user who created the endpoint.
- healths
Sql
Endpoint Health[] - Health status of the endpoint.
- id string
- The provider-assigned unique ID for this managed resource.
- jdbc
Url string - JDBC connection string.
- num
Active numberSessions - The current number of clusters used by the endpoint.
- num
Clusters number - The current number of clusters used by the endpoint.
- odbc
Params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
. - state string
- The current state of the endpoint.
- creator_
name str - The username of the user who created the endpoint.
- healths
Sequence[Sql
Endpoint Health] - Health status of the endpoint.
- id str
- The provider-assigned unique ID for this managed resource.
- jdbc_
url str - JDBC connection string.
- num_
active_ intsessions - The current number of clusters used by the endpoint.
- num_
clusters int - The current number of clusters used by the endpoint.
- odbc_
params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
. - state str
- The current state of the endpoint.
- creator
Name String - The username of the user who created the endpoint.
- healths List<Property Map>
- Health status of the endpoint.
- id String
- The provider-assigned unique ID for this managed resource.
- jdbc
Url String - JDBC connection string.
- num
Active NumberSessions - The current number of clusters used by the endpoint.
- num
Clusters Number - The current number of clusters used by the endpoint.
- odbc
Params Property Map - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, andodbc_params.port
. - state String
- The current state of the endpoint.
Look up Existing SqlEndpoint Resource
Get an existing SqlEndpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: SqlEndpointState, opts?: CustomResourceOptions): SqlEndpoint
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
auto_stop_mins: Optional[int] = None,
channel: Optional[SqlEndpointChannelArgs] = None,
cluster_size: Optional[str] = None,
creator_name: Optional[str] = None,
data_source_id: Optional[str] = None,
enable_photon: Optional[bool] = None,
enable_serverless_compute: Optional[bool] = None,
healths: Optional[Sequence[SqlEndpointHealthArgs]] = None,
instance_profile_arn: Optional[str] = None,
jdbc_url: Optional[str] = None,
max_num_clusters: Optional[int] = None,
min_num_clusters: Optional[int] = None,
name: Optional[str] = None,
num_active_sessions: Optional[int] = None,
num_clusters: Optional[int] = None,
odbc_params: Optional[SqlEndpointOdbcParamsArgs] = None,
spot_instance_policy: Optional[str] = None,
state: Optional[str] = None,
tags: Optional[SqlEndpointTagsArgs] = None,
warehouse_type: Optional[str] = None) -> SqlEndpoint
func GetSqlEndpoint(ctx *Context, name string, id IDInput, state *SqlEndpointState, opts ...ResourceOption) (*SqlEndpoint, error)
public static SqlEndpoint Get(string name, Input<string> id, SqlEndpointState? state, CustomResourceOptions? opts = null)
public static SqlEndpoint get(String name, Output<String> id, SqlEndpointState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Auto
Stop intMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
Sql
Endpoint Channel - block, consisting of following fields:
- Cluster
Size string - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- Creator
Name string - The username of the user who created the endpoint.
- Data
Source Id string - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- Enable
Photon bool - Whether to enable Photon. This field is optional and is enabled by default.
- Enable
Serverless Compute bool - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship. For Azure, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- Healths
List<Sql
Endpoint Health> - Health status of the endpoint.
- Instance
Profile stringArn - Jdbc
Url string - JDBC connection string.
- Max
Num intClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - Min
Num intClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - Name string
- Name of the SQL warehouse. Must be unique.
- Num
Active Sessions int - The current number of active sessions for the endpoint.
- Num
Clusters int - The current number of clusters used by the endpoint.
- Odbc
Params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, and odbc_params.port
. - Spot
Instance stringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - State string
- The current state of the endpoint.
- Sql
Endpoint Tags - Databricks tags all endpoint resources with these tags.
- Warehouse
Type string - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- Auto
Stop intMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
Sql
Endpoint Channel Args - block, consisting of following fields:
- Cluster
Size string - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- Creator
Name string - The username of the user who created the endpoint.
- Data
Source Id string - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- Enable
Photon bool - Whether to enable Photon. This field is optional and is enabled by default.
- Enable
Serverless Compute bool - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship. For Azure, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- Healths
[]Sql
Endpoint Health Args - Health status of the endpoint.
- Instance
Profile stringArn - Jdbc
Url string - JDBC connection string.
- Max
Num intClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - Min
Num intClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - Name string
- Name of the SQL warehouse. Must be unique.
- Num
Active Sessions int - The current number of active sessions for the endpoint.
- Num
Clusters int - The current number of clusters used by the endpoint.
- Odbc
Params SqlEndpoint Odbc Params Args - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, and odbc_params.port
. - Spot
Instance stringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - State string
- The current state of the endpoint.
- Sql
Endpoint Tags Args - Databricks tags all endpoint resources with these tags.
- Warehouse
Type string - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto
Stop IntegerMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel - block, consisting of following fields:
- cluster
Size String - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creator
Name String - The username of the user who created the endpoint.
- data
Source Id String - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon Boolean - Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless Compute Boolean - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship. For Azure, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- healths
List<Sql
Endpoint Health> - Health status of the endpoint.
- instance
Profile StringArn - jdbc
Url String - JDBC connection string.
- max
Num IntegerClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min
Num IntegerClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name String
- Name of the SQL warehouse. Must be unique.
- num
Active Sessions Integer - The current number of active sessions for the endpoint.
- num
Clusters Integer - The current number of clusters used by the endpoint.
- odbc
Params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, and odbc_params.port
. - spot
Instance StringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - state String
- The current state of the endpoint.
- Sql
Endpoint Tags - Databricks tags all endpoint resources with these tags.
- warehouse
Type String - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto
Stop numberMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel - block, consisting of following fields:
- cluster
Size string - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creator
Name string - The username of the user who created the endpoint.
- data
Source Id string - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon boolean - Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless Compute boolean - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship. For Azure, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- healths
Sql
Endpoint Health[] - Health status of the endpoint.
- instance
Profile stringArn - jdbc
Url string - JDBC connection string.
- max
Num numberClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min
Num numberClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name string
- Name of the SQL warehouse. Must be unique.
- num
Active Sessions number - The current number of active sessions for the endpoint.
- num
Clusters number - The current number of clusters used by the endpoint.
- odbc
Params SqlEndpoint Odbc Params - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, and odbc_params.port
. - spot
Instance stringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - state string
- The current state of the endpoint.
- Sql
Endpoint Tags - Databricks tags all endpoint resources with these tags.
- warehouse
Type string - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto_
stop_ intmins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
Sql
Endpoint Channel Args - block, consisting of following fields:
- cluster_
size str - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creator_
name str - The username of the user who created the endpoint.
- data_
source_ id str - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable_
photon bool - Whether to enable Photon. This field is optional and is enabled by default.
- enable_
serverless_ compute bool - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship. For Azure, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- healths
Sequence[Sql
Endpoint Health Args] - Health status of the endpoint.
- instance_
profile_ strarn - jdbc_
url str - JDBC connection string.
- max_
num_ intclusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min_
num_ intclusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name str
- Name of the SQL warehouse. Must be unique.
- num_
active_ sessions int - The current number of active sessions for the endpoint.
- num_
clusters int - The current number of clusters used by the endpoint.
- odbc_
params SqlEndpoint Odbc Params Args - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, and odbc_params.port
. - spot_
instance_ strpolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - state str
- The current state of the endpoint.
- Sql
Endpoint Tags Args - Databricks tags all endpoint resources with these tags.
- warehouse_
type str - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
- auto
Stop NumberMins - Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel Property Map
- block, consisting of following fields:
- cluster
Size String - The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creator
Name String - The username of the user who created the endpoint.
- data
Source Id String - ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable
Photon Boolean - Whether to enable Photon. This field is optional and is enabled by default.
- enable
Serverless Compute Boolean - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
For AWS, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship. For Azure, if omitted, the default is
false
for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true
if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- healths List<Property Map>
- Health status of the endpoint.
- instance
Profile StringArn - jdbc
Url String - JDBC connection string.
- max
Num NumberClusters - Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to
1
. - min
Num NumberClusters - Minimum number of clusters available when a SQL warehouse is running. The default is
1
. - name String
- Name of the SQL warehouse. Must be unique.
- num
Active Sessions Number - The current number of active sessions for the endpoint.
- num
Clusters Number - The current number of clusters used by the endpoint.
- odbc
Params Property Map - ODBC connection params:
odbc_params.hostname
,odbc_params.path
,odbc_params.protocol
, and odbc_params.port
. - spot
Instance StringPolicy - The spot policy to use for allocating instances to clusters:
COST_OPTIMIZED
orRELIABILITY_OPTIMIZED
. This field is optional. Default isCOST_OPTIMIZED
. - state String
- The current state of the endpoint.
- Property Map
- Databricks tags all endpoint resources with these tags.
- warehouse
Type String - SQL warehouse type. See for AWS or Azure. Set to
PRO
orCLASSIC
. If the fieldenable_serverless_compute
has the valuetrue
either explicitly or through the default logic (see that field above for details), the default isPRO
, which is required for serverless SQL warehouses. Otherwise, the default isCLASSIC
.
Supporting Types
SqlEndpointChannel, SqlEndpointChannelArgs
- Dbsql
Version string - Name string
- Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- Dbsql
Version string - Name string
- Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- dbsql
Version String - name String
- Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- dbsql
Version string - name string
- Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- dbsql_
version str - name str
- Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
- dbsql
Version String - name String
- Name of the Databricks SQL release channel. Possible values are:
CHANNEL_NAME_PREVIEW
andCHANNEL_NAME_CURRENT
. Default isCHANNEL_NAME_CURRENT
.
SqlEndpointHealth, SqlEndpointHealthArgs
- Details string
- Failure
Reason SqlEndpoint Health Failure Reason - Message string
- Status string
- Summary string
- Details string
- Failure
Reason SqlEndpoint Health Failure Reason - Message string
- Status string
- Summary string
- details String
- failure
Reason SqlEndpoint Health Failure Reason - message String
- status String
- summary String
- details string
- failure
Reason SqlEndpoint Health Failure Reason - message string
- status string
- summary string
- details String
- failure
Reason Property Map - message String
- status String
- summary String
SqlEndpointHealthFailureReason, SqlEndpointHealthFailureReasonArgs
- Code string
- Parameters Dictionary<string, string>
- Type string
- Code string
- Parameters map[string]string
- Type string
- code String
- parameters Map<String,String>
- type String
- code string
- parameters {[key: string]: string}
- type string
- code str
- parameters Mapping[str, str]
- type str
- code String
- parameters Map<String>
- type String
SqlEndpointOdbcParams, SqlEndpointOdbcParamsArgs
SqlEndpointTags, SqlEndpointTagsArgs
SqlEndpointTagsCustomTag, SqlEndpointTagsCustomTagArgs
Import
You can import a databricks_sql_endpoint
resource with ID like the following:
bash
$ pulumi import databricks:index/sqlEndpoint:SqlEndpoint this <endpoint-id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
databricks
Terraform Provider.