spotinst.spark.Ocean
Manages a Spotinst Ocean Spark resource on AWS or GCP.
Prerequisites
This resource requires an existing Ocean cluster. See, for example, the spotinst.aws.Ocean resource.
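Because this resource only consumes the ID of an existing Ocean cluster, a common pattern is to create both in the same program and wire the ID through. The following TypeScript sketch illustrates that pattern; the spotinst.aws.Ocean arguments shown are illustrative placeholders, not a complete cluster configuration.

import * as spotinst from "@pulumi/spotinst";

// A (placeholder) Ocean cluster to install Ocean for Apache Spark on.
// See the spotinst.aws.Ocean documentation for the full argument list.
const cluster = new spotinst.aws.Ocean("cluster", {
    controllerId: "my-ocean-controller",
    region: "eu-north-1",
    subnetIds: ["subnet-XXXXXXXX"],
    securityGroups: ["sg-XXXXXXXXXXXXXXXXX"],
    imageId: "ami-XXXXXXXX",
});

// Pass the cluster's ID to the Ocean Spark resource rather than
// hard-coding it as in the examples below.
const sparkCluster = new spotinst.spark.Ocean("spark-cluster", {
    oceanClusterId: cluster.id,
});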
Example Usage
TypeScript:

import * as pulumi from "@pulumi/pulumi";
import * as spotinst from "@pulumi/spotinst";

const example = new spotinst.spark.Ocean("example", {
    oceanClusterId: "ocean-cluster-id",
    ingress: {
        controller: {
            managed: true,
        },
        loadBalancer: {
            managed: true,
            targetGroupArn: "arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX",
            serviceAnnotations: {
                "service.beta.kubernetes.io/aws-load-balancer-security-groups": "sg-XXXXXXXXXXXXXXXXX",
                "some-service-annotation-2": "some-service-annotation-value-2",
            },
        },
        customEndpoint: {
            enabled: false,
            address: "my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com",
        },
        privateLink: {
            enabled: false,
            vpcEndpointService: "com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX",
        },
    },
    compute: {
        createVngs: true,
        useTaints: true,
    },
    logCollection: {
        collectAppLogs: true,
    },
    webhook: {
        useHostNetwork: false,
        hostNetworkPorts: [25554],
    },
    spark: {
        additionalAppNamespaces: [
            "extra-spark-app-ns-1",
            "extra-spark-app-ns-2",
        ],
    },
});
Python:

import pulumi
import pulumi_spotinst as spotinst

example = spotinst.spark.Ocean("example",
    ocean_cluster_id="ocean-cluster-id",
    ingress={
        "controller": {
            "managed": True,
        },
        "load_balancer": {
            "managed": True,
            "target_group_arn": "arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX",
            "service_annotations": {
                "service.beta.kubernetes.io/aws-load-balancer-security-groups": "sg-XXXXXXXXXXXXXXXXX",
                "some-service-annotation-2": "some-service-annotation-value-2",
            },
        },
        "custom_endpoint": {
            "enabled": False,
            "address": "my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com",
        },
        "private_link": {
            "enabled": False,
            "vpc_endpoint_service": "com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX",
        },
    },
    compute={
        "create_vngs": True,
        "use_taints": True,
    },
    log_collection={
        "collect_app_logs": True,
    },
    webhook={
        "use_host_network": False,
        "host_network_ports": [25554],
    },
    spark={
        "additional_app_namespaces": [
            "extra-spark-app-ns-1",
            "extra-spark-app-ns-2",
        ],
    })
Go:

package main

import (
	"github.com/pulumi/pulumi-spotinst/sdk/v3/go/spotinst/spark"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := spark.NewOcean(ctx, "example", &spark.OceanArgs{
			OceanClusterId: pulumi.String("ocean-cluster-id"),
			Ingress: &spark.OceanIngressArgs{
				Controller: &spark.OceanIngressControllerArgs{
					Managed: pulumi.Bool(true),
				},
				LoadBalancer: &spark.OceanIngressLoadBalancerArgs{
					Managed:        pulumi.Bool(true),
					TargetGroupArn: pulumi.String("arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX"),
					ServiceAnnotations: pulumi.StringMap{
						"service.beta.kubernetes.io/aws-load-balancer-security-groups": pulumi.String("sg-XXXXXXXXXXXXXXXXX"),
						"some-service-annotation-2": pulumi.String("some-service-annotation-value-2"),
					},
				},
				CustomEndpoint: &spark.OceanIngressCustomEndpointArgs{
					Enabled: pulumi.Bool(false),
					Address: pulumi.String("my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com"),
				},
				PrivateLink: &spark.OceanIngressPrivateLinkArgs{
					Enabled:            pulumi.Bool(false),
					VpcEndpointService: pulumi.String("com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX"),
				},
			},
			Compute: &spark.OceanComputeArgs{
				CreateVngs: pulumi.Bool(true),
				UseTaints:  pulumi.Bool(true),
			},
			LogCollection: &spark.OceanLogCollectionArgs{
				CollectAppLogs: pulumi.Bool(true),
			},
			Webhook: &spark.OceanWebhookArgs{
				UseHostNetwork: pulumi.Bool(false),
				HostNetworkPorts: pulumi.IntArray{
					pulumi.Int(25554),
				},
			},
			Spark: &spark.OceanSparkArgs{
				AdditionalAppNamespaces: pulumi.StringArray{
					pulumi.String("extra-spark-app-ns-1"),
					pulumi.String("extra-spark-app-ns-2"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
C#:

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using SpotInst = Pulumi.SpotInst;

return await Deployment.RunAsync(() =>
{
    var example = new SpotInst.Spark.Ocean("example", new()
    {
        OceanClusterId = "ocean-cluster-id",
        Ingress = new SpotInst.Spark.Inputs.OceanIngressArgs
        {
            Controller = new SpotInst.Spark.Inputs.OceanIngressControllerArgs
            {
                Managed = true,
            },
            LoadBalancer = new SpotInst.Spark.Inputs.OceanIngressLoadBalancerArgs
            {
                Managed = true,
                TargetGroupArn = "arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX",
                ServiceAnnotations =
                {
                    { "service.beta.kubernetes.io/aws-load-balancer-security-groups", "sg-XXXXXXXXXXXXXXXXX" },
                    { "some-service-annotation-2", "some-service-annotation-value-2" },
                },
            },
            CustomEndpoint = new SpotInst.Spark.Inputs.OceanIngressCustomEndpointArgs
            {
                Enabled = false,
                Address = "my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com",
            },
            PrivateLink = new SpotInst.Spark.Inputs.OceanIngressPrivateLinkArgs
            {
                Enabled = false,
                VpcEndpointService = "com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX",
            },
        },
        Compute = new SpotInst.Spark.Inputs.OceanComputeArgs
        {
            CreateVngs = true,
            UseTaints = true,
        },
        LogCollection = new SpotInst.Spark.Inputs.OceanLogCollectionArgs
        {
            CollectAppLogs = true,
        },
        Webhook = new SpotInst.Spark.Inputs.OceanWebhookArgs
        {
            UseHostNetwork = false,
            HostNetworkPorts = new[]
            {
                25554,
            },
        },
        Spark = new SpotInst.Spark.Inputs.OceanSparkArgs
        {
            AdditionalAppNamespaces = new[]
            {
                "extra-spark-app-ns-1",
                "extra-spark-app-ns-2",
            },
        },
    });
});
Java:

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.spotinst.spark.Ocean;
import com.pulumi.spotinst.spark.OceanArgs;
import com.pulumi.spotinst.spark.inputs.OceanIngressArgs;
import com.pulumi.spotinst.spark.inputs.OceanIngressControllerArgs;
import com.pulumi.spotinst.spark.inputs.OceanIngressLoadBalancerArgs;
import com.pulumi.spotinst.spark.inputs.OceanIngressCustomEndpointArgs;
import com.pulumi.spotinst.spark.inputs.OceanIngressPrivateLinkArgs;
import com.pulumi.spotinst.spark.inputs.OceanComputeArgs;
import com.pulumi.spotinst.spark.inputs.OceanLogCollectionArgs;
import com.pulumi.spotinst.spark.inputs.OceanWebhookArgs;
import com.pulumi.spotinst.spark.inputs.OceanSparkArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new Ocean("example", OceanArgs.builder()
            .oceanClusterId("ocean-cluster-id")
            .ingress(OceanIngressArgs.builder()
                .controller(OceanIngressControllerArgs.builder()
                    .managed(true)
                    .build())
                .loadBalancer(OceanIngressLoadBalancerArgs.builder()
                    .managed(true)
                    .targetGroupArn("arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX")
                    .serviceAnnotations(Map.ofEntries(
                        Map.entry("service.beta.kubernetes.io/aws-load-balancer-security-groups", "sg-XXXXXXXXXXXXXXXXX"),
                        Map.entry("some-service-annotation-2", "some-service-annotation-value-2")
                    ))
                    .build())
                .customEndpoint(OceanIngressCustomEndpointArgs.builder()
                    .enabled(false)
                    .address("my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com")
                    .build())
                .privateLink(OceanIngressPrivateLinkArgs.builder()
                    .enabled(false)
                    .vpcEndpointService("com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX")
                    .build())
                .build())
            .compute(OceanComputeArgs.builder()
                .createVngs(true)
                .useTaints(true)
                .build())
            .logCollection(OceanLogCollectionArgs.builder()
                .collectAppLogs(true)
                .build())
            .webhook(OceanWebhookArgs.builder()
                .useHostNetwork(false)
                .hostNetworkPorts(25554)
                .build())
            .spark(OceanSparkArgs.builder()
                .additionalAppNamespaces(
                    "extra-spark-app-ns-1",
                    "extra-spark-app-ns-2")
                .build())
            .build());
    }
}
YAML:

resources:
  example:
    type: spotinst:spark:Ocean
    properties:
      oceanClusterId: ocean-cluster-id
      ingress:
        controller:
          managed: true
        loadBalancer:
          managed: true
          targetGroupArn: arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX
          serviceAnnotations:
            service.beta.kubernetes.io/aws-load-balancer-security-groups: sg-XXXXXXXXXXXXXXXXX
            some-service-annotation-2: some-service-annotation-value-2
        customEndpoint:
          enabled: false
          address: my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com
        privateLink:
          enabled: false
          vpcEndpointService: com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX
      compute:
        createVngs: true
        useTaints: true
      logCollection:
        collectAppLogs: true
      webhook:
        useHostNetwork: false
        hostNetworkPorts:
          - 25554
      spark:
        additionalAppNamespaces:
          - extra-spark-app-ns-1
          - extra-spark-app-ns-2
output "ocean_spark_id" {
value = spotinst_ocean_spark.example.id
}
Create Ocean Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
TypeScript:

new Ocean(name: string, args: OceanArgs, opts?: CustomResourceOptions);
Python:

@overload
def Ocean(resource_name: str,
          args: OceanArgs,
          opts: Optional[ResourceOptions] = None)
@overload
def Ocean(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          ocean_cluster_id: Optional[str] = None,
          compute: Optional[OceanComputeArgs] = None,
          ingress: Optional[OceanIngressArgs] = None,
          log_collection: Optional[OceanLogCollectionArgs] = None,
          spark: Optional[OceanSparkArgs] = None,
          webhook: Optional[OceanWebhookArgs] = None)
Go:

func NewOcean(ctx *Context, name string, args OceanArgs, opts ...ResourceOption) (*Ocean, error)
C#:

public Ocean(string name, OceanArgs args, CustomResourceOptions? opts = null)
YAML:

type: spotinst:spark:Ocean
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
The constructor parameters are equivalent across languages, with minor naming differences:
- name string
- The unique name of the resource (resource_name in Python).
- args OceanArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior (ResourceOptions in Python, ResourceOption in Go, options in Java).
- ctx Context
- Context object for the current deployment (Go only).
Constructor example
The following reference example uses placeholder values for all input properties.
C#:

var exampleoceanResourceResourceFromSparkocean = new SpotInst.Spark.Ocean("exampleoceanResourceResourceFromSparkocean", new()
{
    OceanClusterId = "string",
    Compute = new SpotInst.Spark.Inputs.OceanComputeArgs
    {
        CreateVngs = false,
        UseTaints = false,
    },
    Ingress = new SpotInst.Spark.Inputs.OceanIngressArgs
    {
        Controller = new SpotInst.Spark.Inputs.OceanIngressControllerArgs
        {
            Managed = false,
        },
        CustomEndpoint = new SpotInst.Spark.Inputs.OceanIngressCustomEndpointArgs
        {
            Address = "string",
            Enabled = false,
        },
        LoadBalancer = new SpotInst.Spark.Inputs.OceanIngressLoadBalancerArgs
        {
            Managed = false,
            ServiceAnnotations =
            {
                { "string", "string" },
            },
            TargetGroupArn = "string",
        },
        PrivateLink = new SpotInst.Spark.Inputs.OceanIngressPrivateLinkArgs
        {
            Enabled = false,
            VpcEndpointService = "string",
        },
        ServiceAnnotations =
        {
            { "string", "string" },
        },
    },
    LogCollection = new SpotInst.Spark.Inputs.OceanLogCollectionArgs
    {
        CollectAppLogs = false,
    },
    Spark = new SpotInst.Spark.Inputs.OceanSparkArgs
    {
        AdditionalAppNamespaces = new[]
        {
            "string",
        },
    },
    Webhook = new SpotInst.Spark.Inputs.OceanWebhookArgs
    {
        HostNetworkPorts = new[]
        {
            0,
        },
        UseHostNetwork = false,
    },
});
Go:

example, err := spark.NewOcean(ctx, "exampleoceanResourceResourceFromSparkocean", &spark.OceanArgs{
	OceanClusterId: pulumi.String("string"),
	Compute: &spark.OceanComputeArgs{
		CreateVngs: pulumi.Bool(false),
		UseTaints:  pulumi.Bool(false),
	},
	Ingress: &spark.OceanIngressArgs{
		Controller: &spark.OceanIngressControllerArgs{
			Managed: pulumi.Bool(false),
		},
		CustomEndpoint: &spark.OceanIngressCustomEndpointArgs{
			Address: pulumi.String("string"),
			Enabled: pulumi.Bool(false),
		},
		LoadBalancer: &spark.OceanIngressLoadBalancerArgs{
			Managed: pulumi.Bool(false),
			ServiceAnnotations: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			TargetGroupArn: pulumi.String("string"),
		},
		PrivateLink: &spark.OceanIngressPrivateLinkArgs{
			Enabled:            pulumi.Bool(false),
			VpcEndpointService: pulumi.String("string"),
		},
		ServiceAnnotations: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
	},
	LogCollection: &spark.OceanLogCollectionArgs{
		CollectAppLogs: pulumi.Bool(false),
	},
	Spark: &spark.OceanSparkArgs{
		AdditionalAppNamespaces: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	Webhook: &spark.OceanWebhookArgs{
		HostNetworkPorts: pulumi.IntArray{
			pulumi.Int(0),
		},
		UseHostNetwork: pulumi.Bool(false),
	},
})
Java:

var exampleoceanResourceResourceFromSparkocean = new Ocean("exampleoceanResourceResourceFromSparkocean", OceanArgs.builder()
    .oceanClusterId("string")
    .compute(OceanComputeArgs.builder()
        .createVngs(false)
        .useTaints(false)
        .build())
    .ingress(OceanIngressArgs.builder()
        .controller(OceanIngressControllerArgs.builder()
            .managed(false)
            .build())
        .customEndpoint(OceanIngressCustomEndpointArgs.builder()
            .address("string")
            .enabled(false)
            .build())
        .loadBalancer(OceanIngressLoadBalancerArgs.builder()
            .managed(false)
            .serviceAnnotations(Map.of("string", "string"))
            .targetGroupArn("string")
            .build())
        .privateLink(OceanIngressPrivateLinkArgs.builder()
            .enabled(false)
            .vpcEndpointService("string")
            .build())
        .serviceAnnotations(Map.of("string", "string"))
        .build())
    .logCollection(OceanLogCollectionArgs.builder()
        .collectAppLogs(false)
        .build())
    .spark(OceanSparkArgs.builder()
        .additionalAppNamespaces("string")
        .build())
    .webhook(OceanWebhookArgs.builder()
        .hostNetworkPorts(0)
        .useHostNetwork(false)
        .build())
    .build());
Python:

exampleocean_resource_resource_from_sparkocean = spotinst.spark.Ocean("exampleoceanResourceResourceFromSparkocean",
    ocean_cluster_id="string",
    compute={
        "create_vngs": False,
        "use_taints": False,
    },
    ingress={
        "controller": {
            "managed": False,
        },
        "custom_endpoint": {
            "address": "string",
            "enabled": False,
        },
        "load_balancer": {
            "managed": False,
            "service_annotations": {
                "string": "string",
            },
            "target_group_arn": "string",
        },
        "private_link": {
            "enabled": False,
            "vpc_endpoint_service": "string",
        },
        "service_annotations": {
            "string": "string",
        },
    },
    log_collection={
        "collect_app_logs": False,
    },
    spark={
        "additional_app_namespaces": ["string"],
    },
    webhook={
        "host_network_ports": [0],
        "use_host_network": False,
    })
TypeScript:

const exampleoceanResourceResourceFromSparkocean = new spotinst.spark.Ocean("exampleoceanResourceResourceFromSparkocean", {
    oceanClusterId: "string",
    compute: {
        createVngs: false,
        useTaints: false,
    },
    ingress: {
        controller: {
            managed: false,
        },
        customEndpoint: {
            address: "string",
            enabled: false,
        },
        loadBalancer: {
            managed: false,
            serviceAnnotations: {
                string: "string",
            },
            targetGroupArn: "string",
        },
        privateLink: {
            enabled: false,
            vpcEndpointService: "string",
        },
        serviceAnnotations: {
            string: "string",
        },
    },
    logCollection: {
        collectAppLogs: false,
    },
    spark: {
        additionalAppNamespaces: ["string"],
    },
    webhook: {
        hostNetworkPorts: [0],
        useHostNetwork: false,
    },
});
YAML:

type: spotinst:spark:Ocean
properties:
  compute:
    createVngs: false
    useTaints: false
  ingress:
    controller:
      managed: false
    customEndpoint:
      address: string
      enabled: false
    loadBalancer:
      managed: false
      serviceAnnotations:
        string: string
      targetGroupArn: string
    privateLink:
      enabled: false
      vpcEndpointService: string
    serviceAnnotations:
      string: string
  logCollection:
    collectAppLogs: false
  oceanClusterId: string
  spark:
    additionalAppNamespaces:
      - string
  webhook:
    hostNetworkPorts:
      - 0
    useHostNetwork: false
Ocean Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Ocean resource accepts the following input properties:
- oceanClusterId string
- The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
- compute OceanCompute
- ingress OceanIngress
- logCollection OceanLogCollection
- spark OceanSpark
- webhook OceanWebhook

Property and type names follow each language's conventions (for example, ocean_cluster_id and OceanComputeArgs in Python, OceanClusterId in C#). The nested object types are documented under Supporting Types below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Ocean resource produces the following output properties:
- id string
- The provider-assigned unique ID for this managed resource.
Look up Existing Ocean Resource
Get an existing Ocean resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
TypeScript:

public static get(name: string, id: Input<ID>, state?: OceanState, opts?: CustomResourceOptions): Ocean
Python:

@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        compute: Optional[OceanComputeArgs] = None,
        ingress: Optional[OceanIngressArgs] = None,
        log_collection: Optional[OceanLogCollectionArgs] = None,
        ocean_cluster_id: Optional[str] = None,
        spark: Optional[OceanSparkArgs] = None,
        webhook: Optional[OceanWebhookArgs] = None) -> Ocean
Go:

func GetOcean(ctx *Context, name string, id IDInput, state *OceanState, opts ...ResourceOption) (*Ocean, error)
C#:

public static Ocean Get(string name, Input<string> id, OceanState? state, CustomResourceOptions? opts = null)
Java:

public static Ocean get(String name, Output<String> id, OceanState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource (resource_name in Python).
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
The following state arguments are supported:
- compute OceanCompute
- ingress OceanIngress
- logCollection OceanLogCollection
- oceanClusterId string
- The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
- spark OceanSpark
- webhook OceanWebhook
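As a TypeScript sketch, looking up an existing Ocean Spark resource by its provider-assigned ID (the ID below is a placeholder):

import * as spotinst from "@pulumi/spotinst";

// Adopt the state of an existing Ocean Spark resource by ID.
const existing = spotinst.spark.Ocean.get("existing", "osc-XXXXXXXX");

// The looked-up resource exposes the same properties as a created one.
export const existingOceanClusterId = existing.oceanClusterId;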
Supporting Types
OceanCompute, OceanComputeArgs
- createVngs bool
- Enable/disable the creation of Ocean Spark VNGs during cluster creation.
- useTaints bool
- Enable/disable Ocean Spark taints on the Ocean Spark VNGs. By default, Ocean Spark uses taints to prevent non-Spark workloads from running on Ocean Spark VNGs.
OceanIngress, OceanIngressArgs
- controller OceanIngressController
- customEndpoint OceanIngressCustomEndpoint
- loadBalancer OceanIngressLoadBalancer
- privateLink OceanIngressPrivateLink
- serviceAnnotations Map<string, string>
- DEPRECATED: Use load_balancer.service_annotations instead.
OceanIngressController, OceanIngressControllerArgs
- managed bool
- Whether an ingress controller managed by Ocean for Apache Spark should be installed on the cluster.
OceanIngressCustomEndpoint, OceanIngressCustomEndpointArgs
- address string
- The address the Ocean for Apache Spark control plane will use when addressing the cluster.
- enabled bool
- Whether the Ocean for Apache Spark control plane should address the cluster using a custom endpoint. Use this to specify the DNS address of an externally provisioned (unmanaged) load balancer, as in the sketch below.
OceanIngressLoadBalancer, OceanIngressLoadBalancerArgs
- managed bool
- Whether a load balancer managed by Ocean for Apache Spark should be provisioned for the cluster. Set this to false to use an existing load balancer (only available on AWS).
- serviceAnnotations Map<string, string>
- Annotations to add to the ingress controller load balancer service. Useful for configuring properties of the managed load balancer, such as the load balancer type (e.g. ELB, NLB, or ALB on AWS), its security groups, or various timeouts. See the sketch below.
- targetGroupArn string
- The ARN of a target group that the Ocean for Apache Spark ingress controller will be bound to. Set this to use an existing load balancer with Ocean for Apache Spark. Has no effect when using a managed load balancer. Only available on AWS.
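For instance, a TypeScript sketch that uses service annotations to request a network load balancer for the managed service. The annotation key is a standard Kubernetes AWS annotation; whether it applies depends on your cluster's load balancer controller:

import * as spotinst from "@pulumi/spotinst";

const example = new spotinst.spark.Ocean("example", {
    oceanClusterId: "ocean-cluster-id",
    ingress: {
        loadBalancer: {
            managed: true,
            serviceAnnotations: {
                // Ask Kubernetes to provision an NLB instead of a classic ELB.
                "service.beta.kubernetes.io/aws-load-balancer-type": "nlb",
            },
        },
    },
});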
OceanIngressPrivateLink, OceanIngressPrivateLinkArgs
- enabled bool
- Whether the Ocean for Apache Spark control plane should address the cluster via AWS PrivateLink. Only available on AWS.
- vpcEndpointService string
- The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to.
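A TypeScript sketch of enabling PrivateLink, assuming a VPC Endpoint Service has already been created in front of the cluster's load balancer (the service name below is a placeholder):

import * as spotinst from "@pulumi/spotinst";

const example = new spotinst.spark.Ocean("example", {
    oceanClusterId: "ocean-cluster-id",
    ingress: {
        privateLink: {
            enabled: true,
            vpcEndpointService: "com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX",
        },
    },
});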
OceanLogCollection, OceanLogCollectionArgs
- collectAppLogs bool
- Enable/disable collection of driver and executor logs. When enabled, logs are stored by NetApp and can be downloaded from the Spot console web interface. Logs are deleted after 30 days.
OceanSpark, OceanSparkArgs
- additionalAppNamespaces string[]
- List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default Spark application namespace spark-apps.
OceanWebhook, OceanWebhookArgs
- hostNetworkPorts int[]
- List of ports that may be used on the host network; if empty, the default is 25554.
- useHostNetwork bool
- Enable/disable host networking for the Spark Operator. Host networking can be useful when using custom CNI plugins like Calico on EKS. See the sketch below.
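A TypeScript sketch of turning host networking on, for example on an EKS cluster running Calico, where the API server may not be able to reach webhook pods over the pod network; port 25554 matches the documented default:

import * as spotinst from "@pulumi/spotinst";

const example = new spotinst.spark.Ocean("example", {
    oceanClusterId: "ocean-cluster-id",
    webhook: {
        useHostNetwork: true,
        hostNetworkPorts: [25554],
    },
});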
Package Details
- Repository
- Spotinst pulumi/pulumi-spotinst
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the spotinst Terraform Provider.