scaleway.InferenceDeployment
Explore with Pulumi AI
Creates and manages Scaleway Managed Inference deployments. For more information, see the documentation.
Example Usage
Basic
import * as pulumi from "@pulumi/pulumi";
import * as scaleway from "@pulumiverse/scaleway";
const deployment = new scaleway.InferenceDeployment("deployment", {
name: "tf-inference-deployment",
nodeType: "L4",
modelName: "meta/llama-3.1-8b-instruct:fp8",
publicEndpoint: {
isEnabled: true,
},
acceptEula: true,
});
import pulumi
import pulumiverse_scaleway as scaleway
deployment = scaleway.InferenceDeployment("deployment",
name="tf-inference-deployment",
node_type="L4",
model_name="meta/llama-3.1-8b-instruct:fp8",
public_endpoint={
"is_enabled": True,
},
accept_eula=True)
package main
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumiverse/pulumi-scaleway/sdk/go/scaleway"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := scaleway.NewInferenceDeployment(ctx, "deployment", &scaleway.InferenceDeploymentArgs{
Name: pulumi.String("tf-inference-deployment"),
NodeType: pulumi.String("L4"),
ModelName: pulumi.String("meta/llama-3.1-8b-instruct:fp8"),
PublicEndpoint: &scaleway.InferenceDeploymentPublicEndpointArgs{
IsEnabled: pulumi.Bool(true),
},
AcceptEula: pulumi.Bool(true),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Scaleway = Pulumiverse.Scaleway;
return await Deployment.RunAsync(() =>
{
var deployment = new Scaleway.InferenceDeployment("deployment", new()
{
Name = "tf-inference-deployment",
NodeType = "L4",
ModelName = "meta/llama-3.1-8b-instruct:fp8",
PublicEndpoint = new Scaleway.Inputs.InferenceDeploymentPublicEndpointArgs
{
IsEnabled = true,
},
AcceptEula = true,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.scaleway.InferenceDeployment;
import com.pulumi.scaleway.InferenceDeploymentArgs;
import com.pulumi.scaleway.inputs.InferenceDeploymentPublicEndpointArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var deployment = new InferenceDeployment("deployment", InferenceDeploymentArgs.builder()
.name("tf-inference-deployment")
.nodeType("L4")
.modelName("meta/llama-3.1-8b-instruct:fp8")
.publicEndpoint(InferenceDeploymentPublicEndpointArgs.builder()
.isEnabled(true)
.build())
.acceptEula(true)
.build());
}
}
resources:
deployment:
type: scaleway:InferenceDeployment
properties:
name: tf-inference-deployment
nodeType: L4
modelName: meta/llama-3.1-8b-instruct:fp8
publicEndpoint:
isEnabled: true
acceptEula: true
Create InferenceDeployment Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new InferenceDeployment(name: string, args: InferenceDeploymentArgs, opts?: CustomResourceOptions);
@overload
def InferenceDeployment(resource_name: str,
args: InferenceDeploymentArgs,
opts: Optional[ResourceOptions] = None)
@overload
def InferenceDeployment(resource_name: str,
opts: Optional[ResourceOptions] = None,
model_name: Optional[str] = None,
node_type: Optional[str] = None,
accept_eula: Optional[bool] = None,
max_size: Optional[int] = None,
min_size: Optional[int] = None,
name: Optional[str] = None,
private_endpoint: Optional[InferenceDeploymentPrivateEndpointArgs] = None,
project_id: Optional[str] = None,
public_endpoint: Optional[InferenceDeploymentPublicEndpointArgs] = None,
region: Optional[str] = None,
tags: Optional[Sequence[str]] = None)
func NewInferenceDeployment(ctx *Context, name string, args InferenceDeploymentArgs, opts ...ResourceOption) (*InferenceDeployment, error)
public InferenceDeployment(string name, InferenceDeploymentArgs args, CustomResourceOptions? opts = null)
public InferenceDeployment(String name, InferenceDeploymentArgs args)
public InferenceDeployment(String name, InferenceDeploymentArgs args, CustomResourceOptions options)
type: scaleway:InferenceDeployment
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args InferenceDeploymentArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args InferenceDeploymentArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args InferenceDeploymentArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args InferenceDeploymentArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args InferenceDeploymentArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var inferenceDeploymentResource = new Scaleway.InferenceDeployment("inferenceDeploymentResource", new()
{
ModelName = "string",
NodeType = "string",
AcceptEula = false,
MaxSize = 0,
MinSize = 0,
Name = "string",
PrivateEndpoint = new Scaleway.Inputs.InferenceDeploymentPrivateEndpointArgs
{
DisableAuth = false,
Id = "string",
PrivateNetworkId = "string",
Url = "string",
},
ProjectId = "string",
PublicEndpoint = new Scaleway.Inputs.InferenceDeploymentPublicEndpointArgs
{
DisableAuth = false,
Id = "string",
IsEnabled = false,
Url = "string",
},
Region = "string",
Tags = new[]
{
"string",
},
});
example, err := scaleway.NewInferenceDeployment(ctx, "inferenceDeploymentResource", &scaleway.InferenceDeploymentArgs{
ModelName: pulumi.String("string"),
NodeType: pulumi.String("string"),
AcceptEula: pulumi.Bool(false),
MaxSize: pulumi.Int(0),
MinSize: pulumi.Int(0),
Name: pulumi.String("string"),
PrivateEndpoint: &scaleway.InferenceDeploymentPrivateEndpointArgs{
DisableAuth: pulumi.Bool(false),
Id: pulumi.String("string"),
PrivateNetworkId: pulumi.String("string"),
Url: pulumi.String("string"),
},
ProjectId: pulumi.String("string"),
PublicEndpoint: &scaleway.InferenceDeploymentPublicEndpointArgs{
DisableAuth: pulumi.Bool(false),
Id: pulumi.String("string"),
IsEnabled: pulumi.Bool(false),
Url: pulumi.String("string"),
},
Region: pulumi.String("string"),
Tags: pulumi.StringArray{
pulumi.String("string"),
},
})
var inferenceDeploymentResource = new InferenceDeployment("inferenceDeploymentResource", InferenceDeploymentArgs.builder()
.modelName("string")
.nodeType("string")
.acceptEula(false)
.maxSize(0)
.minSize(0)
.name("string")
.privateEndpoint(InferenceDeploymentPrivateEndpointArgs.builder()
.disableAuth(false)
.id("string")
.privateNetworkId("string")
.url("string")
.build())
.projectId("string")
.publicEndpoint(InferenceDeploymentPublicEndpointArgs.builder()
.disableAuth(false)
.id("string")
.isEnabled(false)
.url("string")
.build())
.region("string")
.tags("string")
.build());
inference_deployment_resource = scaleway.InferenceDeployment("inferenceDeploymentResource",
model_name="string",
node_type="string",
accept_eula=False,
max_size=0,
min_size=0,
name="string",
private_endpoint={
"disable_auth": False,
"id": "string",
"private_network_id": "string",
"url": "string",
},
project_id="string",
public_endpoint={
"disable_auth": False,
"id": "string",
"is_enabled": False,
"url": "string",
},
region="string",
tags=["string"])
const inferenceDeploymentResource = new scaleway.InferenceDeployment("inferenceDeploymentResource", {
modelName: "string",
nodeType: "string",
acceptEula: false,
maxSize: 0,
minSize: 0,
name: "string",
privateEndpoint: {
disableAuth: false,
id: "string",
privateNetworkId: "string",
url: "string",
},
projectId: "string",
publicEndpoint: {
disableAuth: false,
id: "string",
isEnabled: false,
url: "string",
},
region: "string",
tags: ["string"],
});
type: scaleway:InferenceDeployment
properties:
acceptEula: false
maxSize: 0
minSize: 0
modelName: string
name: string
nodeType: string
privateEndpoint:
disableAuth: false
id: string
privateNetworkId: string
url: string
projectId: string
publicEndpoint:
disableAuth: false
id: string
isEnabled: false
url: string
region: string
tags:
- string
InferenceDeployment Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The InferenceDeployment resource accepts the following input properties:
- Model
Name string - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - Node
Type string - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - Accept
Eula bool - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - Max
Size int - The maximum size of the pool.
- Min
Size int - The minimum size of the pool.
- Name string
- The deployment name.
- Private
Endpoint Pulumiverse.Scaleway. Inputs. Inference Deployment Private Endpoint - Configuration of the deployment's private endpoint.
- Project
Id string - (Defaults to provider project_id) The ID of the project the deployment is associated with.
- Public
Endpoint Pulumiverse.Scaleway. Inputs. Inference Deployment Public Endpoint - Configuration of the deployment's public endpoint.
- Region string
- (Defaults to provider region) The region in which the deployment is created.
- Tags List<string>
- The tags associated with the deployment.
- Model
Name string - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - Node
Type string - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - Accept
Eula bool - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - Max
Size int - The maximum size of the pool.
- Min
Size int - The minimum size of the pool.
- Name string
- The deployment name.
- Private
Endpoint InferenceDeployment Private Endpoint Args - Configuration of the deployment's private endpoint.
- Project
Id string - (Defaults to provider project_id) The ID of the project the deployment is associated with.
- Public
Endpoint InferenceDeployment Public Endpoint Args - Configuration of the deployment's public endpoint.
- Region string
- (Defaults to provider region) The region in which the deployment is created.
- Tags []string
- The tags associated with the deployment.
- model
Name String - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - node
Type String - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - accept
Eula Boolean - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - max
Size Integer - The maximum size of the pool.
- min
Size Integer - The minimum size of the pool.
- name String
- The deployment name.
- private
Endpoint InferenceDeployment Private Endpoint - Configuration of the deployment's private endpoint.
- project
Id String - (Defaults to provider project_id) The ID of the project the deployment is associated with.
- public
Endpoint InferenceDeployment Public Endpoint - Configuration of the deployment's public endpoint.
- region String
- (Defaults to provider region) The region in which the deployment is created.
- tags List<String>
- The tags associated with the deployment.
- model
Name string - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - node
Type string - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - accept
Eula boolean - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - max
Size number - The maximum size of the pool.
- min
Size number - The minimum size of the pool.
- name string
- The deployment name.
- private
Endpoint InferenceDeployment Private Endpoint - Configuration of the deployment's private endpoint.
- project
Id string - (Defaults to provider project_id) The ID of the project the deployment is associated with.
- public
Endpoint InferenceDeployment Public Endpoint - Configuration of the deployment's public endpoint.
- region string
- (Defaults to provider region) The region in which the deployment is created.
- tags string[]
- The tags associated with the deployment.
- model_
name str - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - node_
type str - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - accept_
eula bool - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - max_
size int - The maximum size of the pool.
- min_
size int - The minimum size of the pool.
- name str
- The deployment name.
- private_
endpoint InferenceDeployment Private Endpoint Args - Configuration of the deployment's private endpoint.
- project_
id str - (Defaults to provider project_id) The ID of the project the deployment is associated with.
- public_
endpoint InferenceDeployment Public Endpoint Args - Configuration of the deployment's public endpoint.
- region str
- (Defaults to provider region) The region in which the deployment is created.
- tags Sequence[str]
- The tags associated with the deployment.
- model
Name String - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - node
Type String - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - accept
Eula Boolean - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - max
Size Number - The maximum size of the pool.
- min
Size Number - The minimum size of the pool.
- name String
- The deployment name.
- private
Endpoint Property Map - Configuration of the deployment's private endpoint.
- project
Id String - (Defaults to provider project_id) The ID of the project the deployment is associated with.
- public
Endpoint Property Map - Configuration of the deployment's public endpoint.
- region String
- (Defaults to provider region) The region in which the deployment is created.
- tags List<String>
- The tags associated with the deployment.
Outputs
All input properties are implicitly available as output properties. Additionally, the InferenceDeployment resource produces the following output properties:
- Created
At string - The date and time of the creation of the deployment.
- Id string
- The provider-assigned unique ID for this managed resource.
- Model
Id string - The model id used for the deployment.
- Size int
- The size of the pool.
- Status string
- The status of the deployment.
- Updated
At string - The date and time of the last update of the deployment.
- Created
At string - The date and time of the creation of the deployment.
- Id string
- The provider-assigned unique ID for this managed resource.
- Model
Id string - The model id used for the deployment.
- Size int
- The size of the pool.
- Status string
- The status of the deployment.
- Updated
At string - The date and time of the last update of the deployment.
- created
At String - The date and time of the creation of the deployment.
- id String
- The provider-assigned unique ID for this managed resource.
- model
Id String - The model id used for the deployment.
- size Integer
- The size of the pool.
- status String
- The status of the deployment.
- updated
At String - The date and time of the last update of the deployment.
- created
At string - The date and time of the creation of the deployment.
- id string
- The provider-assigned unique ID for this managed resource.
- model
Id string - The model id used for the deployment.
- size number
- The size of the pool.
- status string
- The status of the deployment.
- updated
At string - The date and time of the last update of the deployment.
- created_
at str - The date and time of the creation of the deployment.
- id str
- The provider-assigned unique ID for this managed resource.
- model_
id str - The model id used for the deployment.
- size int
- The size of the pool.
- status str
- The status of the deployment.
- updated_
at str - The date and time of the last update of the deployment.
- created
At String - The date and time of the creation of the deployment.
- id String
- The provider-assigned unique ID for this managed resource.
- model
Id String - The model id used for the deployment.
- size Number
- The size of the pool.
- status String
- The status of the deployment.
- updated
At String - The date and time of the last update of the deployment.
Look up Existing InferenceDeployment Resource
Get an existing InferenceDeployment resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: InferenceDeploymentState, opts?: CustomResourceOptions): InferenceDeployment
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
accept_eula: Optional[bool] = None,
created_at: Optional[str] = None,
max_size: Optional[int] = None,
min_size: Optional[int] = None,
model_id: Optional[str] = None,
model_name: Optional[str] = None,
name: Optional[str] = None,
node_type: Optional[str] = None,
private_endpoint: Optional[InferenceDeploymentPrivateEndpointArgs] = None,
project_id: Optional[str] = None,
public_endpoint: Optional[InferenceDeploymentPublicEndpointArgs] = None,
region: Optional[str] = None,
size: Optional[int] = None,
status: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
updated_at: Optional[str] = None) -> InferenceDeployment
func GetInferenceDeployment(ctx *Context, name string, id IDInput, state *InferenceDeploymentState, opts ...ResourceOption) (*InferenceDeployment, error)
public static InferenceDeployment Get(string name, Input<string> id, InferenceDeploymentState? state, CustomResourceOptions? opts = null)
public static InferenceDeployment get(String name, Output<String> id, InferenceDeploymentState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Accept
Eula bool - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - Created
At string - The date and time of the creation of the deployment.
- Max
Size int - The maximum size of the pool.
- Min
Size int - The minimum size of the pool.
- Model
Id string - The model id used for the deployment.
- Model
Name string - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - Name string
- The deployment name.
- Node
Type string - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - Private
Endpoint Pulumiverse.Scaleway. Inputs. Inference Deployment Private Endpoint - Configuration of the deployment's private endpoint.
- Project
Id string project_id
) The ID of the project the deployment is associated with.- Public
Endpoint Pulumiverse.Scaleway. Inputs. Inference Deployment Public Endpoint - Configuration of the deployment's public endpoint.
- Region string
region
) The region in which the deployment is created.- Size int
- The size of the pool.
- Status string
- The status of the deployment.
- List<string>
- The tags associated with the deployment.
- Updated
At string - The date and time of the last update of the deployment.
- Accept
Eula bool - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - Created
At string - The date and time of the creation of the deployment.
- Max
Size int - The maximum size of the pool.
- Min
Size int - The minimum size of the pool.
- Model
Id string - The model id used for the deployment.
- Model
Name string - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - Name string
- The deployment name.
- Node
Type string - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - Private
Endpoint InferenceDeployment Private Endpoint Args - Configuration of the deployment's private endpoint.
- Project
Id string project_id
) The ID of the project the deployment is associated with.- Public
Endpoint InferenceDeployment Public Endpoint Args - Configuration of the deployment's public endpoint.
- Region string
region
) The region in which the deployment is created.- Size int
- The size of the pool.
- Status string
- The status of the deployment.
- []string
- The tags associated with the deployment.
- Updated
At string - The date and time of the last update of the deployment.
- accept
Eula Boolean - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - created
At String - The date and time of the creation of the deployment.
- max
Size Integer - The maximum size of the pool.
- min
Size Integer - The minimum size of the pool.
- model
Id String - The model id used for the deployment.
- model
Name String - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - name String
- The deployment name.
- node
Type String - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - private
Endpoint InferenceDeployment Private Endpoint - Configuration of the deployment's private endpoint.
- project
Id String project_id
) The ID of the project the deployment is associated with.- public
Endpoint InferenceDeployment Public Endpoint - Configuration of the deployment's public endpoint.
- region String
region
) The region in which the deployment is created.- size Integer
- The size of the pool.
- status String
- The status of the deployment.
- List<String>
- The tags associated with the deployment.
- updated
At String - The date and time of the last update of the deployment.
- accept
Eula boolean - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - created
At string - The date and time of the creation of the deployment.
- max
Size number - The maximum size of the pool.
- min
Size number - The minimum size of the pool.
- model
Id string - The model id used for the deployment.
- model
Name string - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - name string
- The deployment name.
- node
Type string - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - private
Endpoint InferenceDeployment Private Endpoint - Configuration of the deployment's private endpoint.
- project
Id string project_id
) The ID of the project the deployment is associated with.- public
Endpoint InferenceDeployment Public Endpoint - Configuration of the deployment's public endpoint.
- region string
region
) The region in which the deployment is created.- size number
- The size of the pool.
- status string
- The status of the deployment.
- string[]
- The tags associated with the deployment.
- updated
At string - The date and time of the last update of the deployment.
- accept_
eula bool - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - created_
at str - The date and time of the creation of the deployment.
- max_
size int - The maximum size of the pool.
- min_
size int - The minimum size of the pool.
- model_
id str - The model id used for the deployment.
- model_
name str - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - name str
- The deployment name.
- node_
type str - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - private_
endpoint InferenceDeployment Private Endpoint Args - Configuration of the deployment's private endpoint.
- project_
id str project_id
) The ID of the project the deployment is associated with.- public_
endpoint InferenceDeployment Public Endpoint Args - Configuration of the deployment's public endpoint.
- region str
region
) The region in which the deployment is created.- size int
- The size of the pool.
- status str
- The status of the deployment.
- Sequence[str]
- The tags associated with the deployment.
- updated_
at str - The date and time of the last update of the deployment.
- accept
Eula Boolean - Some models (e.g. Meta Llama) require end-user license agreements. Set
true
to accept. - created
At String - The date and time of the creation of the deployment.
- max
Size Number - The maximum size of the pool.
- min
Size Number - The minimum size of the pool.
- model
Id String - The model id used for the deployment.
- model
Name String - The model name to use for the deployment. Model names can be found in Console or using Scaleway's CLI (
scw inference model list
) - name String
- The deployment name.
- node
Type String - The node type to use for the deployment. Node types can be found using Scaleway's CLI (
scw inference node-type list
) - private
Endpoint Property Map - Configuration of the deployment's private endpoint.
- project
Id String project_id
) The ID of the project the deployment is associated with.- public
Endpoint Property Map - Configuration of the deployment's public endpoint.
- region String
region
) The region in which the deployment is created.- size Number
- The size of the pool.
- status String
- The status of the deployment.
- List<String>
- The tags associated with the deployment.
- updated
At String - The date and time of the last update of the deployment.
Supporting Types
InferenceDeploymentPrivateEndpoint, InferenceDeploymentPrivateEndpointArgs
- Disable
Auth bool - Disable the authentication on the endpoint.
- Id string
- (Optional) The id of the private endpoint.
- Private
Network stringId - The ID of the private network to use.
- Url string
- (Optional) The URL of the endpoint.
- Disable
Auth bool - Disable the authentication on the endpoint.
- Id string
- (Optional) The id of the private endpoint.
- Private
Network stringId - The ID of the private network to use.
- Url string
- (Optional) The URL of the endpoint.
- disable
Auth Boolean - Disable the authentication on the endpoint.
- id String
- (Optional) The id of the private endpoint.
- private
Network StringId - The ID of the private network to use.
- url String
- (Optional) The URL of the endpoint.
- disable
Auth boolean - Disable the authentication on the endpoint.
- id string
- (Optional) The id of the private endpoint.
- private
Network stringId - The ID of the private network to use.
- url string
- (Optional) The URL of the endpoint.
- disable_
auth bool - Disable the authentication on the endpoint.
- id str
- (Optional) The id of the private endpoint.
- private_
network_ strid - The ID of the private network to use.
- url str
- (Optional) The URL of the endpoint.
- disable
Auth Boolean - Disable the authentication on the endpoint.
- id String
- (Optional) The id of the private endpoint.
- private
Network StringId - The ID of the private network to use.
- url String
- (Optional) The URL of the endpoint.
InferenceDeploymentPublicEndpoint, InferenceDeploymentPublicEndpointArgs
- Disable
Auth bool - Disable the authentication on the endpoint.
- Id string
- (Optional) The id of the public endpoint.
- Is
Enabled bool - Enable or disable public endpoint.
- Url string
- (Optional) The URL of the endpoint.
- Disable
Auth bool - Disable the authentication on the endpoint.
- Id string
- (Optional) The id of the public endpoint.
- Is
Enabled bool - Enable or disable public endpoint.
- Url string
- (Optional) The URL of the endpoint.
- disable
Auth Boolean - Disable the authentication on the endpoint.
- id String
- (Optional) The id of the public endpoint.
- is
Enabled Boolean - Enable or disable public endpoint.
- url String
- (Optional) The URL of the endpoint.
- disable
Auth boolean - Disable the authentication on the endpoint.
- id string
- (Optional) The id of the public endpoint.
- is
Enabled boolean - Enable or disable public endpoint.
- url string
- (Optional) The URL of the endpoint.
- disable_
auth bool - Disable the authentication on the endpoint.
- id str
- (Optional) The id of the public endpoint.
- is_
enabled bool - Enable or disable public endpoint.
- url str
- (Optional) The URL of the endpoint.
- disable
Auth Boolean - Disable the authentication on the endpoint.
- id String
- (Optional) The id of the public endpoint.
- is
Enabled Boolean - Enable or disable public endpoint.
- url String
- (Optional) The URL of the endpoint.
Import
Inference deployments can be imported using {region}/{id}
, as shown below:
bash
$ pulumi import scaleway:index/inferenceDeployment:InferenceDeployment deployment fr-par/11111111-1111-1111-1111-111111111111
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- scaleway pulumiverse/pulumi-scaleway
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
scaleway
Terraform Provider.