aws.sagemaker.Model
Explore with Pulumi AI
Provides a SageMaker model resource.
Example Usage
Basic usage:
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const assumeRole = aws.iam.getPolicyDocument({
statements: [{
actions: ["sts:AssumeRole"],
principals: [{
type: "Service",
identifiers: ["sagemaker.amazonaws.com"],
}],
}],
});
const exampleRole = new aws.iam.Role("example", {assumeRolePolicy: assumeRole.then(assumeRole => assumeRole.json)});
const test = aws.sagemaker.getPrebuiltEcrImage({
repositoryName: "kmeans",
});
const example = new aws.sagemaker.Model("example", {
name: "my-model",
executionRoleArn: exampleRole.arn,
primaryContainer: {
image: test.then(test => test.registryPath),
},
});
import pulumi
import pulumi_aws as aws
assume_role = aws.iam.get_policy_document(statements=[{
"actions": ["sts:AssumeRole"],
"principals": [{
"type": "Service",
"identifiers": ["sagemaker.amazonaws.com"],
}],
}])
example_role = aws.iam.Role("example", assume_role_policy=assume_role.json)
test = aws.sagemaker.get_prebuilt_ecr_image(repository_name="kmeans")
example = aws.sagemaker.Model("example",
name="my-model",
execution_role_arn=example_role.arn,
primary_container={
"image": test.registry_path,
})
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/sagemaker"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
assumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
Statements: []iam.GetPolicyDocumentStatement{
{
Actions: []string{
"sts:AssumeRole",
},
Principals: []iam.GetPolicyDocumentStatementPrincipal{
{
Type: "Service",
Identifiers: []string{
"sagemaker.amazonaws.com",
},
},
},
},
},
}, nil)
if err != nil {
return err
}
exampleRole, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
AssumeRolePolicy: pulumi.String(assumeRole.Json),
})
if err != nil {
return err
}
test, err := sagemaker.GetPrebuiltEcrImage(ctx, &sagemaker.GetPrebuiltEcrImageArgs{
RepositoryName: "kmeans",
}, nil)
if err != nil {
return err
}
_, err = sagemaker.NewModel(ctx, "example", &sagemaker.ModelArgs{
Name: pulumi.String("my-model"),
ExecutionRoleArn: exampleRole.Arn,
PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
Image: pulumi.String(test.RegistryPath),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var assumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
{
Statements = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
{
Actions = new[]
{
"sts:AssumeRole",
},
Principals = new[]
{
new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
{
Type = "Service",
Identifiers = new[]
{
"sagemaker.amazonaws.com",
},
},
},
},
},
});
var exampleRole = new Aws.Iam.Role("example", new()
{
AssumeRolePolicy = assumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
});
var test = Aws.Sagemaker.GetPrebuiltEcrImage.Invoke(new()
{
RepositoryName = "kmeans",
});
var example = new Aws.Sagemaker.Model("example", new()
{
Name = "my-model",
ExecutionRoleArn = exampleRole.Arn,
PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
{
Image = test.Apply(getPrebuiltEcrImageResult => getPrebuiltEcrImageResult.RegistryPath),
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.sagemaker.SagemakerFunctions;
import com.pulumi.aws.sagemaker.inputs.GetPrebuiltEcrImageArgs;
import com.pulumi.aws.sagemaker.Model;
import com.pulumi.aws.sagemaker.ModelArgs;
import com.pulumi.aws.sagemaker.inputs.ModelPrimaryContainerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var assumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
.statements(GetPolicyDocumentStatementArgs.builder()
.actions("sts:AssumeRole")
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()
.type("Service")
.identifiers("sagemaker.amazonaws.com")
.build())
.build())
.build());
var exampleRole = new Role("exampleRole", RoleArgs.builder()
.assumeRolePolicy(assumeRole.applyValue(getPolicyDocumentResult -> getPolicyDocumentResult.json()))
.build());
final var test = SagemakerFunctions.getPrebuiltEcrImage(GetPrebuiltEcrImageArgs.builder()
.repositoryName("kmeans")
.build());
var example = new Model("example", ModelArgs.builder()
.name("my-model")
.executionRoleArn(exampleRole.arn())
.primaryContainer(ModelPrimaryContainerArgs.builder()
.image(test.applyValue(getPrebuiltEcrImageResult -> getPrebuiltEcrImageResult.registryPath()))
.build())
.build());
}
}
resources:
example:
type: aws:sagemaker:Model
properties:
name: my-model
executionRoleArn: ${exampleRole.arn}
primaryContainer:
image: ${test.registryPath}
exampleRole:
type: aws:iam:Role
name: example
properties:
assumeRolePolicy: ${assumeRole.json}
variables:
assumeRole:
fn::invoke:
Function: aws:iam:getPolicyDocument
Arguments:
statements:
- actions:
- sts:AssumeRole
principals:
- type: Service
identifiers:
- sagemaker.amazonaws.com
test:
fn::invoke:
Function: aws:sagemaker:getPrebuiltEcrImage
Arguments:
repositoryName: kmeans
Inference Execution Config
mode
- (Required) How containers in a multi-container model are run. The following values are valid: `Serial` and `Direct`.
Create Model Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Model(name: string, args: ModelArgs, opts?: CustomResourceOptions);
@overload
def Model(resource_name: str,
args: ModelArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Model(resource_name: str,
opts: Optional[ResourceOptions] = None,
execution_role_arn: Optional[str] = None,
containers: Optional[Sequence[ModelContainerArgs]] = None,
enable_network_isolation: Optional[bool] = None,
inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
name: Optional[str] = None,
primary_container: Optional[ModelPrimaryContainerArgs] = None,
tags: Optional[Mapping[str, str]] = None,
vpc_config: Optional[ModelVpcConfigArgs] = None)
func NewModel(ctx *Context, name string, args ModelArgs, opts ...ResourceOption) (*Model, error)
public Model(string name, ModelArgs args, CustomResourceOptions? opts = null)
type: aws:sagemaker:Model
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args ModelArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var examplemodelResourceResourceFromSagemakermodel = new Aws.Sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", new()
{
ExecutionRoleArn = "string",
Containers = new[]
{
new Aws.Sagemaker.Inputs.ModelContainerArgs
{
ContainerHostname = "string",
Environment =
{
{ "string", "string" },
},
Image = "string",
ImageConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigArgs
{
RepositoryAccessMode = "string",
RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigRepositoryAuthConfigArgs
{
RepositoryCredentialsProviderArn = "string",
},
},
InferenceSpecificationName = "string",
Mode = "string",
ModelDataSource = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceArgs
{
S3DataSources = new[]
{
new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceArgs
{
CompressionType = "string",
S3DataType = "string",
S3Uri = "string",
ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs
{
AcceptEula = false,
},
},
},
},
ModelDataUrl = "string",
ModelPackageName = "string",
MultiModelConfig = new Aws.Sagemaker.Inputs.ModelContainerMultiModelConfigArgs
{
ModelCacheSetting = "string",
},
},
},
EnableNetworkIsolation = false,
InferenceExecutionConfig = new Aws.Sagemaker.Inputs.ModelInferenceExecutionConfigArgs
{
Mode = "string",
},
Name = "string",
PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
{
ContainerHostname = "string",
Environment =
{
{ "string", "string" },
},
Image = "string",
ImageConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigArgs
{
RepositoryAccessMode = "string",
RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs
{
RepositoryCredentialsProviderArn = "string",
},
},
InferenceSpecificationName = "string",
Mode = "string",
ModelDataSource = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceArgs
{
S3DataSources = new[]
{
new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceArgs
{
CompressionType = "string",
S3DataType = "string",
S3Uri = "string",
ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs
{
AcceptEula = false,
},
},
},
},
ModelDataUrl = "string",
ModelPackageName = "string",
MultiModelConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerMultiModelConfigArgs
{
ModelCacheSetting = "string",
},
},
Tags =
{
{ "string", "string" },
},
VpcConfig = new Aws.Sagemaker.Inputs.ModelVpcConfigArgs
{
SecurityGroupIds = new[]
{
"string",
},
Subnets = new[]
{
"string",
},
},
});
example, err := sagemaker.NewModel(ctx, "examplemodelResourceResourceFromSagemakermodel", &sagemaker.ModelArgs{
ExecutionRoleArn: pulumi.String("string"),
Containers: sagemaker.ModelContainerArray{
&sagemaker.ModelContainerArgs{
ContainerHostname: pulumi.String("string"),
Environment: pulumi.StringMap{
"string": pulumi.String("string"),
},
Image: pulumi.String("string"),
ImageConfig: &sagemaker.ModelContainerImageConfigArgs{
RepositoryAccessMode: pulumi.String("string"),
RepositoryAuthConfig: &sagemaker.ModelContainerImageConfigRepositoryAuthConfigArgs{
RepositoryCredentialsProviderArn: pulumi.String("string"),
},
},
InferenceSpecificationName: pulumi.String("string"),
Mode: pulumi.String("string"),
ModelDataSource: &sagemaker.ModelContainerModelDataSourceArgs{
S3DataSources: sagemaker.ModelContainerModelDataSourceS3DataSourceArray{
&sagemaker.ModelContainerModelDataSourceS3DataSourceArgs{
CompressionType: pulumi.String("string"),
S3DataType: pulumi.String("string"),
S3Uri: pulumi.String("string"),
ModelAccessConfig: &sagemaker.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
AcceptEula: pulumi.Bool(false),
},
},
},
},
ModelDataUrl: pulumi.String("string"),
ModelPackageName: pulumi.String("string"),
MultiModelConfig: &sagemaker.ModelContainerMultiModelConfigArgs{
ModelCacheSetting: pulumi.String("string"),
},
},
},
EnableNetworkIsolation: pulumi.Bool(false),
InferenceExecutionConfig: &sagemaker.ModelInferenceExecutionConfigArgs{
Mode: pulumi.String("string"),
},
Name: pulumi.String("string"),
PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
ContainerHostname: pulumi.String("string"),
Environment: pulumi.StringMap{
"string": pulumi.String("string"),
},
Image: pulumi.String("string"),
ImageConfig: &sagemaker.ModelPrimaryContainerImageConfigArgs{
RepositoryAccessMode: pulumi.String("string"),
RepositoryAuthConfig: &sagemaker.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs{
RepositoryCredentialsProviderArn: pulumi.String("string"),
},
},
InferenceSpecificationName: pulumi.String("string"),
Mode: pulumi.String("string"),
ModelDataSource: &sagemaker.ModelPrimaryContainerModelDataSourceArgs{
S3DataSources: sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArray{
&sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArgs{
CompressionType: pulumi.String("string"),
S3DataType: pulumi.String("string"),
S3Uri: pulumi.String("string"),
ModelAccessConfig: &sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
AcceptEula: pulumi.Bool(false),
},
},
},
},
ModelDataUrl: pulumi.String("string"),
ModelPackageName: pulumi.String("string"),
MultiModelConfig: &sagemaker.ModelPrimaryContainerMultiModelConfigArgs{
ModelCacheSetting: pulumi.String("string"),
},
},
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
VpcConfig: &sagemaker.ModelVpcConfigArgs{
SecurityGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
Subnets: pulumi.StringArray{
pulumi.String("string"),
},
},
})
var examplemodelResourceResourceFromSagemakermodel = new Model("examplemodelResourceResourceFromSagemakermodel", ModelArgs.builder()
.executionRoleArn("string")
.containers(ModelContainerArgs.builder()
.containerHostname("string")
.environment(Map.of("string", "string"))
.image("string")
.imageConfig(ModelContainerImageConfigArgs.builder()
.repositoryAccessMode("string")
.repositoryAuthConfig(ModelContainerImageConfigRepositoryAuthConfigArgs.builder()
.repositoryCredentialsProviderArn("string")
.build())
.build())
.inferenceSpecificationName("string")
.mode("string")
.modelDataSource(ModelContainerModelDataSourceArgs.builder()
.s3DataSources(ModelContainerModelDataSourceS3DataSourceArgs.builder()
.compressionType("string")
.s3DataType("string")
.s3Uri("string")
.modelAccessConfig(ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
.acceptEula(false)
.build())
.build())
.build())
.modelDataUrl("string")
.modelPackageName("string")
.multiModelConfig(ModelContainerMultiModelConfigArgs.builder()
.modelCacheSetting("string")
.build())
.build())
.enableNetworkIsolation(false)
.inferenceExecutionConfig(ModelInferenceExecutionConfigArgs.builder()
.mode("string")
.build())
.name("string")
.primaryContainer(ModelPrimaryContainerArgs.builder()
.containerHostname("string")
.environment(Map.of("string", "string"))
.image("string")
.imageConfig(ModelPrimaryContainerImageConfigArgs.builder()
.repositoryAccessMode("string")
.repositoryAuthConfig(ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs.builder()
.repositoryCredentialsProviderArn("string")
.build())
.build())
.inferenceSpecificationName("string")
.mode("string")
.modelDataSource(ModelPrimaryContainerModelDataSourceArgs.builder()
.s3DataSources(ModelPrimaryContainerModelDataSourceS3DataSourceArgs.builder()
.compressionType("string")
.s3DataType("string")
.s3Uri("string")
.modelAccessConfig(ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
.acceptEula(false)
.build())
.build())
.build())
.modelDataUrl("string")
.modelPackageName("string")
.multiModelConfig(ModelPrimaryContainerMultiModelConfigArgs.builder()
.modelCacheSetting("string")
.build())
.build())
.tags(Map.of("string", "string"))
.vpcConfig(ModelVpcConfigArgs.builder()
.securityGroupIds("string")
.subnets("string")
.build())
.build());
examplemodel_resource_resource_from_sagemakermodel = aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel",
execution_role_arn="string",
containers=[{
"container_hostname": "string",
"environment": {
"string": "string",
},
"image": "string",
"image_config": {
"repository_access_mode": "string",
"repository_auth_config": {
"repository_credentials_provider_arn": "string",
},
},
"inference_specification_name": "string",
"mode": "string",
"model_data_source": {
"s3_data_sources": [{
"compression_type": "string",
"s3_data_type": "string",
"s3_uri": "string",
"model_access_config": {
"accept_eula": False,
},
}],
},
"model_data_url": "string",
"model_package_name": "string",
"multi_model_config": {
"model_cache_setting": "string",
},
}],
enable_network_isolation=False,
inference_execution_config={
"mode": "string",
},
name="string",
primary_container={
"container_hostname": "string",
"environment": {
"string": "string",
},
"image": "string",
"image_config": {
"repository_access_mode": "string",
"repository_auth_config": {
"repository_credentials_provider_arn": "string",
},
},
"inference_specification_name": "string",
"mode": "string",
"model_data_source": {
"s3_data_sources": [{
"compression_type": "string",
"s3_data_type": "string",
"s3_uri": "string",
"model_access_config": {
"accept_eula": False,
},
}],
},
"model_data_url": "string",
"model_package_name": "string",
"multi_model_config": {
"model_cache_setting": "string",
},
},
tags={
"string": "string",
},
vpc_config={
"security_group_ids": ["string"],
"subnets": ["string"],
})
const examplemodelResourceResourceFromSagemakermodel = new aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", {
executionRoleArn: "string",
containers: [{
containerHostname: "string",
environment: {
string: "string",
},
image: "string",
imageConfig: {
repositoryAccessMode: "string",
repositoryAuthConfig: {
repositoryCredentialsProviderArn: "string",
},
},
inferenceSpecificationName: "string",
mode: "string",
modelDataSource: {
s3DataSources: [{
compressionType: "string",
s3DataType: "string",
s3Uri: "string",
modelAccessConfig: {
acceptEula: false,
},
}],
},
modelDataUrl: "string",
modelPackageName: "string",
multiModelConfig: {
modelCacheSetting: "string",
},
}],
enableNetworkIsolation: false,
inferenceExecutionConfig: {
mode: "string",
},
name: "string",
primaryContainer: {
containerHostname: "string",
environment: {
string: "string",
},
image: "string",
imageConfig: {
repositoryAccessMode: "string",
repositoryAuthConfig: {
repositoryCredentialsProviderArn: "string",
},
},
inferenceSpecificationName: "string",
mode: "string",
modelDataSource: {
s3DataSources: [{
compressionType: "string",
s3DataType: "string",
s3Uri: "string",
modelAccessConfig: {
acceptEula: false,
},
}],
},
modelDataUrl: "string",
modelPackageName: "string",
multiModelConfig: {
modelCacheSetting: "string",
},
},
tags: {
string: "string",
},
vpcConfig: {
securityGroupIds: ["string"],
subnets: ["string"],
},
});
type: aws:sagemaker:Model
properties:
containers:
- containerHostname: string
environment:
string: string
image: string
imageConfig:
repositoryAccessMode: string
repositoryAuthConfig:
repositoryCredentialsProviderArn: string
inferenceSpecificationName: string
mode: string
modelDataSource:
s3DataSources:
- compressionType: string
modelAccessConfig:
acceptEula: false
s3DataType: string
s3Uri: string
modelDataUrl: string
modelPackageName: string
multiModelConfig:
modelCacheSetting: string
enableNetworkIsolation: false
executionRoleArn: string
inferenceExecutionConfig:
mode: string
name: string
primaryContainer:
containerHostname: string
environment:
string: string
image: string
imageConfig:
repositoryAccessMode: string
repositoryAuthConfig:
repositoryCredentialsProviderArn: string
inferenceSpecificationName: string
mode: string
modelDataSource:
s3DataSources:
- compressionType: string
modelAccessConfig:
acceptEula: false
s3DataType: string
s3Uri: string
modelDataUrl: string
modelPackageName: string
multiModelConfig:
modelCacheSetting: string
tags:
string: string
vpcConfig:
securityGroupIds:
- string
subnets:
- string
Model Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Model resource accepts the following input properties:
- Execution
Role stringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- Containers
List<Model
Container> - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - Enable
Network boolIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- Inference
Execution ModelConfig Inference Execution Config - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- Primary
Container ModelPrimary Container - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Dictionary<string, string>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Vpc
Config ModelVpc Config - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- Execution
Role stringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- Containers
[]Model
Container Args - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - Enable
Network boolIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- Inference
Execution ModelConfig Inference Execution Config Args - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- Primary
Container ModelPrimary Container Args - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - map[string]string
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Vpc
Config ModelVpc Config Args - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- execution
Role StringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers
List<Model
Container> - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable
Network BooleanIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inference
Execution ModelConfig Inference Execution Config - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary
Container ModelPrimary Container - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Map<String,String>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - vpc
Config ModelVpc Config - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- execution
Role stringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers
Model
Container[] - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable
Network booleanIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inference
Execution ModelConfig Inference Execution Config - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary
Container ModelPrimary Container - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - {[key: string]: string}
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - vpc
Config ModelVpc Config - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- execution_
role_ strarn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers
Sequence[Model
Container Args] - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable_
network_ boolisolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inference_
execution_ Modelconfig Inference Execution Config Args - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name str
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary_
container ModelPrimary Container Args - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Mapping[str, str]
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - vpc_
config ModelVpc Config Args - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- execution
Role StringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- containers List<Property Map>
- Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable
Network BooleanIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- inference
Execution Property MapConfig - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary
Container Property Map - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Map<String>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - vpc
Config Property Map - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
Outputs
All input properties are implicitly available as output properties. Additionally, the Model resource produces the following output properties:
Look up Existing Model Resource
Get an existing Model resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: ModelState, opts?: CustomResourceOptions): Model
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
containers: Optional[Sequence[ModelContainerArgs]] = None,
enable_network_isolation: Optional[bool] = None,
execution_role_arn: Optional[str] = None,
inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
name: Optional[str] = None,
primary_container: Optional[ModelPrimaryContainerArgs] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None,
vpc_config: Optional[ModelVpcConfigArgs] = None) -> Model
func GetModel(ctx *Context, name string, id IDInput, state *ModelState, opts ...ResourceOption) (*Model, error)
public static Model Get(string name, Input<string> id, ModelState? state, CustomResourceOptions? opts = null)
public static Model get(String name, Output<String> id, ModelState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Arn string
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- Containers
List<Model
Container> - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - Enable
Network boolIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- Execution
Role stringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- Inference
Execution ModelConfig Inference Execution Config - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- Primary
Container ModelPrimary Container - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Dictionary<string, string>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Dictionary<string, string>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - Vpc
Config ModelVpc Config - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- Arn string
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- Containers
[]Model
Container Args - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - Enable
Network boolIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- Execution
Role stringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- Inference
Execution ModelConfig Inference Execution Config Args - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- Name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- Primary
Container ModelPrimary Container Args - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - map[string]string
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - map[string]string
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - Vpc
Config ModelVpc Config Args - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn String
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers
List<Model
Container> - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable
Network BooleanIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- execution
Role StringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inference
Execution ModelConfig Inference Execution Config - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary
Container ModelPrimary Container - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Map<String,String>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Map<String,String>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - vpc
Config ModelVpc Config - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn string
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers
Model
Container[] - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable
Network booleanIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- execution
Role stringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inference
Execution ModelConfig Inference Execution Config - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name string
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary
Container ModelPrimary Container - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - {[key: string]: string}
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - {[key: string]: string}
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - vpc
Config ModelVpc Config - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn str
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers
Sequence[Model
Container Args] - Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable_
network_ boolisolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- execution_
role_ strarn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inference_
execution_ Modelconfig Inference Execution Config Args - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name str
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary_
container ModelPrimary Container Args - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Mapping[str, str]
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Mapping[str, str]
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - vpc_
config ModelVpc Config Args - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
- arn String
- The Amazon Resource Name (ARN) assigned by AWS to this model.
- containers List<Property Map>
- Specifies containers in the inference pipeline. If not specified, the
primary_container
argument is required. Fields are documented below. - enable
Network BooleanIsolation - Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
- execution
Role StringArn - A role that SageMaker can assume to access model artifacts and docker images for deployment.
- inference
Execution Property MapConfig - Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
- name String
- The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
- primary
Container Property Map - The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the
container
argument is required. Fields are documented below. - Map<String>
- A map of tags to assign to the resource. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Map<String>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block. - vpc
Config Property Map - Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
Supporting Types
ModelContainer, ModelContainerArgs
- Container
Hostname string - The DNS host name for the container.
- Environment Dictionary<string, string>
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- Image
Config ModelContainer Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- Inference
Specification stringName - The inference specification name in the model package version.
- Mode string
- The container hosts value
SingleModel/MultiModel
. The default value is SingleModel
. - Model
Data ModelSource Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- Model
Data stringUrl - The URL for the S3 location where model artifacts are stored.
- Model
Package stringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- Multi
Model ModelConfig Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- Container
Hostname string - The DNS host name for the container.
- Environment map[string]string
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- Image
Config ModelContainer Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- Inference
Specification stringName - The inference specification name in the model package version.
- Mode string
- The container hosts value
SingleModel/MultiModel
. The default value is SingleModel
. - Model
Data ModelSource Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- Model
Data stringUrl - The URL for the S3 location where model artifacts are stored.
- Model
Package stringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- Multi
Model ModelConfig Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container
Hostname String - The DNS host name for the container.
- environment Map<String,String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- image
Config ModelContainer Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference
Specification StringName - The inference specification name in the model package version.
- mode String
- The container hosts value
SingleModel/MultiModel
. The default value is SingleModel
. - model
Data ModelSource Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model
Data StringUrl - The URL for the S3 location where model artifacts are stored.
- model
Package StringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi
Model ModelConfig Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container
Hostname string - The DNS host name for the container.
- environment {[key: string]: string}
- Environment variables for the Docker container. A list of key value pairs.
- image string
- The registry path where the inference code image is stored in Amazon ECR.
- image
Config ModelContainer Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference
Specification stringName - The inference specification name in the model package version.
- mode string
- The container hosts value
SingleModel/MultiModel
. The default value is SingleModel
. - model
Data ModelSource Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model
Data stringUrl - The URL for the S3 location where model artifacts are stored.
- model
Package stringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi
Model ModelConfig Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container_
hostname str - The DNS host name for the container.
- environment Mapping[str, str]
- Environment variables for the Docker container. A list of key value pairs.
- image str
- The registry path where the inference code image is stored in Amazon ECR.
- image_
config ModelContainer Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference_
specification_ strname - The inference specification name in the model package version.
- mode str
- The container hosts value
SingleModel/MultiModel
. The default value is SingleModel
. - model_
data_ Modelsource Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model_
data_ strurl - The URL for the S3 location where model artifacts are stored.
- model_
package_ strname - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi_
model_ Modelconfig Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container
Hostname String - The DNS host name for the container.
- environment Map<String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- image
Config Property Map - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference
Specification StringName - The inference specification name in the model package version.
- mode String
- The container hosts value
SingleModel/MultiModel
. The default value is SingleModel
. - model
Data Property MapSource - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model
Data StringUrl - The URL for the S3 location where model artifacts are stored.
- model
Package StringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi
Model Property MapConfig - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
ModelContainerImageConfig, ModelContainerImageConfigArgs
- Repository
Access stringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
and Vpc
. - Repository
Auth ModelConfig Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- Repository
Access stringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
and Vpc
. - Repository
Auth ModelConfig Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository
Access StringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
and Vpc
. - repository
Auth ModelConfig Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository
Access stringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
and Vpc
. - repository
Auth ModelConfig Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository_
access_ strmode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
and Vpc
. - repository_
auth_ Modelconfig Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository
Access StringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
and Vpc
. - repository
Auth Property MapConfig - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
ModelContainerImageConfigRepositoryAuthConfig, ModelContainerImageConfigRepositoryAuthConfigArgs
- Repository
Credentials stringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- Repository
Credentials stringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository
Credentials StringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository
Credentials stringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository_
credentials_ strprovider_ arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository
Credentials StringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
ModelContainerModelDataSource, ModelContainerModelDataSourceArgs
- S3Data
Sources List<ModelContainer Model Data Source S3Data Source> - The S3 location of model data to deploy.
- S3Data
Sources []ModelContainer Model Data Source S3Data Source - The S3 location of model data to deploy.
- s3Data
Sources List<ModelContainer Model Data Source S3Data Source> - The S3 location of model data to deploy.
- s3Data
Sources ModelContainer Model Data Source S3Data Source[] - The S3 location of model data to deploy.
- s3_
data_ Sequence[Modelsources Container Model Data Source S3Data Source] - The S3 location of model data to deploy.
- s3Data
Sources List<Property Map> - The S3 location of model data to deploy.
ModelContainerModelDataSourceS3DataSource, ModelContainerModelDataSourceS3DataSourceArgs
- Compression
Type string - How the model data is prepared. Allowed values are:
None
and Gzip
. - S3Data
Type string - The type of model data to deploy. Allowed values are:
S3Object
and S3Prefix
. - S3Uri string
- The S3 path of model data to deploy.
- Model
Access ModelConfig Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- Compression
Type string - How the model data is prepared. Allowed values are:
None
and Gzip
. - S3Data
Type string - The type of model data to deploy. Allowed values are:
S3Object
and S3Prefix
. - S3Uri string
- The S3 path of model data to deploy.
- Model
Access ModelConfig Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compression
Type String - How the model data is prepared. Allowed values are:
None
and Gzip
. - s3Data
Type String - The type of model data to deploy. Allowed values are:
S3Object
and S3Prefix
. - s3Uri String
- The S3 path of model data to deploy.
- model
Access ModelConfig Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compression
Type string - How the model data is prepared. Allowed values are:
None
and Gzip
. - s3Data
Type string - The type of model data to deploy. Allowed values are:
S3Object
and S3Prefix
. - s3Uri string
- The S3 path of model data to deploy.
- model
Access ModelConfig Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compression_type str - How the model data is prepared. Allowed values are:
None
and Gzip
. - s3_data_type str - The type of model data to deploy. Allowed values are:
S3Object
and S3Prefix
. - s3_uri str - The S3 path of model data to deploy.
- model_access_config ModelContainerModelDataSourceS3DataSourceModelAccessConfig - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compression
Type String - How the model data is prepared. Allowed values are:
None
andGzip
. - s3Data
Type String - The type of model data to deploy. Allowed values are:
S3Object
andS3Prefix
. - s3Uri String
- The S3 path of model data to deploy.
- model
Access Property MapConfig - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
ModelContainerModelDataSourceS3DataSourceModelAccessConfig, ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs
- Accept
Eula bool - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- Accept
Eula bool - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- accept
Eula Boolean - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- accept
Eula boolean - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- accept_eula bool - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- accept
Eula Boolean - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
ModelContainerMultiModelConfig, ModelContainerMultiModelConfigArgs
- Model
Cache stringSetting - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to
Disabled
. Allowed values are:Enabled
andDisabled
.
- Model
Cache stringSetting - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to
Disabled
. Allowed values are:Enabled
andDisabled
.
- model
Cache StringSetting - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to
Disabled
. Allowed values are:Enabled
andDisabled
.
- model
Cache stringSetting - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to
Disabled
. Allowed values are:Enabled
andDisabled
.
- model_cache_setting str - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to
Disabled
. Allowed values are: Enabled
and Disabled
.
- model
Cache StringSetting - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to
Disabled
. Allowed values are:Enabled
andDisabled
.
ModelInferenceExecutionConfig, ModelInferenceExecutionConfigArgs
- Mode string
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
.
- Mode string
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
.
- mode String
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
.
- mode string
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
.
- mode str
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
.
- mode String
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
.
ModelPrimaryContainer, ModelPrimaryContainerArgs
- Container
Hostname string - The DNS host name for the container.
- Environment Dictionary<string, string>
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- Image
Config ModelPrimary Container Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- Inference
Specification stringName - The inference specification name in the model package version.
- Mode string
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
. - Model
Data ModelSource Primary Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- Model
Data stringUrl - The URL for the S3 location where model artifacts are stored.
- Model
Package stringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- Multi
Model ModelConfig Primary Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- Container
Hostname string - The DNS host name for the container.
- Environment map[string]string
- Environment variables for the Docker container. A list of key value pairs.
- Image string
- The registry path where the inference code image is stored in Amazon ECR.
- Image
Config ModelPrimary Container Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- Inference
Specification stringName - The inference specification name in the model package version.
- Mode string
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
. - Model
Data ModelSource Primary Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- Model
Data stringUrl - The URL for the S3 location where model artifacts are stored.
- Model
Package stringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- Multi
Model ModelConfig Primary Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container
Hostname String - The DNS host name for the container.
- environment Map<String,String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- image
Config ModelPrimary Container Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference
Specification StringName - The inference specification name in the model package version.
- mode String
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
. - model
Data ModelSource Primary Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model
Data StringUrl - The URL for the S3 location where model artifacts are stored.
- model
Package StringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi
Model ModelConfig Primary Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container
Hostname string - The DNS host name for the container.
- environment {[key: string]: string}
- Environment variables for the Docker container. A list of key value pairs.
- image string
- The registry path where the inference code image is stored in Amazon ECR.
- image
Config ModelPrimary Container Image Config - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference
Specification stringName - The inference specification name in the model package version.
- mode string
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
. - model
Data ModelSource Primary Container Model Data Source - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model
Data stringUrl - The URL for the S3 location where model artifacts are stored.
- model
Package stringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi
Model ModelConfig Primary Container Multi Model Config - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container_hostname str - The DNS host name for the container.
- environment Mapping[str, str]
- Environment variables for the Docker container. A list of key value pairs.
- image str
- The registry path where the inference code image is stored in Amazon ECR.
- image_config ModelPrimaryContainerImageConfig - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference_specification_name str - The inference specification name in the model package version.
- mode str
- The container hosts value
SingleModel/MultiModel
. The default value is SingleModel
. - model_data_source ModelPrimaryContainerModelDataSource - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model_data_url str - The URL for the S3 location where model artifacts are stored.
- model_package_name str - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi_model_config ModelPrimaryContainerMultiModelConfig - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
- container
Hostname String - The DNS host name for the container.
- environment Map<String>
- Environment variables for the Docker container. A list of key value pairs.
- image String
- The registry path where the inference code image is stored in Amazon ECR.
- image
Config Property Map - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
- inference
Specification StringName - The inference specification name in the model package version.
- mode String
- The container hosts value
SingleModel/MultiModel
. The default value isSingleModel
. - model
Data Property MapSource - The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker Developer Guide.
- model
Data StringUrl - The URL for the S3 location where model artifacts are stored.
- model
Package StringName - The Amazon Resource Name (ARN) of the model package to use to create the model.
- multi
Model Property MapConfig - Specifies additional configuration for multi-model endpoints. see Multi Model Config.
ModelPrimaryContainerImageConfig, ModelPrimaryContainerImageConfigArgs
- Repository
Access stringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
andVpc
. - Repository
Auth ModelConfig Primary Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- Repository
Access stringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
andVpc
. - Repository
Auth ModelConfig Primary Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository
Access StringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
andVpc
. - repository
Auth ModelConfig Primary Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository
Access stringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
andVpc
. - repository
Auth ModelConfig Primary Container Image Config Repository Auth Config - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository_access_mode str - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
and Vpc
. - repository_auth_config ModelPrimaryContainerImageConfigRepositoryAuthConfig - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
- repository
Access StringMode - Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are:
Platform
andVpc
. - repository
Auth Property MapConfig - Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
ModelPrimaryContainerImageConfigRepositoryAuthConfig, ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs
- Repository
Credentials stringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- Repository
Credentials stringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository
Credentials StringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository
Credentials stringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository_credentials_provider_arn str - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
- repository
Credentials StringProvider Arn - The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
ModelPrimaryContainerModelDataSource, ModelPrimaryContainerModelDataSourceArgs
- S3Data
Sources List<ModelPrimary Container Model Data Source S3Data Source> - The S3 location of model data to deploy.
- S3Data
Sources []ModelPrimary Container Model Data Source S3Data Source - The S3 location of model data to deploy.
- s3Data
Sources List<ModelPrimary Container Model Data Source S3Data Source> - The S3 location of model data to deploy.
- s3Data
Sources ModelPrimary Container Model Data Source S3Data Source[] - The S3 location of model data to deploy.
- s3_data_sources Sequence[ModelPrimaryContainerModelDataSourceS3DataSource] - The S3 location of model data to deploy.
- s3Data
Sources List<Property Map> - The S3 location of model data to deploy.
ModelPrimaryContainerModelDataSourceS3DataSource, ModelPrimaryContainerModelDataSourceS3DataSourceArgs
- Compression
Type string - How the model data is prepared. Allowed values are:
None
andGzip
. - S3Data
Type string - The type of model data to deploy. Allowed values are:
S3Object
andS3Prefix
. - S3Uri string
- The S3 path of model data to deploy.
- Model
Access ModelConfig Primary Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- Compression
Type string - How the model data is prepared. Allowed values are:
None
andGzip
. - S3Data
Type string - The type of model data to deploy. Allowed values are:
S3Object
andS3Prefix
. - S3Uri string
- The S3 path of model data to deploy.
- Model
Access ModelConfig Primary Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compression
Type String - How the model data is prepared. Allowed values are:
None
andGzip
. - s3Data
Type String - The type of model data to deploy. Allowed values are:
S3Object
andS3Prefix
. - s3Uri String
- The S3 path of model data to deploy.
- model
Access ModelConfig Primary Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compression
Type string - How the model data is prepared. Allowed values are:
None
andGzip
. - s3Data
Type string - The type of model data to deploy. Allowed values are:
S3Object
andS3Prefix
. - s3Uri string
- The S3 path of model data to deploy.
- model
Access ModelConfig Primary Container Model Data Source S3Data Source Model Access Config - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compression_type str - How the model data is prepared. Allowed values are: None and Gzip.
- s3_data_type str - The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
- s3_uri str - The S3 path of model data to deploy.
- model_access_config ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
- compressionType String - How the model data is prepared. Allowed values are: None and Gzip.
- s3DataType String - The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
- s3Uri String
- The S3 path of model data to deploy.
- modelAccessConfig Property Map - Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the [
model_access_config
configuration block]. see Model Access Config.
ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig, ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs
- AcceptEula bool - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- AcceptEula bool - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula Boolean - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula boolean - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- accept_eula bool - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
- acceptEula Boolean - Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as
true
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
ModelPrimaryContainerMultiModelConfig, ModelPrimaryContainerMultiModelConfigArgs
- ModelCacheSetting string - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
- ModelCacheSetting string - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
- modelCacheSetting String - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
- modelCacheSetting string - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
- model_cache_setting str - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
- modelCacheSetting String - Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
ModelVpcConfig, ModelVpcConfigArgs
- SecurityGroupIds List<string>
- Subnets List<string>
- SecurityGroupIds []string
- Subnets []string
- securityGroupIds List<String>
- subnets List<String>
- securityGroupIds string[]
- subnets string[]
- security_group_ids Sequence[str]
- subnets Sequence[str]
- securityGroupIds List<String>
- subnets List<String>
Import
Using pulumi import
, import models using the name
. For example:
$ pulumi import aws:sagemaker/model:Model test_model model-foo
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- AWS Classic pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
aws
Terraform Provider.