gcp.vertex.AiDeploymentResourcePool
Explore with Pulumi AI
"DeploymentResourcePool can be shared by multiple deployed models, whose underlying specification consists of dedicated resources."
To get more information about DeploymentResourcePool, see:
Example Usage
Vertex Ai Deployment Resource Pool
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// A deployment resource pool of dedicated GPU-backed machines, autoscaling
// between 1 and 2 replicas based on accelerator duty cycle.
const deploymentResourcePool = new gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool", {
    region: "us-central1",
    name: "example-deployment-resource-pool",
    dedicatedResources: {
        // One NVIDIA Tesla P4 attached to each n1-standard-4 replica.
        machineSpec: {
            machineType: "n1-standard-4",
            acceleratorType: "NVIDIA_TESLA_P4",
            acceleratorCount: 1,
        },
        minReplicaCount: 1,
        maxReplicaCount: 2,
        // Scale when GPU duty cycle deviates from the 60% target.
        autoscalingMetricSpecs: [{
            metricName: "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
            target: 60,
        }],
    },
});
import pulumi
import pulumi_gcp as gcp

# A deployment resource pool of dedicated GPU-backed machines, autoscaling
# between 1 and 2 replicas based on accelerator duty cycle.
deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool(
    "deployment_resource_pool",
    region="us-central1",
    name="example-deployment-resource-pool",
    dedicated_resources={
        # One NVIDIA Tesla P4 attached to each n1-standard-4 replica.
        "machine_spec": {
            "machine_type": "n1-standard-4",
            "accelerator_type": "NVIDIA_TESLA_P4",
            "accelerator_count": 1,
        },
        "min_replica_count": 1,
        "max_replica_count": 2,
        # Scale when GPU duty cycle deviates from the 60% target.
        "autoscaling_metric_specs": [{
            "metric_name": "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
            "target": 60,
        }],
    })
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/vertex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := vertex.NewAiDeploymentResourcePool(ctx, "deployment_resource_pool", &vertex.AiDeploymentResourcePoolArgs{
Region: pulumi.String("us-central1"),
Name: pulumi.String("example-deployment-resource-pool"),
DedicatedResources: &vertex.AiDeploymentResourcePoolDedicatedResourcesArgs{
MachineSpec: &vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs{
MachineType: pulumi.String("n1-standard-4"),
AcceleratorType: pulumi.String("NVIDIA_TESLA_P4"),
AcceleratorCount: pulumi.Int(1),
},
MinReplicaCount: pulumi.Int(1),
MaxReplicaCount: pulumi.Int(2),
AutoscalingMetricSpecs: vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArray{
&vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs{
MetricName: pulumi.String("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle"),
Target: pulumi.Int(60),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // A deployment resource pool of dedicated GPU-backed machines,
    // autoscaling between 1 and 2 replicas on accelerator duty cycle.
    var deploymentResourcePool = new Gcp.Vertex.AiDeploymentResourcePool("deployment_resource_pool", new()
    {
        Region = "us-central1",
        Name = "example-deployment-resource-pool",
        DedicatedResources = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesArgs
        {
            // One NVIDIA Tesla P4 attached to each n1-standard-4 replica.
            MachineSpec = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs
            {
                MachineType = "n1-standard-4",
                AcceleratorType = "NVIDIA_TESLA_P4",
                AcceleratorCount = 1,
            },
            MinReplicaCount = 1,
            MaxReplicaCount = 2,
            // Scale when GPU duty cycle deviates from the 60% target.
            AutoscalingMetricSpecs = new[]
            {
                new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs
                {
                    MetricName = "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
                    Target = 60,
                },
            },
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.vertex.AiDeploymentResourcePool;
import com.pulumi.gcp.vertex.AiDeploymentResourcePoolArgs;
import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesArgs;
// Fix: this import was missing although the type is used below in
// .autoscalingMetricSpecs(...), so the example did not compile.
import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs;
import com.pulumi.gcp.vertex.inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // A deployment resource pool of dedicated GPU-backed machines,
        // autoscaling between 1 and 2 replicas on accelerator duty cycle.
        var deploymentResourcePool = new AiDeploymentResourcePool("deploymentResourcePool", AiDeploymentResourcePoolArgs.builder()
            .region("us-central1")
            .name("example-deployment-resource-pool")
            .dedicatedResources(AiDeploymentResourcePoolDedicatedResourcesArgs.builder()
                // One NVIDIA Tesla P4 attached to each n1-standard-4 replica.
                .machineSpec(AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs.builder()
                    .machineType("n1-standard-4")
                    .acceleratorType("NVIDIA_TESLA_P4")
                    .acceleratorCount(1)
                    .build())
                .minReplicaCount(1)
                .maxReplicaCount(2)
                // Scale when GPU duty cycle deviates from the 60% target.
                .autoscalingMetricSpecs(AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs.builder()
                    .metricName("aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle")
                    .target(60)
                    .build())
                .build())
            .build());
    }
}
resources:
  # A deployment resource pool of dedicated GPU-backed machines,
  # autoscaling between 1 and 2 replicas on accelerator duty cycle.
  deploymentResourcePool:
    type: gcp:vertex:AiDeploymentResourcePool
    name: deployment_resource_pool
    properties:
      region: us-central1
      name: example-deployment-resource-pool
      dedicatedResources:
        # One NVIDIA Tesla P4 attached to each n1-standard-4 replica.
        machineSpec:
          machineType: n1-standard-4
          acceleratorType: NVIDIA_TESLA_P4
          acceleratorCount: 1
        minReplicaCount: 1
        maxReplicaCount: 2
        # Scale when GPU duty cycle deviates from the 60% target.
        autoscalingMetricSpecs:
          - metricName: aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
            target: 60
Create AiDeploymentResourcePool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new AiDeploymentResourcePool(name: string, args?: AiDeploymentResourcePoolArgs, opts?: CustomResourceOptions);
@overload
def AiDeploymentResourcePool(resource_name: str,
args: Optional[AiDeploymentResourcePoolArgs] = None,
opts: Optional[ResourceOptions] = None)
@overload
def AiDeploymentResourcePool(resource_name: str,
opts: Optional[ResourceOptions] = None,
dedicated_resources: Optional[AiDeploymentResourcePoolDedicatedResourcesArgs] = None,
name: Optional[str] = None,
project: Optional[str] = None,
region: Optional[str] = None)
func NewAiDeploymentResourcePool(ctx *Context, name string, args *AiDeploymentResourcePoolArgs, opts ...ResourceOption) (*AiDeploymentResourcePool, error)
public AiDeploymentResourcePool(string name, AiDeploymentResourcePoolArgs? args = null, CustomResourceOptions? opts = null)
public AiDeploymentResourcePool(String name, AiDeploymentResourcePoolArgs args)
public AiDeploymentResourcePool(String name, AiDeploymentResourcePoolArgs args, CustomResourceOptions options)
type: gcp:vertex:AiDeploymentResourcePool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args AiDeploymentResourcePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args AiDeploymentResourcePoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args AiDeploymentResourcePoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args AiDeploymentResourcePoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args AiDeploymentResourcePoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example: placeholder values for every input property.
var aiDeploymentResourcePoolResource = new Gcp.Vertex.AiDeploymentResourcePool("aiDeploymentResourcePoolResource", new()
{
    DedicatedResources = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesArgs
    {
        MachineSpec = new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs
        {
            AcceleratorCount = 0,
            AcceleratorType = "string",
            MachineType = "string",
        },
        MinReplicaCount = 0,
        AutoscalingMetricSpecs = new[]
        {
            new Gcp.Vertex.Inputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs
            {
                MetricName = "string",
                Target = 0,
            },
        },
        MaxReplicaCount = 0,
    },
    Name = "string",
    Project = "string",
    Region = "string",
});
// Reference example: placeholder values for every input property.
example, err := vertex.NewAiDeploymentResourcePool(ctx, "aiDeploymentResourcePoolResource", &vertex.AiDeploymentResourcePoolArgs{
	DedicatedResources: &vertex.AiDeploymentResourcePoolDedicatedResourcesArgs{
		MachineSpec: &vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs{
			AcceleratorCount: pulumi.Int(0),
			AcceleratorType:  pulumi.String("string"),
			MachineType:      pulumi.String("string"),
		},
		MinReplicaCount: pulumi.Int(0),
		AutoscalingMetricSpecs: vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArray{
			&vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs{
				MetricName: pulumi.String("string"),
				Target:     pulumi.Int(0),
			},
		},
		MaxReplicaCount: pulumi.Int(0),
	},
	Name:    pulumi.String("string"),
	Project: pulumi.String("string"),
	Region:  pulumi.String("string"),
})
// Reference example: placeholder values for every input property.
var aiDeploymentResourcePoolResource = new AiDeploymentResourcePool("aiDeploymentResourcePoolResource", AiDeploymentResourcePoolArgs.builder()
    .dedicatedResources(AiDeploymentResourcePoolDedicatedResourcesArgs.builder()
        .machineSpec(AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs.builder()
            .acceleratorCount(0)
            .acceleratorType("string")
            .machineType("string")
            .build())
        .minReplicaCount(0)
        .autoscalingMetricSpecs(AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs.builder()
            .metricName("string")
            .target(0)
            .build())
        .maxReplicaCount(0)
        .build())
    .name("string")
    .project("string")
    .region("string")
    .build());
# Reference example: placeholder values for every input property.
ai_deployment_resource_pool_resource = gcp.vertex.AiDeploymentResourcePool(
    "aiDeploymentResourcePoolResource",
    dedicated_resources={
        "machine_spec": {
            "accelerator_count": 0,
            "accelerator_type": "string",
            "machine_type": "string",
        },
        "min_replica_count": 0,
        "autoscaling_metric_specs": [{
            "metric_name": "string",
            "target": 0,
        }],
        "max_replica_count": 0,
    },
    name="string",
    project="string",
    region="string")
// Reference example: placeholder values for every input property.
const aiDeploymentResourcePoolResource = new gcp.vertex.AiDeploymentResourcePool("aiDeploymentResourcePoolResource", {
    dedicatedResources: {
        machineSpec: {
            acceleratorCount: 0,
            acceleratorType: "string",
            machineType: "string",
        },
        minReplicaCount: 0,
        autoscalingMetricSpecs: [{
            metricName: "string",
            target: 0,
        }],
        maxReplicaCount: 0,
    },
    name: "string",
    project: "string",
    region: "string",
});
# Reference example: placeholder values for every input property.
type: gcp:vertex:AiDeploymentResourcePool
properties:
  dedicatedResources:
    autoscalingMetricSpecs:
      - metricName: string
        target: 0
    machineSpec:
      acceleratorCount: 0
      acceleratorType: string
      machineType: string
    maxReplicaCount: 0
    minReplicaCount: 0
  name: string
  project: string
  region: string
AiDeploymentResourcePool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The AiDeploymentResourcePool resource accepts the following input properties:
- Dedicated
Resources AiDeployment Resource Pool Dedicated Resources - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- Name string
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- The region of deployment resource pool, e.g. us-central1.
- Dedicated
Resources AiDeployment Resource Pool Dedicated Resources Args - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- Name string
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- The region of deployment resource pool. eg us-central1
- dedicated
Resources AiDeployment Resource Pool Dedicated Resources - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name String
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- The region of deployment resource pool. eg us-central1
- dedicated
Resources AiDeployment Resource Pool Dedicated Resources - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name string
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region string
- The region of deployment resource pool. eg us-central1
- dedicated_
resources AiDeployment Resource Pool Dedicated Resources Args - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name str
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region str
- The region of deployment resource pool. eg us-central1
- dedicated
Resources Property Map - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name String
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- The region of deployment resource pool. eg us-central1
Outputs
All input properties are implicitly available as output properties. Additionally, the AiDeploymentResourcePool resource produces the following output properties:
- Create
Time string - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- Id string
- The provider-assigned unique ID for this managed resource.
- Create
Time string - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- Id string
- The provider-assigned unique ID for this managed resource.
- create
Time String - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- id String
- The provider-assigned unique ID for this managed resource.
- create
Time string - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- id string
- The provider-assigned unique ID for this managed resource.
- create_
time str - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- id str
- The provider-assigned unique ID for this managed resource.
- create
Time String - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing AiDeploymentResourcePool Resource
Get an existing AiDeploymentResourcePool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: AiDeploymentResourcePoolState, opts?: CustomResourceOptions): AiDeploymentResourcePool
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
create_time: Optional[str] = None,
dedicated_resources: Optional[AiDeploymentResourcePoolDedicatedResourcesArgs] = None,
name: Optional[str] = None,
project: Optional[str] = None,
region: Optional[str] = None) -> AiDeploymentResourcePool
func GetAiDeploymentResourcePool(ctx *Context, name string, id IDInput, state *AiDeploymentResourcePoolState, opts ...ResourceOption) (*AiDeploymentResourcePool, error)
public static AiDeploymentResourcePool Get(string name, Input<string> id, AiDeploymentResourcePoolState? state, CustomResourceOptions? opts = null)
public static AiDeploymentResourcePool get(String name, Output<String> id, AiDeploymentResourcePoolState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Create
Time string - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- Dedicated
Resources AiDeployment Resource Pool Dedicated Resources - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- Name string
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- The region of deployment resource pool. eg us-central1
- Create
Time string - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- Dedicated
Resources AiDeployment Resource Pool Dedicated Resources Args - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- Name string
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- The region of deployment resource pool. eg us-central1
- create
Time String - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- dedicated
Resources AiDeployment Resource Pool Dedicated Resources - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name String
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- The region of deployment resource pool. eg us-central1
- create
Time string - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- dedicated
Resources AiDeployment Resource Pool Dedicated Resources - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name string
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region string
- The region of deployment resource pool. eg us-central1
- create_
time str - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- dedicated_
resources AiDeployment Resource Pool Dedicated Resources Args - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name str
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region str
- The region of deployment resource pool. eg us-central1
- create
Time String - A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
- dedicated
Resources Property Map - The underlying dedicated resources that the deployment resource pool uses. Structure is documented below.
- name String
- The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are
/^a-z?$/
. - project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- The region of deployment resource pool. eg us-central1
Supporting Types
AiDeploymentResourcePoolDedicatedResources, AiDeploymentResourcePoolDedicatedResourcesArgs
- Machine
Spec AiDeployment Resource Pool Dedicated Resources Machine Spec - The specification of a single machine used by the prediction Structure is documented below.
- Min
Replica intCount - The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
- Autoscaling
Metric List<AiSpecs Deployment Resource Pool Dedicated Resources Autoscaling Metric Spec> - A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
- Max
Replica intCount - The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
- Machine
Spec AiDeployment Resource Pool Dedicated Resources Machine Spec - The specification of a single machine used by the prediction Structure is documented below.
- Min
Replica intCount - The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
- Autoscaling
Metric []AiSpecs Deployment Resource Pool Dedicated Resources Autoscaling Metric Spec - A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
- Max
Replica intCount - The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
- machine
Spec AiDeployment Resource Pool Dedicated Resources Machine Spec - The specification of a single machine used by the prediction Structure is documented below.
- min
Replica IntegerCount - The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
- autoscaling
Metric List<AiSpecs Deployment Resource Pool Dedicated Resources Autoscaling Metric Spec> - A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
- max
Replica IntegerCount - The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
- machine
Spec AiDeployment Resource Pool Dedicated Resources Machine Spec - The specification of a single machine used by the prediction Structure is documented below.
- min
Replica numberCount - The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
- autoscaling
Metric AiSpecs Deployment Resource Pool Dedicated Resources Autoscaling Metric Spec[] - A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
- max
Replica numberCount - The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
- machine_
spec AiDeployment Resource Pool Dedicated Resources Machine Spec - The specification of a single machine used by the prediction Structure is documented below.
- min_
replica_ intcount - The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
- autoscaling_
metric_ Sequence[Aispecs Deployment Resource Pool Dedicated Resources Autoscaling Metric Spec] - A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
- max_
replica_ intcount - The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
- machine
Spec Property Map - The specification of a single machine used by the prediction Structure is documented below.
- min
Replica NumberCount - The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
- autoscaling
Metric List<Property Map>Specs - A list of the metric specifications that overrides a resource utilization metric. Structure is documented below.
- max
Replica NumberCount - The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec, AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs
- Metric
Name string - The resource metric name. Supported metrics: For Online Prediction: *
aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
*aiplatform.googleapis.com/prediction/online/cpu/utilization
- Target int
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
- Metric
Name string - The resource metric name. Supported metrics: For Online Prediction: *
aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
*aiplatform.googleapis.com/prediction/online/cpu/utilization
- Target int
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
- metric
Name String - The resource metric name. Supported metrics: For Online Prediction: *
aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
*aiplatform.googleapis.com/prediction/online/cpu/utilization
- target Integer
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
- metric
Name string - The resource metric name. Supported metrics: For Online Prediction: *
aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
*aiplatform.googleapis.com/prediction/online/cpu/utilization
- target number
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
- metric_
name str - The resource metric name. Supported metrics: For Online Prediction: *
aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
*aiplatform.googleapis.com/prediction/online/cpu/utilization
- target int
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
- metric
Name String - The resource metric name. Supported metrics: For Online Prediction: *
aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle
*aiplatform.googleapis.com/prediction/online/cpu/utilization
- target Number
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
AiDeploymentResourcePoolDedicatedResourcesMachineSpec, AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs
- Accelerator
Count int - The number of accelerators to attach to the machine.
- Accelerator
Type string - The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
- Machine
Type string - The type of the machine. See the list of machine types supported for prediction.
- Accelerator
Count int - The number of accelerators to attach to the machine.
- Accelerator
Type string - The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
- Machine
Type string - The type of the machine. See the list of machine types supported for prediction.
- accelerator
Count Integer - The number of accelerators to attach to the machine.
- accelerator
Type String - The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
- machine
Type String - The type of the machine. See the list of machine types supported for prediction.
- accelerator
Count number - The number of accelerators to attach to the machine.
- accelerator
Type string - The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
- machine
Type string - The type of the machine. See the list of machine types supported for prediction.
- accelerator_
count int - The number of accelerators to attach to the machine.
- accelerator_
type str - The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
- machine_
type str - The type of the machine. See the list of machine types supported for prediction.
- accelerator
Count Number - The number of accelerators to attach to the machine.
- accelerator
Type String - The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values here.
- machine
Type String - The type of the machine. See the list of machine types supported for prediction.
Import
DeploymentResourcePool can be imported using any of these accepted formats:
projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
{{project}}/{{region}}/{{name}}
{{region}}/{{name}}
{{name}}
When using the pulumi import
command, DeploymentResourcePool can be imported using one of the formats above. For example:
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.