gcp.compute.RegionAutoscaler
Represents an Autoscaler resource.
Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define.
To get more information about RegionAutoscaler, see:
- API documentation
- How-to Guides
Example Usage
Region Autoscaler Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const foobarInstanceTemplate = new gcp.compute.InstanceTemplate("foobar", {
name: "my-instance-template",
machineType: "e2-standard-4",
disks: [{
sourceImage: "debian-cloud/debian-11",
diskSizeGb: 250,
}],
networkInterfaces: [{
network: "default",
accessConfigs: [{
networkTier: "PREMIUM",
}],
}],
serviceAccount: {
scopes: [
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/pubsub",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/trace.append",
],
},
});
const foobarTargetPool = new gcp.compute.TargetPool("foobar", {name: "my-target-pool"});
const foobarRegionInstanceGroupManager = new gcp.compute.RegionInstanceGroupManager("foobar", {
name: "my-region-igm",
region: "us-central1",
versions: [{
instanceTemplate: foobarInstanceTemplate.id,
name: "primary",
}],
targetPools: [foobarTargetPool.id],
baseInstanceName: "foobar",
});
const foobar = new gcp.compute.RegionAutoscaler("foobar", {
name: "my-region-autoscaler",
region: "us-central1",
target: foobarRegionInstanceGroupManager.id,
autoscalingPolicy: {
maxReplicas: 5,
minReplicas: 1,
cooldownPeriod: 60,
cpuUtilization: {
target: 0.5,
},
},
});
const debian9 = gcp.compute.getImage({
family: "debian-11",
project: "debian-cloud",
});
import pulumi
import pulumi_gcp as gcp
foobar_instance_template = gcp.compute.InstanceTemplate("foobar",
name="my-instance-template",
machine_type="e2-standard-4",
disks=[{
"source_image": "debian-cloud/debian-11",
"disk_size_gb": 250,
}],
network_interfaces=[{
"network": "default",
"access_configs": [{
"network_tier": "PREMIUM",
}],
}],
service_account={
"scopes": [
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/pubsub",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/trace.append",
],
})
foobar_target_pool = gcp.compute.TargetPool("foobar", name="my-target-pool")
foobar_region_instance_group_manager = gcp.compute.RegionInstanceGroupManager("foobar",
name="my-region-igm",
region="us-central1",
versions=[{
"instance_template": foobar_instance_template.id,
"name": "primary",
}],
target_pools=[foobar_target_pool.id],
base_instance_name="foobar")
foobar = gcp.compute.RegionAutoscaler("foobar",
name="my-region-autoscaler",
region="us-central1",
target=foobar_region_instance_group_manager.id,
autoscaling_policy={
"max_replicas": 5,
"min_replicas": 1,
"cooldown_period": 60,
"cpu_utilization": {
"target": 0.5,
},
})
debian9 = gcp.compute.get_image(family="debian-11",
project="debian-cloud")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
foobarInstanceTemplate, err := compute.NewInstanceTemplate(ctx, "foobar", &compute.InstanceTemplateArgs{
Name: pulumi.String("my-instance-template"),
MachineType: pulumi.String("e2-standard-4"),
Disks: compute.InstanceTemplateDiskArray{
&compute.InstanceTemplateDiskArgs{
SourceImage: pulumi.String("debian-cloud/debian-11"),
DiskSizeGb: pulumi.Int(250),
},
},
NetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{
&compute.InstanceTemplateNetworkInterfaceArgs{
Network: pulumi.String("default"),
AccessConfigs: compute.InstanceTemplateNetworkInterfaceAccessConfigArray{
&compute.InstanceTemplateNetworkInterfaceAccessConfigArgs{
NetworkTier: pulumi.String("PREMIUM"),
},
},
},
},
ServiceAccount: &compute.InstanceTemplateServiceAccountArgs{
Scopes: pulumi.StringArray{
pulumi.String("https://www.googleapis.com/auth/devstorage.read_only"),
pulumi.String("https://www.googleapis.com/auth/logging.write"),
pulumi.String("https://www.googleapis.com/auth/monitoring.write"),
pulumi.String("https://www.googleapis.com/auth/pubsub"),
pulumi.String("https://www.googleapis.com/auth/service.management.readonly"),
pulumi.String("https://www.googleapis.com/auth/servicecontrol"),
pulumi.String("https://www.googleapis.com/auth/trace.append"),
},
},
})
if err != nil {
return err
}
foobarTargetPool, err := compute.NewTargetPool(ctx, "foobar", &compute.TargetPoolArgs{
Name: pulumi.String("my-target-pool"),
})
if err != nil {
return err
}
foobarRegionInstanceGroupManager, err := compute.NewRegionInstanceGroupManager(ctx, "foobar", &compute.RegionInstanceGroupManagerArgs{
Name: pulumi.String("my-region-igm"),
Region: pulumi.String("us-central1"),
Versions: compute.RegionInstanceGroupManagerVersionArray{
&compute.RegionInstanceGroupManagerVersionArgs{
InstanceTemplate: foobarInstanceTemplate.ID(),
Name: pulumi.String("primary"),
},
},
TargetPools: pulumi.StringArray{
foobarTargetPool.ID(),
},
BaseInstanceName: pulumi.String("foobar"),
})
if err != nil {
return err
}
_, err = compute.NewRegionAutoscaler(ctx, "foobar", &compute.RegionAutoscalerArgs{
Name: pulumi.String("my-region-autoscaler"),
Region: pulumi.String("us-central1"),
Target: foobarRegionInstanceGroupManager.ID(),
AutoscalingPolicy: &compute.RegionAutoscalerAutoscalingPolicyArgs{
MaxReplicas: pulumi.Int(5),
MinReplicas: pulumi.Int(1),
CooldownPeriod: pulumi.Int(60),
CpuUtilization: &compute.RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs{
Target: pulumi.Float64(0.5),
},
},
})
if err != nil {
return err
}
_, err = compute.LookupImage(ctx, &compute.LookupImageArgs{
Family: pulumi.StringRef("debian-11"),
Project: pulumi.StringRef("debian-cloud"),
}, nil)
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var foobarInstanceTemplate = new Gcp.Compute.InstanceTemplate("foobar", new()
{
Name = "my-instance-template",
MachineType = "e2-standard-4",
Disks = new[]
{
new Gcp.Compute.Inputs.InstanceTemplateDiskArgs
{
SourceImage = "debian-cloud/debian-11",
DiskSizeGb = 250,
},
},
NetworkInterfaces = new[]
{
new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs
{
Network = "default",
AccessConfigs = new[]
{
new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceAccessConfigArgs
{
NetworkTier = "PREMIUM",
},
},
},
},
ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs
{
Scopes = new[]
{
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/pubsub",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/trace.append",
},
},
});
var foobarTargetPool = new Gcp.Compute.TargetPool("foobar", new()
{
Name = "my-target-pool",
});
var foobarRegionInstanceGroupManager = new Gcp.Compute.RegionInstanceGroupManager("foobar", new()
{
Name = "my-region-igm",
Region = "us-central1",
Versions = new[]
{
new Gcp.Compute.Inputs.RegionInstanceGroupManagerVersionArgs
{
InstanceTemplate = foobarInstanceTemplate.Id,
Name = "primary",
},
},
TargetPools = new[]
{
foobarTargetPool.Id,
},
BaseInstanceName = "foobar",
});
var foobar = new Gcp.Compute.RegionAutoscaler("foobar", new()
{
Name = "my-region-autoscaler",
Region = "us-central1",
Target = foobarRegionInstanceGroupManager.Id,
AutoscalingPolicy = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyArgs
{
MaxReplicas = 5,
MinReplicas = 1,
CooldownPeriod = 60,
CpuUtilization = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs
{
Target = 0.5,
},
},
});
var debian9 = Gcp.Compute.GetImage.Invoke(new()
{
Family = "debian-11",
Project = "debian-cloud",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.InstanceTemplate;
import com.pulumi.gcp.compute.InstanceTemplateArgs;
import com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;
import com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;
import com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;
import com.pulumi.gcp.compute.TargetPool;
import com.pulumi.gcp.compute.TargetPoolArgs;
import com.pulumi.gcp.compute.RegionInstanceGroupManager;
import com.pulumi.gcp.compute.RegionInstanceGroupManagerArgs;
import com.pulumi.gcp.compute.inputs.RegionInstanceGroupManagerVersionArgs;
import com.pulumi.gcp.compute.RegionAutoscaler;
import com.pulumi.gcp.compute.RegionAutoscalerArgs;
import com.pulumi.gcp.compute.inputs.RegionAutoscalerAutoscalingPolicyArgs;
import com.pulumi.gcp.compute.inputs.RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs;
import com.pulumi.gcp.compute.ComputeFunctions;
import com.pulumi.gcp.compute.inputs.GetImageArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var foobarInstanceTemplate = new InstanceTemplate("foobarInstanceTemplate", InstanceTemplateArgs.builder()
.name("my-instance-template")
.machineType("e2-standard-4")
.disks(InstanceTemplateDiskArgs.builder()
.sourceImage("debian-cloud/debian-11")
.diskSizeGb(250)
.build())
.networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()
.network("default")
.accessConfigs(InstanceTemplateNetworkInterfaceAccessConfigArgs.builder()
.networkTier("PREMIUM")
.build())
.build())
.serviceAccount(InstanceTemplateServiceAccountArgs.builder()
.scopes(
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/pubsub",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/trace.append")
.build())
.build());
var foobarTargetPool = new TargetPool("foobarTargetPool", TargetPoolArgs.builder()
.name("my-target-pool")
.build());
var foobarRegionInstanceGroupManager = new RegionInstanceGroupManager("foobarRegionInstanceGroupManager", RegionInstanceGroupManagerArgs.builder()
.name("my-region-igm")
.region("us-central1")
.versions(RegionInstanceGroupManagerVersionArgs.builder()
.instanceTemplate(foobarInstanceTemplate.id())
.name("primary")
.build())
.targetPools(foobarTargetPool.id())
.baseInstanceName("foobar")
.build());
var foobar = new RegionAutoscaler("foobar", RegionAutoscalerArgs.builder()
.name("my-region-autoscaler")
.region("us-central1")
.target(foobarRegionInstanceGroupManager.id())
.autoscalingPolicy(RegionAutoscalerAutoscalingPolicyArgs.builder()
.maxReplicas(5)
.minReplicas(1)
.cooldownPeriod(60)
.cpuUtilization(RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs.builder()
.target(0.5)
.build())
.build())
.build());
final var debian9 = ComputeFunctions.getImage(GetImageArgs.builder()
.family("debian-11")
.project("debian-cloud")
.build());
}
}
resources:
foobar:
type: gcp:compute:RegionAutoscaler
properties:
name: my-region-autoscaler
region: us-central1
target: ${foobarRegionInstanceGroupManager.id}
autoscalingPolicy:
maxReplicas: 5
minReplicas: 1
cooldownPeriod: 60
cpuUtilization:
target: 0.5
foobarInstanceTemplate:
type: gcp:compute:InstanceTemplate
name: foobar
properties:
name: my-instance-template
machineType: e2-standard-4
disks:
- sourceImage: debian-cloud/debian-11
diskSizeGb: 250
networkInterfaces:
- network: default
accessConfigs:
- networkTier: PREMIUM
serviceAccount:
scopes:
- https://www.googleapis.com/auth/devstorage.read_only
- https://www.googleapis.com/auth/logging.write
- https://www.googleapis.com/auth/monitoring.write
- https://www.googleapis.com/auth/pubsub
- https://www.googleapis.com/auth/service.management.readonly
- https://www.googleapis.com/auth/servicecontrol
- https://www.googleapis.com/auth/trace.append
foobarTargetPool:
type: gcp:compute:TargetPool
name: foobar
properties:
name: my-target-pool
foobarRegionInstanceGroupManager:
type: gcp:compute:RegionInstanceGroupManager
name: foobar
properties:
name: my-region-igm
region: us-central1
versions:
- instanceTemplate: ${foobarInstanceTemplate.id}
name: primary
targetPools:
- ${foobarTargetPool.id}
baseInstanceName: foobar
variables:
debian9:
fn::invoke:
Function: gcp:compute:getImage
Arguments:
family: debian-11
project: debian-cloud
Create RegionAutoscaler Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new RegionAutoscaler(name: string, args: RegionAutoscalerArgs, opts?: CustomResourceOptions);
@overload
def RegionAutoscaler(resource_name: str,
args: RegionAutoscalerArgs,
opts: Optional[ResourceOptions] = None)
@overload
def RegionAutoscaler(resource_name: str,
opts: Optional[ResourceOptions] = None,
autoscaling_policy: Optional[RegionAutoscalerAutoscalingPolicyArgs] = None,
target: Optional[str] = None,
description: Optional[str] = None,
name: Optional[str] = None,
project: Optional[str] = None,
region: Optional[str] = None)
func NewRegionAutoscaler(ctx *Context, name string, args RegionAutoscalerArgs, opts ...ResourceOption) (*RegionAutoscaler, error)
public RegionAutoscaler(string name, RegionAutoscalerArgs args, CustomResourceOptions? opts = null)
public RegionAutoscaler(String name, RegionAutoscalerArgs args)
public RegionAutoscaler(String name, RegionAutoscalerArgs args, CustomResourceOptions options)
type: gcp:compute:RegionAutoscaler
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args RegionAutoscalerArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args RegionAutoscalerArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args RegionAutoscalerArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args RegionAutoscalerArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args RegionAutoscalerArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var regionAutoscalerResource = new Gcp.Compute.RegionAutoscaler("regionAutoscalerResource", new()
{
AutoscalingPolicy = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyArgs
{
MaxReplicas = 0,
MinReplicas = 0,
CooldownPeriod = 0,
CpuUtilization = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs
{
Target = 0,
PredictiveMethod = "string",
},
LoadBalancingUtilization = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs
{
Target = 0,
},
Metrics = new[]
{
new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyMetricArgs
{
Name = "string",
Filter = "string",
SingleInstanceAssignment = 0,
Target = 0,
Type = "string",
},
},
Mode = "string",
ScaleDownControl = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyScaleDownControlArgs
{
MaxScaledDownReplicas = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs
{
Fixed = 0,
Percent = 0,
},
TimeWindowSec = 0,
},
ScaleInControl = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyScaleInControlArgs
{
MaxScaledInReplicas = new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs
{
Fixed = 0,
Percent = 0,
},
TimeWindowSec = 0,
},
ScalingSchedules = new[]
{
new Gcp.Compute.Inputs.RegionAutoscalerAutoscalingPolicyScalingScheduleArgs
{
DurationSec = 0,
MinRequiredReplicas = 0,
Name = "string",
Schedule = "string",
Description = "string",
Disabled = false,
TimeZone = "string",
},
},
},
Target = "string",
Description = "string",
Name = "string",
Project = "string",
Region = "string",
});
example, err := compute.NewRegionAutoscaler(ctx, "regionAutoscalerResource", &compute.RegionAutoscalerArgs{
AutoscalingPolicy: &compute.RegionAutoscalerAutoscalingPolicyArgs{
MaxReplicas: pulumi.Int(0),
MinReplicas: pulumi.Int(0),
CooldownPeriod: pulumi.Int(0),
CpuUtilization: &compute.RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs{
Target: pulumi.Float64(0),
PredictiveMethod: pulumi.String("string"),
},
LoadBalancingUtilization: &compute.RegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs{
Target: pulumi.Float64(0),
},
Metrics: compute.RegionAutoscalerAutoscalingPolicyMetricArray{
&compute.RegionAutoscalerAutoscalingPolicyMetricArgs{
Name: pulumi.String("string"),
Filter: pulumi.String("string"),
SingleInstanceAssignment: pulumi.Float64(0),
Target: pulumi.Float64(0),
Type: pulumi.String("string"),
},
},
Mode: pulumi.String("string"),
ScaleDownControl: &compute.RegionAutoscalerAutoscalingPolicyScaleDownControlArgs{
MaxScaledDownReplicas: &compute.RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs{
Fixed: pulumi.Int(0),
Percent: pulumi.Int(0),
},
TimeWindowSec: pulumi.Int(0),
},
ScaleInControl: &compute.RegionAutoscalerAutoscalingPolicyScaleInControlArgs{
MaxScaledInReplicas: &compute.RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs{
Fixed: pulumi.Int(0),
Percent: pulumi.Int(0),
},
TimeWindowSec: pulumi.Int(0),
},
ScalingSchedules: compute.RegionAutoscalerAutoscalingPolicyScalingScheduleArray{
&compute.RegionAutoscalerAutoscalingPolicyScalingScheduleArgs{
DurationSec: pulumi.Int(0),
MinRequiredReplicas: pulumi.Int(0),
Name: pulumi.String("string"),
Schedule: pulumi.String("string"),
Description: pulumi.String("string"),
Disabled: pulumi.Bool(false),
TimeZone: pulumi.String("string"),
},
},
},
Target: pulumi.String("string"),
Description: pulumi.String("string"),
Name: pulumi.String("string"),
Project: pulumi.String("string"),
Region: pulumi.String("string"),
})
var regionAutoscalerResource = new RegionAutoscaler("regionAutoscalerResource", RegionAutoscalerArgs.builder()
.autoscalingPolicy(RegionAutoscalerAutoscalingPolicyArgs.builder()
.maxReplicas(0)
.minReplicas(0)
.cooldownPeriod(0)
.cpuUtilization(RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs.builder()
.target(0)
.predictiveMethod("string")
.build())
.loadBalancingUtilization(RegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs.builder()
.target(0)
.build())
.metrics(RegionAutoscalerAutoscalingPolicyMetricArgs.builder()
.name("string")
.filter("string")
.singleInstanceAssignment(0)
.target(0)
.type("string")
.build())
.mode("string")
.scaleDownControl(RegionAutoscalerAutoscalingPolicyScaleDownControlArgs.builder()
.maxScaledDownReplicas(RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs.builder()
.fixed(0)
.percent(0)
.build())
.timeWindowSec(0)
.build())
.scaleInControl(RegionAutoscalerAutoscalingPolicyScaleInControlArgs.builder()
.maxScaledInReplicas(RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs.builder()
.fixed(0)
.percent(0)
.build())
.timeWindowSec(0)
.build())
.scalingSchedules(RegionAutoscalerAutoscalingPolicyScalingScheduleArgs.builder()
.durationSec(0)
.minRequiredReplicas(0)
.name("string")
.schedule("string")
.description("string")
.disabled(false)
.timeZone("string")
.build())
.build())
.target("string")
.description("string")
.name("string")
.project("string")
.region("string")
.build());
region_autoscaler_resource = gcp.compute.RegionAutoscaler("regionAutoscalerResource",
autoscaling_policy={
"max_replicas": 0,
"min_replicas": 0,
"cooldown_period": 0,
"cpu_utilization": {
"target": 0,
"predictive_method": "string",
},
"load_balancing_utilization": {
"target": 0,
},
"metrics": [{
"name": "string",
"filter": "string",
"single_instance_assignment": 0,
"target": 0,
"type": "string",
}],
"mode": "string",
"scale_down_control": {
"max_scaled_down_replicas": {
"fixed": 0,
"percent": 0,
},
"time_window_sec": 0,
},
"scale_in_control": {
"max_scaled_in_replicas": {
"fixed": 0,
"percent": 0,
},
"time_window_sec": 0,
},
"scaling_schedules": [{
"duration_sec": 0,
"min_required_replicas": 0,
"name": "string",
"schedule": "string",
"description": "string",
"disabled": False,
"time_zone": "string",
}],
},
target="string",
description="string",
name="string",
project="string",
region="string")
const regionAutoscalerResource = new gcp.compute.RegionAutoscaler("regionAutoscalerResource", {
autoscalingPolicy: {
maxReplicas: 0,
minReplicas: 0,
cooldownPeriod: 0,
cpuUtilization: {
target: 0,
predictiveMethod: "string",
},
loadBalancingUtilization: {
target: 0,
},
metrics: [{
name: "string",
filter: "string",
singleInstanceAssignment: 0,
target: 0,
type: "string",
}],
mode: "string",
scaleDownControl: {
maxScaledDownReplicas: {
fixed: 0,
percent: 0,
},
timeWindowSec: 0,
},
scaleInControl: {
maxScaledInReplicas: {
fixed: 0,
percent: 0,
},
timeWindowSec: 0,
},
scalingSchedules: [{
durationSec: 0,
minRequiredReplicas: 0,
name: "string",
schedule: "string",
description: "string",
disabled: false,
timeZone: "string",
}],
},
target: "string",
description: "string",
name: "string",
project: "string",
region: "string",
});
type: gcp:compute:RegionAutoscaler
properties:
autoscalingPolicy:
cooldownPeriod: 0
cpuUtilization:
predictiveMethod: string
target: 0
loadBalancingUtilization:
target: 0
maxReplicas: 0
metrics:
- filter: string
name: string
singleInstanceAssignment: 0
target: 0
type: string
minReplicas: 0
mode: string
scaleDownControl:
maxScaledDownReplicas:
fixed: 0
percent: 0
timeWindowSec: 0
scaleInControl:
maxScaledInReplicas:
fixed: 0
percent: 0
timeWindowSec: 0
scalingSchedules:
- description: string
disabled: false
durationSec: 0
minRequiredReplicas: 0
name: string
schedule: string
timeZone: string
description: string
name: string
project: string
region: string
target: string
RegionAutoscaler Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
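For example, the autoscalingPolicy input can be written either way; a minimal Python sketch in which the project, region, and instance group manager URL are placeholders:
import pulumi_gcp as gcp

# Placeholder target; in practice this is the id/self_link of a RegionInstanceGroupManager.
igm_id = "projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm"

# Dictionary-literal form.
dict_style = gcp.compute.RegionAutoscaler("dict-style",
    region="us-central1",
    target=igm_id,
    autoscaling_policy={
        "max_replicas": 5,
        "min_replicas": 1,
        "cpu_utilization": {"target": 0.5},
    })

# Equivalent args-class form.
args_style = gcp.compute.RegionAutoscaler("args-style",
    region="us-central1",
    target=igm_id,
    autoscaling_policy=gcp.compute.RegionAutoscalerAutoscalingPolicyArgs(
        max_replicas=5,
        min_replicas=1,
        cpu_utilization=gcp.compute.RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs(target=0.5),
    ))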
The RegionAutoscaler resource accepts the following input properties:
- AutoscalingPolicy RegionAutoscalerAutoscalingPolicy
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- Target string
- URL of the managed instance group that this autoscaler will scale.
- Description string
- An optional description of this resource.
- Name string
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- Project string
- Region string
- URL of the region where the instance group resides.
- AutoscalingPolicy RegionAutoscalerAutoscalingPolicyArgs
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- Target string
- URL of the managed instance group that this autoscaler will scale.
- Description string
- An optional description of this resource.
- Name string
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- Project string
- Region string
- URL of the region where the instance group resides.
- autoscalingPolicy RegionAutoscalerAutoscalingPolicy
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- target String
- URL of the managed instance group that this autoscaler will scale.
- description String
- An optional description of this resource.
- name String
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project String
- region String
- URL of the region where the instance group resides.
- autoscalingPolicy RegionAutoscalerAutoscalingPolicy
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- target string
- URL of the managed instance group that this autoscaler will scale.
- description string
- An optional description of this resource.
- name string
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project string
- region string
- URL of the region where the instance group resides.
- autoscaling_policy RegionAutoscalerAutoscalingPolicyArgs
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- target str
- URL of the managed instance group that this autoscaler will scale.
- description str
- An optional description of this resource.
- name str
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project str
- region str
- URL of the region where the instance group resides.
- autoscalingPolicy Property Map
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- target String
- URL of the managed instance group that this autoscaler will scale.
- description String
- An optional description of this resource.
- name String
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project String
- region String
- URL of the region where the instance group resides.
Outputs
All input properties are implicitly available as output properties. Additionally, the RegionAutoscaler resource produces the following output properties:
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Id string
- The provider-assigned unique ID for this managed resource.
- SelfLink string
- The URI of the created resource.
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Id string
- The provider-assigned unique ID for this managed resource.
- SelfLink string
- The URI of the created resource.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- id String
- The provider-assigned unique ID for this managed resource.
- selfLink String
- The URI of the created resource.
- creationTimestamp string
- Creation timestamp in RFC3339 text format.
- id string
- The provider-assigned unique ID for this managed resource.
- selfLink string
- The URI of the created resource.
- creation_timestamp str
- Creation timestamp in RFC3339 text format.
- id str
- The provider-assigned unique ID for this managed resource.
- self_link str
- The URI of the created resource.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- id String
- The provider-assigned unique ID for this managed resource.
- selfLink String
- The URI of the created resource.
Look up Existing RegionAutoscaler Resource
Get an existing RegionAutoscaler resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: RegionAutoscalerState, opts?: CustomResourceOptions): RegionAutoscaler
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
autoscaling_policy: Optional[RegionAutoscalerAutoscalingPolicyArgs] = None,
creation_timestamp: Optional[str] = None,
description: Optional[str] = None,
name: Optional[str] = None,
project: Optional[str] = None,
region: Optional[str] = None,
self_link: Optional[str] = None,
target: Optional[str] = None) -> RegionAutoscaler
func GetRegionAutoscaler(ctx *Context, name string, id IDInput, state *RegionAutoscalerState, opts ...ResourceOption) (*RegionAutoscaler, error)
public static RegionAutoscaler Get(string name, Input<string> id, RegionAutoscalerState? state, CustomResourceOptions? opts = null)
public static RegionAutoscaler get(String name, Output<String> id, RegionAutoscalerState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
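As an illustration, a minimal Python lookup might look like the following sketch; the resource ID shown is a placeholder for your autoscaler's ID:
import pulumi
import pulumi_gcp as gcp

# Read the state of an already-created autoscaler without managing it from this program;
# the ID below is a hypothetical example value.
existing = gcp.compute.RegionAutoscaler.get(
    "existing-autoscaler",
    "projects/my-project/regions/us-central1/autoscalers/my-region-autoscaler")

# The looked-up resource exposes the same output properties as a created one.
pulumi.export("existingTarget", existing.target)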
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AutoscalingPolicy RegionAutoscalerAutoscalingPolicy
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Description string
- An optional description of this resource.
- Name string
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- Project string
- Region string
- URL of the region where the instance group resides.
- SelfLink string
- The URI of the created resource.
- Target string
- URL of the managed instance group that this autoscaler will scale.
- AutoscalingPolicy RegionAutoscalerAutoscalingPolicyArgs
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- CreationTimestamp string
- Creation timestamp in RFC3339 text format.
- Description string
- An optional description of this resource.
- Name string
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- Project string
- Region string
- URL of the region where the instance group resides.
- SelfLink string
- The URI of the created resource.
- Target string
- URL of the managed instance group that this autoscaler will scale.
- autoscalingPolicy RegionAutoscalerAutoscalingPolicy
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- description String
- An optional description of this resource.
- name String
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project String
- region String
- URL of the region where the instance group resides.
- selfLink String
- The URI of the created resource.
- target String
- URL of the managed instance group that this autoscaler will scale.
- autoscalingPolicy RegionAutoscalerAutoscalingPolicy
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- creationTimestamp string
- Creation timestamp in RFC3339 text format.
- description string
- An optional description of this resource.
- name string
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project string
- region string
- URL of the region where the instance group resides.
- selfLink string
- The URI of the created resource.
- target string
- URL of the managed instance group that this autoscaler will scale.
- autoscaling_policy RegionAutoscalerAutoscalingPolicyArgs
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- creation_timestamp str
- Creation timestamp in RFC3339 text format.
- description str
- An optional description of this resource.
- name str
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project str
- region str
- URL of the region where the instance group resides.
- self_link str
- The URI of the created resource.
- target str
- URL of the managed instance group that this autoscaler will scale.
- autoscalingPolicy Property Map
- The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
- creationTimestamp String
- Creation timestamp in RFC3339 text format.
- description String
- An optional description of this resource.
- name String
- Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
- project String
- region String
- URL of the region where the instance group resides.
- selfLink String
- The URI of the created resource.
- target String
- URL of the managed instance group that this autoscaler will scale.
Supporting Types
RegionAutoscalerAutoscalingPolicy, RegionAutoscalerAutoscalingPolicyArgs
- MaxReplicas int
- The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.
- MinReplicas int
- The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.
- CooldownPeriod int
- The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
- CpuUtilization RegionAutoscalerAutoscalingPolicyCpuUtilization
- Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
- LoadBalancingUtilization RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization
- Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
- Metrics List<RegionAutoscalerAutoscalingPolicyMetric>
- Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
- Mode string
- Defines operating mode for this policy.
- ScaleDownControl RegionAutoscalerAutoscalingPolicyScaleDownControl
- Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- ScaleInControl RegionAutoscalerAutoscalingPolicyScaleInControl
- Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- ScalingSchedules List<RegionAutoscalerAutoscalingPolicyScalingSchedule>
- Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
- MaxReplicas int
- The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.
- MinReplicas int
- The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.
- CooldownPeriod int
- The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
- CpuUtilization RegionAutoscalerAutoscalingPolicyCpuUtilization
- Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
- LoadBalancingUtilization RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization
- Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
- Metrics []RegionAutoscalerAutoscalingPolicyMetric
- Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
- Mode string
- Defines operating mode for this policy.
- ScaleDownControl RegionAutoscalerAutoscalingPolicyScaleDownControl
- Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- ScaleInControl RegionAutoscalerAutoscalingPolicyScaleInControl
- Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- ScalingSchedules []RegionAutoscalerAutoscalingPolicyScalingSchedule
- Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
- maxReplicas Integer
- The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.
- minReplicas Integer
- The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.
- cooldownPeriod Integer
- The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
- cpuUtilization RegionAutoscalerAutoscalingPolicyCpuUtilization
- Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
- loadBalancingUtilization RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization
- Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
- metrics List<RegionAutoscalerAutoscalingPolicyMetric>
- Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
- mode String
- Defines operating mode for this policy.
- scaleDownControl RegionAutoscalerAutoscalingPolicyScaleDownControl
- Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scaleInControl RegionAutoscalerAutoscalingPolicyScaleInControl
- Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scalingSchedules List<RegionAutoscalerAutoscalingPolicyScalingSchedule>
- Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
- maxReplicas number
- The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.
- minReplicas number
- The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.
- cooldownPeriod number
- The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
- cpuUtilization RegionAutoscalerAutoscalingPolicyCpuUtilization
- Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
- loadBalancingUtilization RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization
- Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
- metrics RegionAutoscalerAutoscalingPolicyMetric[]
- Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
- mode string
- Defines operating mode for this policy.
- scaleDownControl RegionAutoscalerAutoscalingPolicyScaleDownControl
- Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scaleInControl RegionAutoscalerAutoscalingPolicyScaleInControl
- Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scalingSchedules RegionAutoscalerAutoscalingPolicyScalingSchedule[]
- Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
- max_replicas int
- The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.
- min_replicas int
- The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.
- cooldown_period int
- The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
- cpu_utilization RegionAutoscalerAutoscalingPolicyCpuUtilization
- Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
- load_balancing_utilization RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization
- Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
- metrics Sequence[RegionAutoscalerAutoscalingPolicyMetric]
- Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
- mode str
- Defines operating mode for this policy.
- scale_down_control RegionAutoscalerAutoscalingPolicyScaleDownControl
- Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scale_in_control RegionAutoscalerAutoscalingPolicyScaleInControl
- Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scaling_schedules Sequence[RegionAutoscalerAutoscalingPolicyScalingSchedule]
- Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
- maxReplicas Number
- The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.
- minReplicas Number
- The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.
- cooldownPeriod Number
- The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
- cpuUtilization Property Map
- Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
- loadBalancingUtilization Property Map
- Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
- metrics List<Property Map>
- Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
- mode String
- Defines operating mode for this policy.
- scaleDownControl Property Map
- Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scaleInControl Property Map
- Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
- scalingSchedules List<Property Map>
- Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
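As a sketch of how several of these policy fields combine (the target URL, schedule, and time zone below are illustrative placeholders, not recommendations):
import pulumi_gcp as gcp

scheduled = gcp.compute.RegionAutoscaler("scheduled",
    region="us-central1",
    # Placeholder: URL of an existing RegionInstanceGroupManager.
    target="projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm",
    autoscaling_policy={
        "min_replicas": 1,
        "max_replicas": 10,
        "cooldown_period": 60,
        "cpu_utilization": {
            "target": 0.6,
            "predictive_method": "OPTIMIZE_AVAILABILITY",
        },
        # Hold at least 4 instances for one hour every weekday morning.
        "scaling_schedules": [{
            "name": "weekday-mornings",
            "schedule": "0 8 * * 1-5",
            "duration_sec": 3600,
            "min_required_replicas": 4,
            "time_zone": "America/Chicago",
        }],
    })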
RegionAutoscalerAutoscalingPolicyCpuUtilization, RegionAutoscalerAutoscalingPolicyCpuUtilizationArgs
- Target double
- The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
- PredictiveMethod string
- Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:
- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
- Target float64
- The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
- PredictiveMethod string
- Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:
- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
- target Double
- The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
- predictiveMethod String
- Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:
- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
- target number
- The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
- predictiveMethod string
- Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:
- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
- target float
- The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
- predictive_method str
- Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:
- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
- target Number
- The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
- predictiveMethod String
- Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:
- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
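For illustration, a minimal sketch of enabling predictive autoscaling on CPU; the target instance group self-link and project are placeholders:
import * as gcp from "@pulumi/gcp";

// Placeholder: self-link of an existing regional managed instance group.
const igmId = "projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm";

const predictive = new gcp.compute.RegionAutoscaler("predictive", {
    name: "predictive-cpu-autoscaler",
    region: "us-central1",
    target: igmId,
    autoscalingPolicy: {
        minReplicas: 1,
        maxReplicas: 10,
        cooldownPeriod: 60,
        cpuUtilization: {
            target: 0.6,                               // default target shown explicitly
            predictiveMethod: "OPTIMIZE_AVAILABILITY", // scale out ahead of forecasted demand
        },
    },
});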
RegionAutoscalerAutoscalingPolicyLoadBalancingUtilization, RegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs
- Target double
- Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
- Target float64
- Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
- target Double
- Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
- target number
- Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
- target float
- Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
- target Number
- Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
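As a sketch, the same resource can instead scale on the serving capacity of an HTTP(S) load balancer backend; the instance group reference is again a placeholder:
import * as gcp from "@pulumi/gcp";

// Placeholder: instance group manager serving as a backend of an HTTP(S) load balancer.
const igmId = "projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm";

const lbScaled = new gcp.compute.RegionAutoscaler("lb-scaled", {
    name: "lb-utilization-autoscaler",
    region: "us-central1",
    target: igmId,
    autoscalingPolicy: {
        minReplicas: 2,
        maxReplicas: 20,
        cooldownPeriod: 60,
        loadBalancingUtilization: {
            target: 0.8, // keep instances near 80% of their configured backend capacity
        },
    },
});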
RegionAutoscalerAutoscalingPolicyMetric, RegionAutoscalerAutoscalingPolicyMetricArgs
- Name string
- The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
- Filter string
- A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
- SingleInstanceAssignment double
- If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric; the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; it could be better used with utilization_target instead.
- Target double
- The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
- Type string
- Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
- Name string
- The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
- Filter string
- A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
- SingleInstanceAssignment float64
- If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric; the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; it could be better used with utilization_target instead.
- Target float64
- The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
- Type string
- Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
- name String
- The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
- filter String
- A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
- singleInstanceAssignment Double
- If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric; the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; it could be better used with utilization_target instead.
- target Double
- The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
- type String
- Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
- name string
- The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
- filter string
- A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
- singleInstanceAssignment number
- If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric; the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; it could be better used with utilization_target instead.
- target number
- The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
- type string
- Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
- name str
- The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
- filter str
- A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
- single_instance_assignment float
- If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric; the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; it could be better used with utilization_target instead.
- target float
- The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
- type str
- Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
- name String
- The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
- filter String
- A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
- singleInstanceAssignment Number
- If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric; the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; it could be better used with utilization_target instead.
- target Number
- The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
- type String
- Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
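A hedged sketch of a per-group metric policy that scales on a Pub/Sub backlog; the subscription name, filter string, and per-instance assignment are illustrative values, and the instance group reference is a placeholder:
import * as gcp from "@pulumi/gcp";

// Placeholder: self-link of an existing regional managed instance group.
const igmId = "projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm";

const queueScaled = new gcp.compute.RegionAutoscaler("queue-scaled", {
    name: "pubsub-backlog-autoscaler",
    region: "us-central1",
    target: igmId,
    autoscalingPolicy: {
        minReplicas: 1,
        maxReplicas: 15,
        cooldownPeriod: 60,
        metrics: [{
            // Per-group metric: one TimeSeries is selected for the whole group.
            name: "pubsub.googleapis.com/subscription/num_undelivered_messages",
            // Illustrative filter: equality comparisons only, joined with AND.
            filter: "resource.type = pubsub_subscription AND resource.labels.subscription_id = my-subscription",
            // Aim for roughly 100 undelivered messages per running instance.
            singleInstanceAssignment: 100,
        }],
    },
});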
RegionAutoscalerAutoscalingPolicyScaleDownControl, RegionAutoscalerAutoscalingPolicyScaleDownControlArgs
- MaxScaledDownReplicas RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
- A nested object resource. Structure is documented below.
- TimeWindowSec int
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- MaxScaledDownReplicas RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
- A nested object resource. Structure is documented below.
- TimeWindowSec int
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- maxScaledDownReplicas RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
- A nested object resource. Structure is documented below.
- timeWindowSec Integer
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- maxScaledDownReplicas RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
- A nested object resource. Structure is documented below.
- timeWindowSec number
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- max_scaled_down_replicas RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
- A nested object resource. Structure is documented below.
- time_window_sec int
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- maxScaledDownReplicas Property Map
- A nested object resource. Structure is documented below.
- timeWindowSec Number
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
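A sketch of a scale-down control that caps how quickly the group shrinks; the fixed field on maxScaledDownReplicas is assumed from the GCP FixedOrPercent shape, since its structure is not listed here, and the instance group reference is a placeholder:
import * as gcp from "@pulumi/gcp";

// Placeholder: self-link of an existing regional managed instance group.
const igmId = "projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm";

const cautious = new gcp.compute.RegionAutoscaler("cautious", {
    name: "cautious-scale-down",
    region: "us-central1",
    target: igmId,
    autoscalingPolicy: {
        minReplicas: 1,
        maxReplicas: 10,
        cpuUtilization: { target: 0.6 },
        scaleDownControl: {
            maxScaledDownReplicas: {
                fixed: 2,        // assumed field: remove at most 2 VMs per window
            },
            timeWindowSec: 600,  // consider the last 10 minutes of recommendations
        },
    },
});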
RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas, RegionAutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs
RegionAutoscalerAutoscalingPolicyScaleInControl, RegionAutoscalerAutoscalingPolicyScaleInControlArgs
- MaxScaledInReplicas RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
- A nested object resource. Structure is documented below.
- TimeWindowSec int
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- MaxScaledInReplicas RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
- A nested object resource. Structure is documented below.
- TimeWindowSec int
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- maxScaledInReplicas RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
- A nested object resource. Structure is documented below.
- timeWindowSec Integer
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- maxScaledInReplicas RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
- A nested object resource. Structure is documented below.
- timeWindowSec number
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- max_scaled_in_replicas RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
- A nested object resource. Structure is documented below.
- time_window_sec int
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
- maxScaledInReplicas Property Map
- A nested object resource. Structure is documented below.
- timeWindowSec Number
- How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
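Scale-in control follows the same pattern; a sketch using a percentage cap (the percent field on maxScaledInReplicas is likewise assumed from the FixedOrPercent shape, and the instance group reference is a placeholder):
import * as gcp from "@pulumi/gcp";

// Placeholder: self-link of an existing regional managed instance group.
const igmId = "projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm";

const gradual = new gcp.compute.RegionAutoscaler("gradual", {
    name: "gradual-scale-in",
    region: "us-central1",
    target: igmId,
    autoscalingPolicy: {
        minReplicas: 1,
        maxReplicas: 10,
        cpuUtilization: { target: 0.6 },
        scaleInControl: {
            maxScaledInReplicas: {
                percent: 10,     // assumed field: shrink by at most 10% of the group per window
            },
            timeWindowSec: 600,  // look back 10 minutes when computing recommendations
        },
    },
});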
RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas, RegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs
RegionAutoscalerAutoscalingPolicyScalingSchedule, RegionAutoscalerAutoscalingPolicyScalingScheduleArgs
- DurationSec int
- The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
- MinRequiredReplicas int
- Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
- Name string
- The identifier for this object. Format specified above.
- Schedule string
- The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
- Description string
- An optional description of this resource.
- Disabled bool
- A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
- TimeZone string
- The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
- DurationSec int
- The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
- MinRequiredReplicas int
- Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
- Name string
- The identifier for this object. Format specified above.
- Schedule string
- The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
- Description string
- An optional description of this resource.
- Disabled bool
- A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
- TimeZone string
- The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
- durationSec Integer
- The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
- minRequiredReplicas Integer
- Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
- name String
- The identifier for this object. Format specified above.
- schedule String
- The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
- description String
- An optional description of this resource.
- disabled Boolean
- A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
- timeZone String
- The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
- durationSec number
- The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
- minRequiredReplicas number
- Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
- name string
- The identifier for this object. Format specified above.
- schedule string
- The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
- description string
- An optional description of this resource.
- disabled boolean
- A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
- timeZone string
- The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
- duration_sec int
- The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
- min_required_replicas int
- Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
- name str
- The identifier for this object. Format specified above.
- schedule str
- The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
- description str
- An optional description of this resource.
- disabled bool
- A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
- time_zone str
- The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
- durationSec Number
- The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
- minRequiredReplicas Number
- Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
- name String
- The identifier for this object. Format specified above.
- schedule String
- The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
- description String
- An optional description of this resource.
- disabled Boolean
- A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
- timeZone String
- The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
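A sketch of a scaling schedule that keeps extra capacity warm on weekday mornings; the cron expression, time zone, and replica counts are illustrative, and the instance group reference is a placeholder:
import * as gcp from "@pulumi/gcp";

// Placeholder: self-link of an existing regional managed instance group.
const igmId = "projects/my-project/regions/us-central1/instanceGroupManagers/my-region-igm";

const scheduled = new gcp.compute.RegionAutoscaler("scheduled", {
    name: "scheduled-autoscaler",
    region: "us-central1",
    target: igmId,
    autoscalingPolicy: {
        minReplicas: 1,
        maxReplicas: 10,
        cpuUtilization: { target: 0.6 },
        scalingSchedules: [{
            name: "weekday-morning-warmup",  // identifier for this schedule
            schedule: "0 8 * * 1-5",         // extended cron: 08:00 Monday through Friday
            durationSec: 3600,               // hold the floor for one hour (minimum 300)
            minRequiredReplicas: 5,          // keep at least 5 VMs during the window
            timeZone: "America/New_York",    // tz database name
            description: "Pre-scale for the morning traffic ramp",
        }],
    },
});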
Import
RegionAutoscaler can be imported using any of these accepted formats:
projects/{{project}}/regions/{{region}}/autoscalers/{{name}}
{{project}}/{{region}}/{{name}}
{{region}}/{{name}}
{{name}}
When using the pulumi import
command, RegionAutoscaler can be imported using one of the formats above. For example:
$ pulumi import gcp:compute/regionAutoscaler:RegionAutoscaler default projects/{{project}}/regions/{{region}}/autoscalers/{{name}}
$ pulumi import gcp:compute/regionAutoscaler:RegionAutoscaler default {{project}}/{{region}}/{{name}}
$ pulumi import gcp:compute/regionAutoscaler:RegionAutoscaler default {{region}}/{{name}}
$ pulumi import gcp:compute/regionAutoscaler:RegionAutoscaler default {{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.