gcp.dataflow.Pipeline
Explore with Pulumi AI
The main pipeline entity and all the necessary metadata for launching and managing linked jobs.
To get more information about Pipeline, see:
- API documentation
- How-to Guides
Example Usage
Data Pipeline Pipeline
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const serviceAccount = new gcp.serviceaccount.Account("service_account", {
accountId: "my-account",
displayName: "Service Account",
});
const primary = new gcp.dataflow.Pipeline("primary", {
name: "my-pipeline",
displayName: "my-pipeline",
type: "PIPELINE_TYPE_BATCH",
state: "STATE_ACTIVE",
region: "us-central1",
workload: {
dataflowLaunchTemplateRequest: {
projectId: "my-project",
gcsPath: "gs://my-bucket/path",
launchParameters: {
jobName: "my-job",
parameters: {
name: "wrench",
},
environment: {
numWorkers: 5,
maxWorkers: 5,
                zone: "us-central1-a",
serviceAccountEmail: serviceAccount.email,
network: "default",
tempLocation: "gs://my-bucket/tmp_dir",
bypassTempDirValidation: false,
machineType: "E2",
additionalUserLabels: {
context: "test",
},
workerRegion: "us-central1",
workerZone: "us-central1-a",
enableStreamingEngine: false,
},
update: false,
transformNameMapping: {
name: "wrench",
},
},
location: "us-central1",
},
},
scheduleInfo: {
schedule: "* */2 * * *",
},
});
import pulumi
import pulumi_gcp as gcp
service_account = gcp.serviceaccount.Account("service_account",
account_id="my-account",
display_name="Service Account")
primary = gcp.dataflow.Pipeline("primary",
name="my-pipeline",
display_name="my-pipeline",
type="PIPELINE_TYPE_BATCH",
state="STATE_ACTIVE",
region="us-central1",
workload={
"dataflow_launch_template_request": {
"project_id": "my-project",
"gcs_path": "gs://my-bucket/path",
"launch_parameters": {
"job_name": "my-job",
"parameters": {
"name": "wrench",
},
"environment": {
"num_workers": 5,
"max_workers": 5,
            "zone": "us-centra1-a".replace("centra1", "central1"),
"service_account_email": service_account.email,
"network": "default",
"temp_location": "gs://my-bucket/tmp_dir",
"bypass_temp_dir_validation": False,
"machine_type": "E2",
"additional_user_labels": {
"context": "test",
},
"worker_region": "us-central1",
"worker_zone": "us-central1-a",
"enable_streaming_engine": False,
},
"update": False,
"transform_name_mapping": {
"name": "wrench",
},
},
"location": "us-central1",
},
},
schedule_info={
"schedule": "* */2 * * *",
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataflow"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
serviceAccount, err := serviceaccount.NewAccount(ctx, "service_account", &serviceaccount.AccountArgs{
AccountId: pulumi.String("my-account"),
DisplayName: pulumi.String("Service Account"),
})
if err != nil {
return err
}
_, err = dataflow.NewPipeline(ctx, "primary", &dataflow.PipelineArgs{
Name: pulumi.String("my-pipeline"),
DisplayName: pulumi.String("my-pipeline"),
Type: pulumi.String("PIPELINE_TYPE_BATCH"),
State: pulumi.String("STATE_ACTIVE"),
Region: pulumi.String("us-central1"),
Workload: &dataflow.PipelineWorkloadArgs{
DataflowLaunchTemplateRequest: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestArgs{
ProjectId: pulumi.String("my-project"),
GcsPath: pulumi.String("gs://my-bucket/path"),
LaunchParameters: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs{
JobName: pulumi.String("my-job"),
Parameters: pulumi.StringMap{
"name": pulumi.String("wrench"),
},
Environment: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs{
NumWorkers: pulumi.Int(5),
MaxWorkers: pulumi.Int(5),
Zone: pulumi.String("us-central1-a"),
ServiceAccountEmail: serviceAccount.Email,
Network: pulumi.String("default"),
TempLocation: pulumi.String("gs://my-bucket/tmp_dir"),
BypassTempDirValidation: pulumi.Bool(false),
MachineType: pulumi.String("E2"),
AdditionalUserLabels: pulumi.StringMap{
"context": pulumi.String("test"),
},
WorkerRegion: pulumi.String("us-central1"),
WorkerZone: pulumi.String("us-central1-a"),
EnableStreamingEngine: pulumi.Bool(false),
},
Update: pulumi.Bool(false),
TransformNameMapping: pulumi.StringMap{
"name": pulumi.String("wrench"),
},
},
Location: pulumi.String("us-central1"),
},
},
ScheduleInfo: &dataflow.PipelineScheduleInfoArgs{
Schedule: pulumi.String("* */2 * * *"),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var serviceAccount = new Gcp.ServiceAccount.Account("service_account", new()
{
AccountId = "my-account",
DisplayName = "Service Account",
});
var primary = new Gcp.Dataflow.Pipeline("primary", new()
{
Name = "my-pipeline",
DisplayName = "my-pipeline",
Type = "PIPELINE_TYPE_BATCH",
State = "STATE_ACTIVE",
Region = "us-central1",
Workload = new Gcp.Dataflow.Inputs.PipelineWorkloadArgs
{
DataflowLaunchTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs
{
ProjectId = "my-project",
GcsPath = "gs://my-bucket/path",
LaunchParameters = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs
{
JobName = "my-job",
Parameters =
{
{ "name", "wrench" },
},
Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs
{
NumWorkers = 5,
MaxWorkers = 5,
Zone = "us-central1-a",
ServiceAccountEmail = serviceAccount.Email,
Network = "default",
TempLocation = "gs://my-bucket/tmp_dir",
BypassTempDirValidation = false,
MachineType = "E2",
AdditionalUserLabels =
{
{ "context", "test" },
},
WorkerRegion = "us-central1",
WorkerZone = "us-central1-a",
EnableStreamingEngine = false,
},
Update = false,
TransformNameMapping =
{
{ "name", "wrench" },
},
},
Location = "us-central1",
},
},
ScheduleInfo = new Gcp.Dataflow.Inputs.PipelineScheduleInfoArgs
{
Schedule = "* */2 * * *",
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.serviceaccount.Account;
import com.pulumi.gcp.serviceaccount.AccountArgs;
import com.pulumi.gcp.dataflow.Pipeline;
import com.pulumi.gcp.dataflow.PipelineArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs;
import com.pulumi.gcp.dataflow.inputs.PipelineScheduleInfoArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var serviceAccount = new Account("serviceAccount", AccountArgs.builder()
.accountId("my-account")
.displayName("Service Account")
.build());
var primary = new Pipeline("primary", PipelineArgs.builder()
.name("my-pipeline")
.displayName("my-pipeline")
.type("PIPELINE_TYPE_BATCH")
.state("STATE_ACTIVE")
.region("us-central1")
.workload(PipelineWorkloadArgs.builder()
.dataflowLaunchTemplateRequest(PipelineWorkloadDataflowLaunchTemplateRequestArgs.builder()
.projectId("my-project")
.gcsPath("gs://my-bucket/path")
.launchParameters(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs.builder()
.jobName("my-job")
.parameters(Map.of("name", "wrench"))
.environment(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs.builder()
.numWorkers(5)
.maxWorkers(5)
.zone("us-central1-a")
.serviceAccountEmail(serviceAccount.email())
.network("default")
.tempLocation("gs://my-bucket/tmp_dir")
.bypassTempDirValidation(false)
.machineType("E2")
.additionalUserLabels(Map.of("context", "test"))
.workerRegion("us-central1")
.workerZone("us-central1-a")
.enableStreamingEngine(false)
.build())
.update(false)
.transformNameMapping(Map.of("name", "wrench"))
.build())
.location("us-central1")
.build())
.build())
.scheduleInfo(PipelineScheduleInfoArgs.builder()
.schedule("* */2 * * *")
.build())
.build());
}
}
resources:
serviceAccount:
type: gcp:serviceaccount:Account
name: service_account
properties:
accountId: my-account
displayName: Service Account
primary:
type: gcp:dataflow:Pipeline
properties:
name: my-pipeline
displayName: my-pipeline
type: PIPELINE_TYPE_BATCH
state: STATE_ACTIVE
region: us-central1
workload:
dataflowLaunchTemplateRequest:
projectId: my-project
gcsPath: gs://my-bucket/path
launchParameters:
jobName: my-job
parameters:
name: wrench
environment:
numWorkers: 5
maxWorkers: 5
zone: us-central1-a
serviceAccountEmail: ${serviceAccount.email}
network: default
tempLocation: gs://my-bucket/tmp_dir
bypassTempDirValidation: false
machineType: E2
additionalUserLabels:
context: test
workerRegion: us-central1
workerZone: us-central1-a
enableStreamingEngine: false
update: false
transformNameMapping:
name: wrench
location: us-central1
scheduleInfo:
schedule: '* */2 * * *'
Create Pipeline Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Pipeline(name: string, args: PipelineArgs, opts?: CustomResourceOptions);
@overload
def Pipeline(resource_name: str,
args: PipelineArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Pipeline(resource_name: str,
opts: Optional[ResourceOptions] = None,
state: Optional[str] = None,
type: Optional[str] = None,
display_name: Optional[str] = None,
name: Optional[str] = None,
pipeline_sources: Optional[Mapping[str, str]] = None,
project: Optional[str] = None,
region: Optional[str] = None,
schedule_info: Optional[PipelineScheduleInfoArgs] = None,
scheduler_service_account_email: Optional[str] = None,
workload: Optional[PipelineWorkloadArgs] = None)
func NewPipeline(ctx *Context, name string, args PipelineArgs, opts ...ResourceOption) (*Pipeline, error)
public Pipeline(string name, PipelineArgs args, CustomResourceOptions? opts = null)
public Pipeline(String name, PipelineArgs args)
public Pipeline(String name, PipelineArgs args, CustomResourceOptions options)
type: gcp:dataflow:Pipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PipelineArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var pipelineResource = new Gcp.Dataflow.Pipeline("pipelineResource", new()
{
State = "string",
Type = "string",
DisplayName = "string",
Name = "string",
PipelineSources =
{
{ "string", "string" },
},
Project = "string",
Region = "string",
ScheduleInfo = new Gcp.Dataflow.Inputs.PipelineScheduleInfoArgs
{
NextJobTime = "string",
Schedule = "string",
TimeZone = "string",
},
SchedulerServiceAccountEmail = "string",
Workload = new Gcp.Dataflow.Inputs.PipelineWorkloadArgs
{
DataflowFlexTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowFlexTemplateRequestArgs
{
LaunchParameter = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs
{
JobName = "string",
ContainerSpecGcsPath = "string",
Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs
{
AdditionalExperiments = new[]
{
"string",
},
AdditionalUserLabels =
{
{ "string", "string" },
},
EnableStreamingEngine = false,
FlexrsGoal = "string",
IpConfiguration = "string",
KmsKeyName = "string",
MachineType = "string",
MaxWorkers = 0,
Network = "string",
NumWorkers = 0,
ServiceAccountEmail = "string",
Subnetwork = "string",
TempLocation = "string",
WorkerRegion = "string",
WorkerZone = "string",
Zone = "string",
},
LaunchOptions =
{
{ "string", "string" },
},
Parameters =
{
{ "string", "string" },
},
TransformNameMappings =
{
{ "string", "string" },
},
Update = false,
},
Location = "string",
ProjectId = "string",
ValidateOnly = false,
},
DataflowLaunchTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs
{
ProjectId = "string",
GcsPath = "string",
LaunchParameters = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs
{
JobName = "string",
Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs
{
AdditionalExperiments = new[]
{
"string",
},
AdditionalUserLabels =
{
{ "string", "string" },
},
BypassTempDirValidation = false,
EnableStreamingEngine = false,
IpConfiguration = "string",
KmsKeyName = "string",
MachineType = "string",
MaxWorkers = 0,
Network = "string",
NumWorkers = 0,
ServiceAccountEmail = "string",
Subnetwork = "string",
TempLocation = "string",
WorkerRegion = "string",
WorkerZone = "string",
Zone = "string",
},
Parameters =
{
{ "string", "string" },
},
TransformNameMapping =
{
{ "string", "string" },
},
Update = false,
},
Location = "string",
ValidateOnly = false,
},
},
});
example, err := dataflow.NewPipeline(ctx, "pipelineResource", &dataflow.PipelineArgs{
State: pulumi.String("string"),
Type: pulumi.String("string"),
DisplayName: pulumi.String("string"),
Name: pulumi.String("string"),
PipelineSources: pulumi.StringMap{
"string": pulumi.String("string"),
},
Project: pulumi.String("string"),
Region: pulumi.String("string"),
ScheduleInfo: &dataflow.PipelineScheduleInfoArgs{
NextJobTime: pulumi.String("string"),
Schedule: pulumi.String("string"),
TimeZone: pulumi.String("string"),
},
SchedulerServiceAccountEmail: pulumi.String("string"),
Workload: &dataflow.PipelineWorkloadArgs{
DataflowFlexTemplateRequest: &dataflow.PipelineWorkloadDataflowFlexTemplateRequestArgs{
LaunchParameter: &dataflow.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs{
JobName: pulumi.String("string"),
ContainerSpecGcsPath: pulumi.String("string"),
Environment: &dataflow.PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs{
AdditionalExperiments: pulumi.StringArray{
pulumi.String("string"),
},
AdditionalUserLabels: pulumi.StringMap{
"string": pulumi.String("string"),
},
EnableStreamingEngine: pulumi.Bool(false),
FlexrsGoal: pulumi.String("string"),
IpConfiguration: pulumi.String("string"),
KmsKeyName: pulumi.String("string"),
MachineType: pulumi.String("string"),
MaxWorkers: pulumi.Int(0),
Network: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
ServiceAccountEmail: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
TempLocation: pulumi.String("string"),
WorkerRegion: pulumi.String("string"),
WorkerZone: pulumi.String("string"),
Zone: pulumi.String("string"),
},
LaunchOptions: pulumi.StringMap{
"string": pulumi.String("string"),
},
Parameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
TransformNameMappings: pulumi.StringMap{
"string": pulumi.String("string"),
},
Update: pulumi.Bool(false),
},
Location: pulumi.String("string"),
ProjectId: pulumi.String("string"),
ValidateOnly: pulumi.Bool(false),
},
DataflowLaunchTemplateRequest: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestArgs{
ProjectId: pulumi.String("string"),
GcsPath: pulumi.String("string"),
LaunchParameters: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs{
JobName: pulumi.String("string"),
Environment: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs{
AdditionalExperiments: pulumi.StringArray{
pulumi.String("string"),
},
AdditionalUserLabels: pulumi.StringMap{
"string": pulumi.String("string"),
},
BypassTempDirValidation: pulumi.Bool(false),
EnableStreamingEngine: pulumi.Bool(false),
IpConfiguration: pulumi.String("string"),
KmsKeyName: pulumi.String("string"),
MachineType: pulumi.String("string"),
MaxWorkers: pulumi.Int(0),
Network: pulumi.String("string"),
NumWorkers: pulumi.Int(0),
ServiceAccountEmail: pulumi.String("string"),
Subnetwork: pulumi.String("string"),
TempLocation: pulumi.String("string"),
WorkerRegion: pulumi.String("string"),
WorkerZone: pulumi.String("string"),
Zone: pulumi.String("string"),
},
Parameters: pulumi.StringMap{
"string": pulumi.String("string"),
},
TransformNameMapping: pulumi.StringMap{
"string": pulumi.String("string"),
},
Update: pulumi.Bool(false),
},
Location: pulumi.String("string"),
ValidateOnly: pulumi.Bool(false),
},
},
})
var pipelineResource = new Pipeline("pipelineResource", PipelineArgs.builder()
.state("string")
.type("string")
.displayName("string")
.name("string")
.pipelineSources(Map.of("string", "string"))
.project("string")
.region("string")
.scheduleInfo(PipelineScheduleInfoArgs.builder()
.nextJobTime("string")
.schedule("string")
.timeZone("string")
.build())
.schedulerServiceAccountEmail("string")
.workload(PipelineWorkloadArgs.builder()
.dataflowFlexTemplateRequest(PipelineWorkloadDataflowFlexTemplateRequestArgs.builder()
.launchParameter(PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs.builder()
.jobName("string")
.containerSpecGcsPath("string")
.environment(PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs.builder()
.additionalExperiments("string")
.additionalUserLabels(Map.of("string", "string"))
.enableStreamingEngine(false)
.flexrsGoal("string")
.ipConfiguration("string")
.kmsKeyName("string")
.machineType("string")
.maxWorkers(0)
.network("string")
.numWorkers(0)
.serviceAccountEmail("string")
.subnetwork("string")
.tempLocation("string")
.workerRegion("string")
.workerZone("string")
.zone("string")
.build())
.launchOptions(Map.of("string", "string"))
.parameters(Map.of("string", "string"))
.transformNameMappings(Map.of("string", "string"))
.update(false)
.build())
.location("string")
.projectId("string")
.validateOnly(false)
.build())
.dataflowLaunchTemplateRequest(PipelineWorkloadDataflowLaunchTemplateRequestArgs.builder()
.projectId("string")
.gcsPath("string")
.launchParameters(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs.builder()
.jobName("string")
.environment(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs.builder()
.additionalExperiments("string")
.additionalUserLabels(Map.of("string", "string"))
.bypassTempDirValidation(false)
.enableStreamingEngine(false)
.ipConfiguration("string")
.kmsKeyName("string")
.machineType("string")
.maxWorkers(0)
.network("string")
.numWorkers(0)
.serviceAccountEmail("string")
.subnetwork("string")
.tempLocation("string")
.workerRegion("string")
.workerZone("string")
.zone("string")
.build())
.parameters(Map.of("string", "string"))
.transformNameMapping(Map.of("string", "string"))
.update(false)
.build())
.location("string")
.validateOnly(false)
.build())
.build())
.build());
pipeline_resource = gcp.dataflow.Pipeline("pipelineResource",
state="string",
type="string",
display_name="string",
name="string",
pipeline_sources={
"string": "string",
},
project="string",
region="string",
schedule_info={
"next_job_time": "string",
"schedule": "string",
"time_zone": "string",
},
scheduler_service_account_email="string",
workload={
"dataflow_flex_template_request": {
"launch_parameter": {
"job_name": "string",
"container_spec_gcs_path": "string",
"environment": {
"additional_experiments": ["string"],
"additional_user_labels": {
"string": "string",
},
"enable_streaming_engine": False,
"flexrs_goal": "string",
"ip_configuration": "string",
"kms_key_name": "string",
"machine_type": "string",
"max_workers": 0,
"network": "string",
"num_workers": 0,
"service_account_email": "string",
"subnetwork": "string",
"temp_location": "string",
"worker_region": "string",
"worker_zone": "string",
"zone": "string",
},
"launch_options": {
"string": "string",
},
"parameters": {
"string": "string",
},
"transform_name_mappings": {
"string": "string",
},
"update": False,
},
"location": "string",
"project_id": "string",
"validate_only": False,
},
"dataflow_launch_template_request": {
"project_id": "string",
"gcs_path": "string",
"launch_parameters": {
"job_name": "string",
"environment": {
"additional_experiments": ["string"],
"additional_user_labels": {
"string": "string",
},
"bypass_temp_dir_validation": False,
"enable_streaming_engine": False,
"ip_configuration": "string",
"kms_key_name": "string",
"machine_type": "string",
"max_workers": 0,
"network": "string",
"num_workers": 0,
"service_account_email": "string",
"subnetwork": "string",
"temp_location": "string",
"worker_region": "string",
"worker_zone": "string",
"zone": "string",
},
"parameters": {
"string": "string",
},
"transform_name_mapping": {
"string": "string",
},
"update": False,
},
"location": "string",
"validate_only": False,
},
})
const pipelineResource = new gcp.dataflow.Pipeline("pipelineResource", {
state: "string",
type: "string",
displayName: "string",
name: "string",
pipelineSources: {
string: "string",
},
project: "string",
region: "string",
scheduleInfo: {
nextJobTime: "string",
schedule: "string",
timeZone: "string",
},
schedulerServiceAccountEmail: "string",
workload: {
dataflowFlexTemplateRequest: {
launchParameter: {
jobName: "string",
containerSpecGcsPath: "string",
environment: {
additionalExperiments: ["string"],
additionalUserLabels: {
string: "string",
},
enableStreamingEngine: false,
flexrsGoal: "string",
ipConfiguration: "string",
kmsKeyName: "string",
machineType: "string",
maxWorkers: 0,
network: "string",
numWorkers: 0,
serviceAccountEmail: "string",
subnetwork: "string",
tempLocation: "string",
workerRegion: "string",
workerZone: "string",
zone: "string",
},
launchOptions: {
string: "string",
},
parameters: {
string: "string",
},
transformNameMappings: {
string: "string",
},
update: false,
},
location: "string",
projectId: "string",
validateOnly: false,
},
dataflowLaunchTemplateRequest: {
projectId: "string",
gcsPath: "string",
launchParameters: {
jobName: "string",
environment: {
additionalExperiments: ["string"],
additionalUserLabels: {
string: "string",
},
bypassTempDirValidation: false,
enableStreamingEngine: false,
ipConfiguration: "string",
kmsKeyName: "string",
machineType: "string",
maxWorkers: 0,
network: "string",
numWorkers: 0,
serviceAccountEmail: "string",
subnetwork: "string",
tempLocation: "string",
workerRegion: "string",
workerZone: "string",
zone: "string",
},
parameters: {
string: "string",
},
transformNameMapping: {
string: "string",
},
update: false,
},
location: "string",
validateOnly: false,
},
},
});
type: gcp:dataflow:Pipeline
properties:
displayName: string
name: string
pipelineSources:
string: string
project: string
region: string
scheduleInfo:
nextJobTime: string
schedule: string
timeZone: string
schedulerServiceAccountEmail: string
state: string
type: string
workload:
dataflowFlexTemplateRequest:
launchParameter:
containerSpecGcsPath: string
environment:
additionalExperiments:
- string
additionalUserLabels:
string: string
enableStreamingEngine: false
flexrsGoal: string
ipConfiguration: string
kmsKeyName: string
machineType: string
maxWorkers: 0
network: string
numWorkers: 0
serviceAccountEmail: string
subnetwork: string
tempLocation: string
workerRegion: string
workerZone: string
zone: string
jobName: string
launchOptions:
string: string
parameters:
string: string
transformNameMappings:
string: string
update: false
location: string
projectId: string
validateOnly: false
dataflowLaunchTemplateRequest:
gcsPath: string
launchParameters:
environment:
additionalExperiments:
- string
additionalUserLabels:
string: string
bypassTempDirValidation: false
enableStreamingEngine: false
ipConfiguration: string
kmsKeyName: string
machineType: string
maxWorkers: 0
network: string
numWorkers: 0
serviceAccountEmail: string
subnetwork: string
tempLocation: string
workerRegion: string
workerZone: string
zone: string
jobName: string
parameters:
string: string
transformNameMapping:
string: string
update: false
location: string
projectId: string
validateOnly: false
Pipeline Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Pipeline resource accepts the following input properties:
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - Display
Name string - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- Name string
- "The pipeline name. For example: projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- Pipeline
Sources Dictionary<string, string> - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- Schedule
Info PipelineSchedule Info - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- Scheduler
Service stringAccount Email - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- Workload
Pipeline
Workload - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - Display
Name string - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- Name string
- "The pipeline name. For example: projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- Pipeline
Sources map[string]string - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- Schedule
Info PipelineSchedule Info Args - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- Scheduler
Service stringAccount Email - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- Workload
Pipeline
Workload Args - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - display
Name String - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline
Sources Map<String,String> - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- schedule
Info PipelineSchedule Info - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler
Service Account Email String - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload
Pipeline
Workload - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - display
Name string - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline
Sources {[key: string]: string} - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region string
- A reference to the region
- schedule
Info PipelineSchedule Info - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler
Service Account Email string - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload
Pipeline
Workload - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state str
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type str
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - display_
name str - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name str
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline_
sources Mapping[str, str] - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region str
- A reference to the region
- schedule_
info PipelineSchedule Info Args - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler_
service_account_email str - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload
Pipeline
Workload Args - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - display
Name String - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline
Sources Map<String> - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- schedule
Info Property Map - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler
Service Account Email String - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- workload Property Map
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the Pipeline resource produces the following output properties:
- Create
Time string - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Id string
- The provider-assigned unique ID for this managed resource.
- Job
Count int - Number of jobs.
- Last
Update Time string - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Create
Time string - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Id string
- The provider-assigned unique ID for this managed resource.
- Job
Count int - Number of jobs.
- Last
Update Time string - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- create
Time String - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id String
- The provider-assigned unique ID for this managed resource.
- job
Count Integer - Number of jobs.
- last
Update Time String - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- create
Time string - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id string
- The provider-assigned unique ID for this managed resource.
- job
Count number - Number of jobs.
- last
Update Time string - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- create_
time str - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id str
- The provider-assigned unique ID for this managed resource.
- job_
count int - Number of jobs.
- last_
update_time str - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- create
Time String - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- id String
- The provider-assigned unique ID for this managed resource.
- job
Count Number - Number of jobs.
- last
Update Time String - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
Look up Existing Pipeline Resource
Get an existing Pipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PipelineState, opts?: CustomResourceOptions): Pipeline
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
create_time: Optional[str] = None,
display_name: Optional[str] = None,
job_count: Optional[int] = None,
last_update_time: Optional[str] = None,
name: Optional[str] = None,
pipeline_sources: Optional[Mapping[str, str]] = None,
project: Optional[str] = None,
region: Optional[str] = None,
schedule_info: Optional[PipelineScheduleInfoArgs] = None,
scheduler_service_account_email: Optional[str] = None,
state: Optional[str] = None,
type: Optional[str] = None,
workload: Optional[PipelineWorkloadArgs] = None) -> Pipeline
func GetPipeline(ctx *Context, name string, id IDInput, state *PipelineState, opts ...ResourceOption) (*Pipeline, error)
public static Pipeline Get(string name, Input<string> id, PipelineState? state, CustomResourceOptions? opts = null)
public static Pipeline get(String name, Output<String> id, PipelineState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Create
Time string - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Display
Name string - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- Job
Count int - Number of jobs.
- Last
Update Time string - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- Pipeline
Sources Dictionary<string, string> - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- Schedule
Info PipelineSchedule Info - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- Scheduler
Service Account Email string - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - Workload
Pipeline
Workload - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- Create
Time string - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Display
Name string - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- Job
Count int - Number of jobs.
- Last
Update Time string - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- Pipeline
Sources map[string]string - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Region string
- A reference to the region
- Schedule
Info PipelineSchedule Info Args - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- Scheduler
Service Account Email string - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- State string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - Type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - Workload
Pipeline
Workload Args - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- create
Time String - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- display
Name String - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- job
Count Integer - Number of jobs.
- last
Update Time String - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline
Sources Map<String,String> - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- schedule
Info PipelineSchedule Info - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler
Service Account Email String - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - workload
Pipeline
Workload - Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- create
Time string - The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- display
Name string - The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- job
Count number - Number of jobs.
- last
Update Time string - The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name string
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline
Sources {[key: string]: string} - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region string
- A reference to the region
- schedule
Info PipelineSchedule Info - Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler
Service Account Email string - Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state string
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type string
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - workload PipelineWorkload
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- create_time str
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- display_name str
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- job_count int
- Number of jobs.
- last_update_time str
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name str
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipeline_
sources Mapping[str, str] - The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region str
- A reference to the region
- schedule_info PipelineScheduleInfoArgs
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- scheduler_service_account_email str
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state str
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type str
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - workload PipelineWorkloadArgs
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
- createTime String
- The timestamp when the pipeline was initially created. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- displayName String
- The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
- jobCount Number
- Number of jobs.
- lastUpdateTime String
- The timestamp when the pipeline was last modified. Set by the Data Pipelines service. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- name String
- "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID." "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects." "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions." "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
- pipelineSources Map&lt;String&gt;
- The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- region String
- A reference to the region
- scheduleInfo Property Map
- Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec Structure is documented below.
- schedulerServiceAccountEmail String
- Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
- state String
- The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
Possible values are:
STATE_UNSPECIFIED
,STATE_RESUMING
,STATE_ACTIVE
,STATE_STOPPING
,STATE_ARCHIVED
,STATE_PAUSED
. - type String
- The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
Possible values are:
PIPELINE_TYPE_UNSPECIFIED
,PIPELINE_TYPE_BATCH
,PIPELINE_TYPE_STREAMING
. - workload Property Map
- Workload information for creating new jobs. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload Structure is documented below.
Supporting Types
PipelineScheduleInfo, PipelineScheduleInfoArgs
- NextJobTime string
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Schedule string
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- TimeZone string
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- NextJobTime string
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- Schedule string
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- TimeZone string
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- nextJobTime String
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule String
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- timeZone String
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- nextJobTime string
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule string
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- timeZone string
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- next_job_time str
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule str
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- time_zone str
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
- nextJobTime String
- (Output) When the next Scheduler job is going to run. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
- schedule String
- Unix-cron format of the schedule. This information is retrieved from the linked Cloud Scheduler.
- timeZone String
- Timezone ID. This matches the timezone IDs used by the Cloud Scheduler API. If empty, UTC time is assumed.
PipelineWorkload, PipelineWorkloadArgs
- DataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- DataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- DataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- DataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflowFlexTemplateRequest PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflowLaunchTemplateRequest PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflow_flex_template_request PipelineWorkloadDataflowFlexTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflow_launch_template_request PipelineWorkloadDataflowLaunchTemplateRequest
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
- dataflowFlexTemplateRequest Property Map
- Template information and additional parameters needed to launch a Dataflow job using the flex launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplaterequest Structure is documented below.
- dataflowLaunchTemplateRequest Property Map
- Template information and additional parameters needed to launch a Dataflow job using the standard launch API. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplaterequest Structure is documented below.
PipelineWorkloadDataflowFlexTemplateRequest, PipelineWorkloadDataflowFlexTemplateRequestArgs
- LaunchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- Location string
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- ProjectId string
- The ID of the Cloud Platform project that the job belongs to.
- ValidateOnly bool
- If true, the request is validated but not actually executed. Defaults to false.
- LaunchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- Location string
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- ProjectId string
- The ID of the Cloud Platform project that the job belongs to.
- ValidateOnly bool
- If true, the request is validated but not actually executed. Defaults to false.
- launchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location String
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- projectId String
- The ID of the Cloud Platform project that the job belongs to.
- validateOnly Boolean
- If true, the request is validated but not actually executed. Defaults to false.
- launchParameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location string
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- projectId string
- The ID of the Cloud Platform project that the job belongs to.
- validateOnly boolean
- If true, the request is validated but not actually executed. Defaults to false.
- launch_parameter PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location str
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- project_id str
- The ID of the Cloud Platform project that the job belongs to.
- validate_only bool
- If true, the request is validated but not actually executed. Defaults to false.
- launchParameter Property Map
- Parameter to launch a job from a Flex Template. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchflextemplateparameter Structure is documented below.
- location String
- The regional endpoint to which to direct the request. For example, us-central1, us-west1.
- projectId String
- The ID of the Cloud Platform project that the job belongs to.
- validateOnly Boolean
- If true, the request is validated but not actually executed. Defaults to false.
PipelineWorkloadDataflowFlexTemplateRequestLaunchParameter, PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterArgs
- JobName string
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- ContainerSpecGcsPath string
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- Environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- LaunchOptions Dictionary&lt;string, string&gt;
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Parameters Dictionary&lt;string, string&gt;
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMappings Dictionary&lt;string, string&gt;
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- JobName string
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- ContainerSpecGcsPath string
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- Environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- LaunchOptions map[string]string
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Parameters map[string]string
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMappings map[string]string
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- jobName String
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- containerSpecGcsPath String
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launchOptions Map&lt;String,String&gt;
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters Map&lt;String,String&gt;
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMappings Map&lt;String,String&gt;
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- jobName string
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- containerSpecGcsPath string
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launchOptions {[key: string]: string}
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters {[key: string]: string}
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMappings {[key: string]: string}
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update boolean
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- job_name str
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- container_spec_gcs_path str
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launch_options Mapping[str, str]
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters Mapping[str, str]
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transform_name_mappings Mapping[str, str]
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update bool
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
- jobName String
- The job name to use for the created job. For an update job request, the job name should be the same as the existing running job.
- containerSpecGcsPath String
- Cloud Storage path to a file with a JSON-serialized ContainerSpec as content.
- environment Property Map
- The runtime environment for the Flex Template job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexTemplateRuntimeEnvironment Structure is documented below.
- launchOptions Map&lt;String&gt;
- Launch options for this Flex Template job. This is a common set of options across languages and templates. This should not be used to pass job parameters. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- parameters Map&lt;String&gt;
- 'The parameters for the Flex Template. Example: {"numWorkers":"5"}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMappings Map&lt;String&gt;
- 'Use this to pass transform name mappings for streaming update jobs. Example: {"oldTransformName":"newTransformName",...}' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- Set this to true if you are sending a request to update a running streaming job. When set, the job name should be the same as the running job.
PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironment, PipelineWorkloadDataflowFlexTemplateRequestLaunchParameterEnvironmentArgs
- Additional
Experiments List<string> - Additional experiment flags for the job.
- AdditionalUserLabels Dictionary&lt;string, string&gt;
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- EnableStreamingEngine bool
- Whether to enable Streaming Engine for the job.
- Flexrs
Goal string - Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are:
FLEXRS_UNSPECIFIED
,FLEXRS_SPEED_OPTIMIZED
,FLEXRS_COST_OPTIMIZED
. - Ip
Configuration string - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
- KmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- MachineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- Max
Workers int - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- Num
Workers int - The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string
- The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- Temp
Location string - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- Worker
Region string - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- Worker
Zone string - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- Additional
Experiments []string - Additional experiment flags for the job.
- AdditionalUserLabels map[string]string
- Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- EnableStreamingEngine bool
- Whether to enable Streaming Engine for the job.
- Flexrs
Goal string - Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are:
FLEXRS_UNSPECIFIED
,FLEXRS_SPEED_OPTIMIZED
,FLEXRS_COST_OPTIMIZED
. - Ip
Configuration string - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
- KmsKeyName string
- 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- MachineType string
- The machine type to use for the job. Defaults to the value from the template if not specified.
- Max
Workers int - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- Num
Workers int - The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string - The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- Temp
Location string - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- Worker
Region string - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- Worker
Zone string - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional
Experiments List<String> - Additional experiment flags for the job.
- additionalUserLabels Map&lt;String,String&gt; - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- enableStreamingEngine Boolean - Whether to enable Streaming Engine for the job.
- flexrs
Goal String - Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are:
FLEXRS_UNSPECIFIED
,FLEXRS_SPEED_OPTIMIZED
,FLEXRS_COST_OPTIMIZED
. - ip
Configuration String - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
. - kmsKeyName String - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine
Type String - The machine type to use for the job. Defaults to the value from the template if not specified.
- max
Workers Integer - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num
Workers Integer - The initial number of Compute Engine instances for the job.
- serviceAccountEmail String - The email address of the service account to run the job as.
- subnetwork String
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp
Location String - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker
Region String - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker
Zone String - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional
Experiments string[] - Additional experiment flags for the job.
- additionalUserLabels {[key: string]: string} - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- enableStreamingEngine boolean - Whether to enable Streaming Engine for the job.
- flexrs
Goal string - Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are:
FLEXRS_UNSPECIFIED
,FLEXRS_SPEED_OPTIMIZED
,FLEXRS_COST_OPTIMIZED
. - ip
Configuration string - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
. - kmsKeyName string - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine
Type string - The machine type to use for the job. Defaults to the value from the template if not specified.
- max
Workers number - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num
Workers number - The initial number of Compute Engine instances for the job.
- serviceAccountEmail string - The email address of the service account to run the job as.
- subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp
Location string - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker
Region string - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker
Zone string - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional_
experiments Sequence[str] - Additional experiment flags for the job.
- additional_user_labels Mapping[str, str] - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- enable_streaming_engine bool - Whether to enable Streaming Engine for the job.
- flexrs_
goal str - Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are:
FLEXRS_UNSPECIFIED
,FLEXRS_SPEED_OPTIMIZED
,FLEXRS_COST_OPTIMIZED
. - ip_
configuration str - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
. - kms_key_name str - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine_
type str - The machine type to use for the job. Defaults to the value from the template if not specified.
- max_
workers int - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network str
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num_
workers int - The initial number of Compute Engine instances for the job.
- service_account_email str - The email address of the service account to run the job as.
- subnetwork str
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp_
location str - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker_
region str - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker_
zone str - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone str
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional
Experiments List<String> - Additional experiment flags for the job.
- additionalUserLabels Map&lt;String&gt; - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- enableStreamingEngine Boolean - Whether to enable Streaming Engine for the job.
- flexrs
Goal String - Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#FlexResourceSchedulingGoal
Possible values are:
FLEXRS_UNSPECIFIED
,FLEXRS_SPEED_OPTIMIZED
,FLEXRS_COST_OPTIMIZED
. - ip
Configuration String - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
. - kmsKeyName String - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine
Type String - The machine type to use for the job. Defaults to the value from the template if not specified.
- max
Workers Number - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num
Workers Number - The initial number of Compute Engine instances for the job.
- serviceAccountEmail String - The email address of the service account to run the job as.
- subnetwork String
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp
Location String - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker
Region String - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker
Zone String - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
PipelineWorkloadDataflowLaunchTemplateRequest, PipelineWorkloadDataflowLaunchTemplateRequestArgs
- Project
Id string - The ID of the Cloud Platform project that the job belongs to.
- Gcs
Path string - A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- Launch
Parameters PipelineWorkload Dataflow Launch Template Request Launch Parameters - The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- Location string
- The regional endpoint to which to direct the request.
- Validate
Only bool - (Optional)
- Project
Id string - The ID of the Cloud Platform project that the job belongs to.
- Gcs
Path string - A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- Launch
Parameters PipelineWorkload Dataflow Launch Template Request Launch Parameters - The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- Location string
- The regional endpoint to which to direct the request.
- Validate
Only bool - (Optional)
- project
Id String - The ID of the Cloud Platform project that the job belongs to.
- gcs
Path String - A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launch
Parameters PipelineWorkload Dataflow Launch Template Request Launch Parameters - The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location String
- The regional endpoint to which to direct the request.
- validate
Only Boolean - (Optional)
- project
Id string - The ID of the Cloud Platform project that the job belongs to.
- gcs
Path string - A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launch
Parameters PipelineWorkload Dataflow Launch Template Request Launch Parameters - The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location string
- The regional endpoint to which to direct the request.
- validate
Only boolean - (Optional)
- project_
id str - The ID of the Cloud Platform project that the job belongs to.
- gcs_
path str - A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launch_
parameters PipelineWorkload Dataflow Launch Template Request Launch Parameters - The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location str
- The regional endpoint to which to direct the request.
- validate_
only bool - (Optional)
- project
Id String - The ID of the Cloud Platform project that the job belongs to.
- gcs
Path String - A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
- launch
Parameters Property Map - The parameters of the template to launch. This should be part of the body of the POST request. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#launchtemplateparameters Structure is documented below.
- location String
- The regional endpoint to which to direct the request.
- validate
Only Boolean - (Optional)
PipelineWorkloadDataflowLaunchTemplateRequestLaunchParameters, PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs
- Job
Name string - The job name to use for the created job.
- Environment
Pipeline
Workload Dataflow Launch Template Request Launch Parameters Environment - The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- Parameters Dictionary<string, string>
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMapping Dictionary&lt;string, string&gt; - Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- Job
Name string - The job name to use for the created job.
- Environment
Pipeline
Workload Dataflow Launch Template Request Launch Parameters Environment - The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- Parameters map[string]string
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- TransformNameMapping map[string]string - Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- Update bool
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- job
Name String - The job name to use for the created job.
- environment
Pipeline
Workload Dataflow Launch Template Request Launch Parameters Environment - The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters Map<String,String>
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMapping Map&lt;String,String&gt; - Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- job
Name string - The job name to use for the created job.
- environment
Pipeline
Workload Dataflow Launch Template Request Launch Parameters Environment - The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters {[key: string]: string}
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMapping {[key: string]: string} - Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update boolean
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- job_
name str - The job name to use for the created job.
- environment
Pipeline
Workload Dataflow Launch Template Request Launch Parameters Environment - The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters Mapping[str, str]
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transform_name_mapping Mapping[str, str] - Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update bool
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- job
Name String - The job name to use for the created job.
- environment Property Map
- The runtime environment for the job. https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#RuntimeEnvironment Structure is documented below.
- parameters Map<String>
- The runtime parameters to pass to the job. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- transformNameMapping Map&lt;String&gt; - Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. Only applicable when updating a pipeline. 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- update Boolean
- If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironment, PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs
- Additional
Experiments List<string> - Additional experiment flags for the job.
- AdditionalUserLabels Dictionary&lt;string, string&gt; - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- BypassTempDirValidation bool - Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- EnableStreamingEngine bool - Whether to enable Streaming Engine for the job.
- Ip
Configuration string - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
. - KmsKeyName string - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- Machine
Type string - The machine type to use for the job. Defaults to the value from the template if not specified.
- Max
Workers int - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- Num
Workers int - The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string - The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- Temp
Location string - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- Worker
Region string - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- Worker
Zone string - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- Additional
Experiments []string - Additional experiment flags for the job.
- AdditionalUserLabels map[string]string - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- BypassTempDirValidation bool - Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- EnableStreamingEngine bool - Whether to enable Streaming Engine for the job.
- Ip
Configuration string - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
. - KmsKeyName string - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- Machine
Type string - The machine type to use for the job. Defaults to the value from the template if not specified.
- Max
Workers int - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- Network string
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- Num
Workers int - The initial number of Compute Engine instances for the job.
- ServiceAccountEmail string - The email address of the service account to run the job as.
- Subnetwork string
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- Temp
Location string - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- Worker
Region string - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- Worker
Zone string - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- Zone string
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional
Experiments List<String> - Additional experiment flags for the job.
- additionalUserLabels Map&lt;String,String&gt; - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- bypassTempDirValidation Boolean - Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enableStreamingEngine Boolean - Whether to enable Streaming Engine for the job.
- ip
Configuration String - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are:
WORKER_IP_UNSPECIFIED
,WORKER_IP_PUBLIC
,WORKER_IP_PRIVATE
. - kmsKeyName String - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine
Type String - The machine type to use for the job. Defaults to the value from the template if not specified.
- max
Workers Integer - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String
- Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num
Workers Integer - The initial number of Compute Engine instances for the job.
- service
Account StringEmail - The email address of the service account to run the job as.
- subnetwork String
- Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp
Location String - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker
Region String - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker
Zone String - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String
- The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments string[] - Additional experiment flags for the job.
- additionalUserLabels {[key: string]: string} - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- bypassTempDirValidation boolean - Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enableStreamingEngine boolean - Whether to enable Streaming Engine for the job.
- ipConfiguration string - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kmsKeyName string - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType string - The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers number - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network string - Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers number - The initial number of Compute Engine instances for the job.
- serviceAccountEmail string - The email address of the service account to run the job as.
- subnetwork string - Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation string - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion string - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone string - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone string - The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additional_experiments Sequence[str] - Additional experiment flags for the job.
- additional_user_labels Mapping[str, str] - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- bypass_temp_dir_validation bool - Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enable_streaming_engine bool - Whether to enable Streaming Engine for the job.
- ip_configuration str - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kms_key_name str - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machine_type str - The machine type to use for the job. Defaults to the value from the template if not specified.
- max_workers int - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network str - Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- num_workers int - The initial number of Compute Engine instances for the job.
- service_account_email str - The email address of the service account to run the job as.
- subnetwork str - Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- temp_location str - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- worker_region str - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- worker_zone str - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone str - The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
- additionalExperiments List&lt;String&gt; - Additional experiment flags for the job.
- additionalUserLabels Map&lt;String&gt; - Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. An object containing a list of key/value pairs. 'Example: { "name": "wrench", "mass": "1kg", "count": "3" }.' 'An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
- bypassTempDirValidation Boolean - Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- enableStreamingEngine Boolean - Whether to enable Streaming Engine for the job.
- ipConfiguration String - Configuration for VM IPs.
https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#WorkerIPAddressConfiguration
Possible values are: WORKER_IP_UNSPECIFIED, WORKER_IP_PUBLIC, WORKER_IP_PRIVATE.
- kmsKeyName String - 'Name for the Cloud KMS key for the job. The key format is: projects//locations//keyRings//cryptoKeys/'
- machineType String - The machine type to use for the job. Defaults to the value from the template if not specified.
- maxWorkers Number - The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- network String - Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- numWorkers Number - The initial number of Compute Engine instances for the job.
- serviceAccountEmail String - The email address of the service account to run the job as.
- subnetwork String - Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- tempLocation String - The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
- workerRegion String - The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with workerZone. If neither workerRegion nor workerZone is specified, default to the control plane's region.
- workerZone String - The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with workerRegion. If neither workerRegion nor workerZone is specified, a zone in the control plane's region is chosen based on available capacity. If both workerZone and zone are set, workerZone takes precedence.
- zone String - The Compute Engine availability zone for launching worker instances to run your pipeline. In the future, workerZone will take precedence.
Import
Pipeline can be imported using any of these accepted formats:
projects/{{project}}/locations/{{region}}/pipelines/{{name}}
{{project}}/{{region}}/{{name}}
{{region}}/{{name}}
{{name}}
When using the pulumi import
command, Pipeline can be imported using one of the formats above. For example:
$ pulumi import gcp:dataflow/pipeline:Pipeline default projects/{{project}}/locations/{{region}}/pipelines/{{name}}
$ pulumi import gcp:dataflow/pipeline:Pipeline default {{project}}/{{region}}/{{name}}
$ pulumi import gcp:dataflow/pipeline:Pipeline default {{region}}/{{name}}
$ pulumi import gcp:dataflow/pipeline:Pipeline default {{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.