gcp.healthcare.PipelineJob
PipelineJobs are long-running operations on the Healthcare API that map or reconcile incoming data into FHIR format.
To get more information about PipelineJob, see:
- API documentation
- How-to Guides
Example Usage
Healthcare Pipeline Job Reconciliation
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const dataset = new gcp.healthcare.Dataset("dataset", {
name: "example_dataset",
location: "us-central1",
});
const fhirstore = new gcp.healthcare.FhirStore("fhirstore", {
name: "fhir_store",
dataset: dataset.id,
version: "R4",
enableUpdateCreate: true,
disableReferentialIntegrity: true,
});
const bucket = new gcp.storage.Bucket("bucket", {
name: "example_bucket_name",
location: "us-central1",
uniformBucketLevelAccess: true,
});
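// merge.wstl is expected to contain the Whistle reconciliation rules; a single
// space is used here as a placeholder for real rule content.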
const mergeFile = new gcp.storage.BucketObject("merge_file", {
name: "merge.wstl",
content: " ",
bucket: bucket.name,
});
const example_pipeline = new gcp.healthcare.PipelineJob("example-pipeline", {
name: "example_pipeline_job",
location: "us-central1",
dataset: dataset.id,
disableLineage: true,
reconciliationPipelineJob: {
mergeConfig: {
description: "sample description for reconciliation rules",
whistleConfigSource: {
uri: pulumi.interpolate`gs://${bucket.name}/${mergeFile.name}`,
importUriPrefix: pulumi.interpolate`gs://${bucket.name}`,
},
},
matchingUriPrefix: pulumi.interpolate`gs://${bucket.name}`,
fhirStoreDestination: pulumi.interpolate`${dataset.id}/fhirStores/${fhirstore.name}`,
},
});
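// Grant the Healthcare Service Agent access to the bucket so the pipeline can
// read the Whistle configuration files.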
const hsa = new gcp.storage.BucketIAMMember("hsa", {
bucket: bucket.name,
role: "roles/storage.objectUser",
member: project.then(project => `serviceAccount:service-${project.number}@gcp-sa-healthcare.iam.gserviceaccount.com`),
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
dataset = gcp.healthcare.Dataset("dataset",
name="example_dataset",
location="us-central1")
fhirstore = gcp.healthcare.FhirStore("fhirstore",
name="fhir_store",
dataset=dataset.id,
version="R4",
enable_update_create=True,
disable_referential_integrity=True)
bucket = gcp.storage.Bucket("bucket",
name="example_bucket_name",
location="us-central1",
uniform_bucket_level_access=True)
merge_file = gcp.storage.BucketObject("merge_file",
name="merge.wstl",
content=" ",
bucket=bucket.name)
example_pipeline = gcp.healthcare.PipelineJob("example-pipeline",
name="example_pipeline_job",
location="us-central1",
dataset=dataset.id,
disable_lineage=True,
reconciliation_pipeline_job={
"merge_config": {
"description": "sample description for reconciliation rules",
"whistle_config_source": {
"uri": pulumi.Output.all(
bucketName=bucket.name,
mergeFileName=merge_file.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['mergeFileName']}")
,
"import_uri_prefix": bucket.name.apply(lambda name: f"gs://{name}"),
},
},
"matching_uri_prefix": bucket.name.apply(lambda name: f"gs://{name}"),
"fhir_store_destination": pulumi.Output.all(
id=dataset.id,
name=fhirstore.name
).apply(lambda resolved_outputs: f"{resolved_outputs['id']}/fhirStores/{resolved_outputs['name']}")
,
})
hsa = gcp.storage.BucketIAMMember("hsa",
bucket=bucket.name,
role="roles/storage.objectUser",
member=f"serviceAccount:service-{project.number}@gcp-sa-healthcare.iam.gserviceaccount.com")
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/healthcare"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
if err != nil {
return err
}
dataset, err := healthcare.NewDataset(ctx, "dataset", &healthcare.DatasetArgs{
Name: pulumi.String("example_dataset"),
Location: pulumi.String("us-central1"),
})
if err != nil {
return err
}
fhirstore, err := healthcare.NewFhirStore(ctx, "fhirstore", &healthcare.FhirStoreArgs{
Name: pulumi.String("fhir_store"),
Dataset: dataset.ID(),
Version: pulumi.String("R4"),
EnableUpdateCreate: pulumi.Bool(true),
DisableReferentialIntegrity: pulumi.Bool(true),
})
if err != nil {
return err
}
bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
Name: pulumi.String("example_bucket_name"),
Location: pulumi.String("us-central1"),
UniformBucketLevelAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
mergeFile, err := storage.NewBucketObject(ctx, "merge_file", &storage.BucketObjectArgs{
Name: pulumi.String("merge.wstl"),
Content: pulumi.String(" "),
Bucket: bucket.Name,
})
if err != nil {
return err
}
_, err = healthcare.NewPipelineJob(ctx, "example-pipeline", &healthcare.PipelineJobArgs{
Name: pulumi.String("example_pipeline_job"),
Location: pulumi.String("us-central1"),
Dataset: dataset.ID(),
DisableLineage: pulumi.Bool(true),
ReconciliationPipelineJob: &healthcare.PipelineJobReconciliationPipelineJobArgs{
MergeConfig: &healthcare.PipelineJobReconciliationPipelineJobMergeConfigArgs{
Description: pulumi.String("sample description for reconciliation rules"),
WhistleConfigSource: &healthcare.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs{
Uri: pulumi.All(bucket.Name, mergeFile.Name).ApplyT(func(_args []interface{}) (string, error) {
bucketName := _args[0].(string)
mergeFileName := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucketName, mergeFileName), nil
}).(pulumi.StringOutput),
ImportUriPrefix: bucket.Name.ApplyT(func(name string) (string, error) {
return fmt.Sprintf("gs://%v", name), nil
}).(pulumi.StringOutput),
},
},
MatchingUriPrefix: bucket.Name.ApplyT(func(name string) (string, error) {
return fmt.Sprintf("gs://%v", name), nil
}).(pulumi.StringOutput),
FhirStoreDestination: pulumi.All(dataset.ID(), fhirstore.Name).ApplyT(func(_args []interface{}) (string, error) {
id := _args[0].(string)
name := _args[1].(string)
return fmt.Sprintf("%v/fhirStores/%v", id, name), nil
}).(pulumi.StringOutput),
},
})
if err != nil {
return err
}
_, err = storage.NewBucketIAMMember(ctx, "hsa", &storage.BucketIAMMemberArgs{
Bucket: bucket.Name,
Role: pulumi.String("roles/storage.objectUser"),
Member: pulumi.Sprintf("serviceAccount:service-%v@gcp-sa-healthcare.iam.gserviceaccount.com", project.Number),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = Gcp.Organizations.GetProject.Invoke();
var dataset = new Gcp.Healthcare.Dataset("dataset", new()
{
Name = "example_dataset",
Location = "us-central1",
});
var fhirstore = new Gcp.Healthcare.FhirStore("fhirstore", new()
{
Name = "fhir_store",
Dataset = dataset.Id,
Version = "R4",
EnableUpdateCreate = true,
DisableReferentialIntegrity = true,
});
var bucket = new Gcp.Storage.Bucket("bucket", new()
{
Name = "example_bucket_name",
Location = "us-central1",
UniformBucketLevelAccess = true,
});
var mergeFile = new Gcp.Storage.BucketObject("merge_file", new()
{
Name = "merge.wstl",
Content = " ",
Bucket = bucket.Name,
});
var example_pipeline = new Gcp.Healthcare.PipelineJob("example-pipeline", new()
{
Name = "example_pipeline_job",
Location = "us-central1",
Dataset = dataset.Id,
DisableLineage = true,
ReconciliationPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobArgs
{
MergeConfig = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobMergeConfigArgs
{
Description = "sample description for reconciliation rules",
WhistleConfigSource = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs
{
Uri = Output.Tuple(bucket.Name, mergeFile.Name).Apply(values =>
{
var bucketName = values.Item1;
var mergeFileName = values.Item2;
return $"gs://{bucketName}/{mergeFileName}";
}),
ImportUriPrefix = bucket.Name.Apply(name => $"gs://{name}"),
},
},
MatchingUriPrefix = bucket.Name.Apply(name => $"gs://{name}"),
FhirStoreDestination = Output.Tuple(dataset.Id, fhirstore.Name).Apply(values =>
{
var id = values.Item1;
var name = values.Item2;
return $"{id}/fhirStores/{name}";
}),
},
});
var hsa = new Gcp.Storage.BucketIAMMember("hsa", new()
{
Bucket = bucket.Name,
Role = "roles/storage.objectUser",
Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@gcp-sa-healthcare.iam.gserviceaccount.com",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.healthcare.Dataset;
import com.pulumi.gcp.healthcare.DatasetArgs;
import com.pulumi.gcp.healthcare.FhirStore;
import com.pulumi.gcp.healthcare.FhirStoreArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.healthcare.PipelineJob;
import com.pulumi.gcp.healthcare.PipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobReconciliationPipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobReconciliationPipelineJobMergeConfigArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs;
import com.pulumi.gcp.storage.BucketIAMMember;
import com.pulumi.gcp.storage.BucketIAMMemberArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = OrganizationsFunctions.getProject();
var dataset = new Dataset("dataset", DatasetArgs.builder()
.name("example_dataset")
.location("us-central1")
.build());
var fhirstore = new FhirStore("fhirstore", FhirStoreArgs.builder()
.name("fhir_store")
.dataset(dataset.id())
.version("R4")
.enableUpdateCreate(true)
.disableReferentialIntegrity(true)
.build());
var bucket = new Bucket("bucket", BucketArgs.builder()
.name("example_bucket_name")
.location("us-central1")
.uniformBucketLevelAccess(true)
.build());
var mergeFile = new BucketObject("mergeFile", BucketObjectArgs.builder()
.name("merge.wstl")
.content(" ")
.bucket(bucket.name())
.build());
var example_pipeline = new PipelineJob("example-pipeline", PipelineJobArgs.builder()
.name("example_pipeline_job")
.location("us-central1")
.dataset(dataset.id())
.disableLineage(true)
.reconciliationPipelineJob(PipelineJobReconciliationPipelineJobArgs.builder()
.mergeConfig(PipelineJobReconciliationPipelineJobMergeConfigArgs.builder()
.description("sample description for reconciliation rules")
.whistleConfigSource(PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs.builder()
.uri(Output.tuple(bucket.name(), mergeFile.name()).applyValue(values -> {
var bucketName = values.t1;
var mergeFileName = values.t2;
return String.format("gs://%s/%s", bucketName,mergeFileName);
}))
.importUriPrefix(bucket.name().applyValue(name -> String.format("gs://%s", name)))
.build())
.build())
.matchingUriPrefix(bucket.name().applyValue(name -> String.format("gs://%s", name)))
.fhirStoreDestination(Output.tuple(dataset.id(), fhirstore.name()).applyValue(values -> {
var id = values.t1;
var name = values.t2;
return String.format("%s/fhirStores/%s", id,name);
}))
.build())
.build());
var hsa = new BucketIAMMember("hsa", BucketIAMMemberArgs.builder()
.bucket(bucket.name())
.role("roles/storage.objectUser")
.member(project.applyValue(getProjectResult -> String.format("serviceAccount:service-%s@gcp-sa-healthcare.iam.gserviceaccount.com", getProjectResult.number())))
.build());
}
}
resources:
example-pipeline:
type: gcp:healthcare:PipelineJob
properties:
name: example_pipeline_job
location: us-central1
dataset: ${dataset.id}
disableLineage: true
reconciliationPipelineJob:
mergeConfig:
description: sample description for reconciliation rules
whistleConfigSource:
uri: gs://${bucket.name}/${mergeFile.name}
importUriPrefix: gs://${bucket.name}
matchingUriPrefix: gs://${bucket.name}
fhirStoreDestination: ${dataset.id}/fhirStores/${fhirstore.name}
dataset:
type: gcp:healthcare:Dataset
properties:
name: example_dataset
location: us-central1
fhirstore:
type: gcp:healthcare:FhirStore
properties:
name: fhir_store
dataset: ${dataset.id}
version: R4
enableUpdateCreate: true
disableReferentialIntegrity: true
bucket:
type: gcp:storage:Bucket
properties:
name: example_bucket_name
location: us-central1
uniformBucketLevelAccess: true
mergeFile:
type: gcp:storage:BucketObject
name: merge_file
properties:
name: merge.wstl
content: ' '
bucket: ${bucket.name}
hsa:
type: gcp:storage:BucketIAMMember
properties:
bucket: ${bucket.name}
role: roles/storage.objectUser
member: serviceAccount:service-${project.number}@gcp-sa-healthcare.iam.gserviceaccount.com
variables:
project:
fn::invoke:
Function: gcp:organizations:getProject
Arguments: {}
Healthcare Pipeline Job Backfill
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const dataset = new gcp.healthcare.Dataset("dataset", {
name: "example_dataset",
location: "us-central1",
});
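// A backfill pipeline re-processes data through an existing mapping pipeline;
// the mapping pipeline job referenced below is assumed to already exist.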
const example_pipeline = new gcp.healthcare.PipelineJob("example-pipeline", {
name: "example_backfill_pipeline",
location: "us-central1",
dataset: dataset.id,
backfillPipelineJob: {
mappingPipelineJob: pulumi.interpolate`${dataset.id}/pipelinejobs/example_mapping_pipeline`,
},
});
import pulumi
import pulumi_gcp as gcp
dataset = gcp.healthcare.Dataset("dataset",
name="example_dataset",
location="us-central1")
example_pipeline = gcp.healthcare.PipelineJob("example-pipeline",
name="example_backfill_pipeline",
location="us-central1",
dataset=dataset.id,
backfill_pipeline_job={
"mapping_pipeline_job": dataset.id.apply(lambda id: f"{id}/pipelinejobs/example_mapping_pipeline"),
})
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/healthcare"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
dataset, err := healthcare.NewDataset(ctx, "dataset", &healthcare.DatasetArgs{
Name: pulumi.String("example_dataset"),
Location: pulumi.String("us-central1"),
})
if err != nil {
return err
}
_, err = healthcare.NewPipelineJob(ctx, "example-pipeline", &healthcare.PipelineJobArgs{
Name: pulumi.String("example_backfill_pipeline"),
Location: pulumi.String("us-central1"),
Dataset: dataset.ID(),
BackfillPipelineJob: &healthcare.PipelineJobBackfillPipelineJobArgs{
MappingPipelineJob: dataset.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("%v/pipelinejobs/example_mapping_pipeline", id), nil
}).(pulumi.StringOutput),
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var dataset = new Gcp.Healthcare.Dataset("dataset", new()
{
Name = "example_dataset",
Location = "us-central1",
});
var example_pipeline = new Gcp.Healthcare.PipelineJob("example-pipeline", new()
{
Name = "example_backfill_pipeline",
Location = "us-central1",
Dataset = dataset.Id,
BackfillPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobBackfillPipelineJobArgs
{
MappingPipelineJob = dataset.Id.Apply(id => $"{id}/pipelinejobs/example_mapping_pipeline"),
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.healthcare.Dataset;
import com.pulumi.gcp.healthcare.DatasetArgs;
import com.pulumi.gcp.healthcare.PipelineJob;
import com.pulumi.gcp.healthcare.PipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobBackfillPipelineJobArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var dataset = new Dataset("dataset", DatasetArgs.builder()
.name("example_dataset")
.location("us-central1")
.build());
var example_pipeline = new PipelineJob("example-pipeline", PipelineJobArgs.builder()
.name("example_backfill_pipeline")
.location("us-central1")
.dataset(dataset.id())
.backfillPipelineJob(PipelineJobBackfillPipelineJobArgs.builder()
.mappingPipelineJob(dataset.id().applyValue(id -> String.format("%s/pipelinejobs/example_mapping_pipeline", id)))
.build())
.build());
}
}
resources:
example-pipeline:
type: gcp:healthcare:PipelineJob
properties:
name: example_backfill_pipeline
location: us-central1
dataset: ${dataset.id}
backfillPipelineJob:
mappingPipelineJob: ${dataset.id}/pipelinejobs/example_mapping_pipeline
dataset:
type: gcp:healthcare:Dataset
properties:
name: example_dataset
location: us-central1
Healthcare Pipeline Job Whistle Mapping
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const dataset = new gcp.healthcare.Dataset("dataset", {
name: "example_dataset",
location: "us-central1",
});
const sourceFhirstore = new gcp.healthcare.FhirStore("source_fhirstore", {
name: "source_fhir_store",
dataset: dataset.id,
version: "R4",
enableUpdateCreate: true,
disableReferentialIntegrity: true,
});
const destFhirstore = new gcp.healthcare.FhirStore("dest_fhirstore", {
name: "dest_fhir_store",
dataset: dataset.id,
version: "R4",
enableUpdateCreate: true,
disableReferentialIntegrity: true,
});
const bucket = new gcp.storage.Bucket("bucket", {
name: "example_bucket_name",
location: "us-central1",
uniformBucketLevelAccess: true,
});
const mappingFile = new gcp.storage.BucketObject("mapping_file", {
name: "mapping.wstl",
content: " ",
bucket: bucket.name,
});
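// The mapping pipeline streams resources from the source FHIR store, transforms
// them with the Whistle config in the bucket, and writes the results to the
// destination FHIR store.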
const example_mapping_pipeline = new gcp.healthcare.PipelineJob("example-mapping-pipeline", {
name: "example_mapping_pipeline_job",
location: "us-central1",
dataset: dataset.id,
disableLineage: true,
labels: {
example_label_key: "example_label_value",
},
mappingPipelineJob: {
mappingConfig: {
whistleConfigSource: {
uri: pulumi.interpolate`gs://${bucket.name}/${mappingFile.name}`,
importUriPrefix: pulumi.interpolate`gs://${bucket.name}`,
},
description: "example description for mapping configuration",
},
fhirStreamingSource: {
fhirStore: pulumi.interpolate`${dataset.id}/fhirStores/${sourceFhirstore.name}`,
description: "example description for streaming fhirstore",
},
fhirStoreDestination: pulumi.interpolate`${dataset.id}/fhirStores/${destFhirstore.name}`,
},
});
const hsa = new gcp.storage.BucketIAMMember("hsa", {
bucket: bucket.name,
role: "roles/storage.objectUser",
member: project.then(project => `serviceAccount:service-${project.number}@gcp-sa-healthcare.iam.gserviceaccount.com`),
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
dataset = gcp.healthcare.Dataset("dataset",
name="example_dataset",
location="us-central1")
source_fhirstore = gcp.healthcare.FhirStore("source_fhirstore",
name="source_fhir_store",
dataset=dataset.id,
version="R4",
enable_update_create=True,
disable_referential_integrity=True)
dest_fhirstore = gcp.healthcare.FhirStore("dest_fhirstore",
name="dest_fhir_store",
dataset=dataset.id,
version="R4",
enable_update_create=True,
disable_referential_integrity=True)
bucket = gcp.storage.Bucket("bucket",
name="example_bucket_name",
location="us-central1",
uniform_bucket_level_access=True)
mapping_file = gcp.storage.BucketObject("mapping_file",
name="mapping.wstl",
content=" ",
bucket=bucket.name)
example_mapping_pipeline = gcp.healthcare.PipelineJob("example-mapping-pipeline",
name="example_mapping_pipeline_job",
location="us-central1",
dataset=dataset.id,
disable_lineage=True,
labels={
"example_label_key": "example_label_value",
},
mapping_pipeline_job={
"mapping_config": {
"whistle_config_source": {
"uri": pulumi.Output.all(
bucketName=bucket.name,
mappingFileName=mapping_file.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['mappingFileName']}")
,
"import_uri_prefix": bucket.name.apply(lambda name: f"gs://{name}"),
},
"description": "example description for mapping configuration",
},
"fhir_streaming_source": {
"fhir_store": pulumi.Output.all(
id=dataset.id,
name=source_fhirstore.name
).apply(lambda resolved_outputs: f"{resolved_outputs['id']}/fhirStores/{resolved_outputs['name']}")
,
"description": "example description for streaming fhirstore",
},
"fhir_store_destination": pulumi.Output.all(
id=dataset.id,
name=dest_fhirstore.name
).apply(lambda resolved_outputs: f"{resolved_outputs['id']}/fhirStores/{resolved_outputs['name']}")
,
})
hsa = gcp.storage.BucketIAMMember("hsa",
bucket=bucket.name,
role="roles/storage.objectUser",
member=f"serviceAccount:service-{project.number}@gcp-sa-healthcare.iam.gserviceaccount.com")
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/healthcare"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
if err != nil {
return err
}
dataset, err := healthcare.NewDataset(ctx, "dataset", &healthcare.DatasetArgs{
Name: pulumi.String("example_dataset"),
Location: pulumi.String("us-central1"),
})
if err != nil {
return err
}
sourceFhirstore, err := healthcare.NewFhirStore(ctx, "source_fhirstore", &healthcare.FhirStoreArgs{
Name: pulumi.String("source_fhir_store"),
Dataset: dataset.ID(),
Version: pulumi.String("R4"),
EnableUpdateCreate: pulumi.Bool(true),
DisableReferentialIntegrity: pulumi.Bool(true),
})
if err != nil {
return err
}
destFhirstore, err := healthcare.NewFhirStore(ctx, "dest_fhirstore", &healthcare.FhirStoreArgs{
Name: pulumi.String("dest_fhir_store"),
Dataset: dataset.ID(),
Version: pulumi.String("R4"),
EnableUpdateCreate: pulumi.Bool(true),
DisableReferentialIntegrity: pulumi.Bool(true),
})
if err != nil {
return err
}
bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
Name: pulumi.String("example_bucket_name"),
Location: pulumi.String("us-central1"),
UniformBucketLevelAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
mappingFile, err := storage.NewBucketObject(ctx, "mapping_file", &storage.BucketObjectArgs{
Name: pulumi.String("mapping.wstl"),
Content: pulumi.String(" "),
Bucket: bucket.Name,
})
if err != nil {
return err
}
_, err = healthcare.NewPipelineJob(ctx, "example-mapping-pipeline", &healthcare.PipelineJobArgs{
Name: pulumi.String("example_mapping_pipeline_job"),
Location: pulumi.String("us-central1"),
Dataset: dataset.ID(),
DisableLineage: pulumi.Bool(true),
Labels: pulumi.StringMap{
"example_label_key": pulumi.String("example_label_value"),
},
MappingPipelineJob: &healthcare.PipelineJobMappingPipelineJobArgs{
MappingConfig: &healthcare.PipelineJobMappingPipelineJobMappingConfigArgs{
WhistleConfigSource: &healthcare.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs{
Uri: pulumi.All(bucket.Name, mappingFile.Name).ApplyT(func(_args []interface{}) (string, error) {
bucketName := _args[0].(string)
mappingFileName := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucketName, mappingFileName), nil
}).(pulumi.StringOutput),
ImportUriPrefix: bucket.Name.ApplyT(func(name string) (string, error) {
return fmt.Sprintf("gs://%v", name), nil
}).(pulumi.StringOutput),
},
Description: pulumi.String("example description for mapping configuration"),
},
FhirStreamingSource: &healthcare.PipelineJobMappingPipelineJobFhirStreamingSourceArgs{
FhirStore: pulumi.All(dataset.ID(), sourceFhirstore.Name).ApplyT(func(_args []interface{}) (string, error) {
id := _args[0].(string)
name := _args[1].(string)
return fmt.Sprintf("%v/fhirStores/%v", id, name), nil
}).(pulumi.StringOutput),
Description: pulumi.String("example description for streaming fhirstore"),
},
FhirStoreDestination: pulumi.All(dataset.ID(), destFhirstore.Name).ApplyT(func(_args []interface{}) (string, error) {
id := _args[0].(string)
name := _args[1].(string)
return fmt.Sprintf("%v/fhirStores/%v", id, name), nil
}).(pulumi.StringOutput),
},
})
if err != nil {
return err
}
_, err = storage.NewBucketIAMMember(ctx, "hsa", &storage.BucketIAMMemberArgs{
Bucket: bucket.Name,
Role: pulumi.String("roles/storage.objectUser"),
Member: pulumi.Sprintf("serviceAccount:service-%v@gcp-sa-healthcare.iam.gserviceaccount.com", project.Number),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = Gcp.Organizations.GetProject.Invoke();
var dataset = new Gcp.Healthcare.Dataset("dataset", new()
{
Name = "example_dataset",
Location = "us-central1",
});
var sourceFhirstore = new Gcp.Healthcare.FhirStore("source_fhirstore", new()
{
Name = "source_fhir_store",
Dataset = dataset.Id,
Version = "R4",
EnableUpdateCreate = true,
DisableReferentialIntegrity = true,
});
var destFhirstore = new Gcp.Healthcare.FhirStore("dest_fhirstore", new()
{
Name = "dest_fhir_store",
Dataset = dataset.Id,
Version = "R4",
EnableUpdateCreate = true,
DisableReferentialIntegrity = true,
});
var bucket = new Gcp.Storage.Bucket("bucket", new()
{
Name = "example_bucket_name",
Location = "us-central1",
UniformBucketLevelAccess = true,
});
var mappingFile = new Gcp.Storage.BucketObject("mapping_file", new()
{
Name = "mapping.wstl",
Content = " ",
Bucket = bucket.Name,
});
var example_mapping_pipeline = new Gcp.Healthcare.PipelineJob("example-mapping-pipeline", new()
{
Name = "example_mapping_pipeline_job",
Location = "us-central1",
Dataset = dataset.Id,
DisableLineage = true,
Labels =
{
{ "example_label_key", "example_label_value" },
},
MappingPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobArgs
{
MappingConfig = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobMappingConfigArgs
{
WhistleConfigSource = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs
{
Uri = Output.Tuple(bucket.Name, mappingFile.Name).Apply(values =>
{
var bucketName = values.Item1;
var mappingFileName = values.Item2;
return $"gs://{bucketName}/{mappingFileName}";
}),
ImportUriPrefix = bucket.Name.Apply(name => $"gs://{name}"),
},
Description = "example description for mapping configuration",
},
FhirStreamingSource = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobFhirStreamingSourceArgs
{
FhirStore = Output.Tuple(dataset.Id, sourceFhirstore.Name).Apply(values =>
{
var id = values.Item1;
var name = values.Item2;
return $"{id}/fhirStores/{name}";
}),
Description = "example description for streaming fhirstore",
},
FhirStoreDestination = Output.Tuple(dataset.Id, destFhirstore.Name).Apply(values =>
{
var id = values.Item1;
var name = values.Item2;
return $"{id}/fhirStores/{name}";
}),
},
});
var hsa = new Gcp.Storage.BucketIAMMember("hsa", new()
{
Bucket = bucket.Name,
Role = "roles/storage.objectUser",
Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@gcp-sa-healthcare.iam.gserviceaccount.com",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.healthcare.Dataset;
import com.pulumi.gcp.healthcare.DatasetArgs;
import com.pulumi.gcp.healthcare.FhirStore;
import com.pulumi.gcp.healthcare.FhirStoreArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.healthcare.PipelineJob;
import com.pulumi.gcp.healthcare.PipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobMappingConfigArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobFhirStreamingSourceArgs;
import com.pulumi.gcp.storage.BucketIAMMember;
import com.pulumi.gcp.storage.BucketIAMMemberArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = OrganizationsFunctions.getProject();
var dataset = new Dataset("dataset", DatasetArgs.builder()
.name("example_dataset")
.location("us-central1")
.build());
var sourceFhirstore = new FhirStore("sourceFhirstore", FhirStoreArgs.builder()
.name("source_fhir_store")
.dataset(dataset.id())
.version("R4")
.enableUpdateCreate(true)
.disableReferentialIntegrity(true)
.build());
var destFhirstore = new FhirStore("destFhirstore", FhirStoreArgs.builder()
.name("dest_fhir_store")
.dataset(dataset.id())
.version("R4")
.enableUpdateCreate(true)
.disableReferentialIntegrity(true)
.build());
var bucket = new Bucket("bucket", BucketArgs.builder()
.name("example_bucket_name")
.location("us-central1")
.uniformBucketLevelAccess(true)
.build());
var mappingFile = new BucketObject("mappingFile", BucketObjectArgs.builder()
.name("mapping.wstl")
.content(" ")
.bucket(bucket.name())
.build());
var example_mapping_pipeline = new PipelineJob("example-mapping-pipeline", PipelineJobArgs.builder()
.name("example_mapping_pipeline_job")
.location("us-central1")
.dataset(dataset.id())
.disableLineage(true)
.labels(Map.of("example_label_key", "example_label_value"))
.mappingPipelineJob(PipelineJobMappingPipelineJobArgs.builder()
.mappingConfig(PipelineJobMappingPipelineJobMappingConfigArgs.builder()
.whistleConfigSource(PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs.builder()
.uri(Output.tuple(bucket.name(), mappingFile.name()).applyValue(values -> {
var bucketName = values.t1;
var mappingFileName = values.t2;
return String.format("gs://%s/%s", bucketName,mappingFileName);
}))
.importUriPrefix(bucket.name().applyValue(name -> String.format("gs://%s", name)))
.build())
.description("example description for mapping configuration")
.build())
.fhirStreamingSource(PipelineJobMappingPipelineJobFhirStreamingSourceArgs.builder()
.fhirStore(Output.tuple(dataset.id(), sourceFhirstore.name()).applyValue(values -> {
var id = values.t1;
var name = values.t2;
return String.format("%s/fhirStores/%s", id,name);
}))
.description("example description for streaming fhirstore")
.build())
.fhirStoreDestination(Output.tuple(dataset.id(), destFhirstore.name()).applyValue(values -> {
var id = values.t1;
var name = values.t2;
return String.format("%s/fhirStores/%s", id,name);
}))
.build())
.build());
var hsa = new BucketIAMMember("hsa", BucketIAMMemberArgs.builder()
.bucket(bucket.name())
.role("roles/storage.objectUser")
.member(project.applyValue(getProjectResult -> String.format("serviceAccount:service-%s@gcp-sa-healthcare.iam.gserviceaccount.com", getProjectResult.number())))
.build());
}
}
resources:
example-mapping-pipeline:
type: gcp:healthcare:PipelineJob
properties:
name: example_mapping_pipeline_job
location: us-central1
dataset: ${dataset.id}
disableLineage: true
labels:
example_label_key: example_label_value
mappingPipelineJob:
mappingConfig:
whistleConfigSource:
uri: gs://${bucket.name}/${mappingFile.name}
importUriPrefix: gs://${bucket.name}
description: example description for mapping configuration
fhirStreamingSource:
fhirStore: ${dataset.id}/fhirStores/${sourceFhirstore.name}
description: example description for streaming fhirstore
fhirStoreDestination: ${dataset.id}/fhirStores/${destFhirstore.name}
dataset:
type: gcp:healthcare:Dataset
properties:
name: example_dataset
location: us-central1
sourceFhirstore:
type: gcp:healthcare:FhirStore
name: source_fhirstore
properties:
name: source_fhir_store
dataset: ${dataset.id}
version: R4
enableUpdateCreate: true
disableReferentialIntegrity: true
destFhirstore:
type: gcp:healthcare:FhirStore
name: dest_fhirstore
properties:
name: dest_fhir_store
dataset: ${dataset.id}
version: R4
enableUpdateCreate: true
disableReferentialIntegrity: true
bucket:
type: gcp:storage:Bucket
properties:
name: example_bucket_name
location: us-central1
uniformBucketLevelAccess: true
mappingFile:
type: gcp:storage:BucketObject
name: mapping_file
properties:
name: mapping.wstl
content: ' '
bucket: ${bucket.name}
hsa:
type: gcp:storage:BucketIAMMember
properties:
bucket: ${bucket.name}
role: roles/storage.objectUser
member: serviceAccount:service-${project.number}@gcp-sa-healthcare.iam.gserviceaccount.com
variables:
project:
fn::invoke:
Function: gcp:organizations:getProject
Arguments: {}
Healthcare Pipeline Job Mapping Recon Dest
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const project = gcp.organizations.getProject({});
const dataset = new gcp.healthcare.Dataset("dataset", {
name: "example_dataset",
location: "us-central1",
});
const destFhirstore = new gcp.healthcare.FhirStore("dest_fhirstore", {
name: "dest_fhir_store",
dataset: dataset.id,
version: "R4",
enableUpdateCreate: true,
disableReferentialIntegrity: true,
});
const bucket = new gcp.storage.Bucket("bucket", {
name: "example_bucket_name",
location: "us-central1",
uniformBucketLevelAccess: true,
});
const mergeFile = new gcp.storage.BucketObject("merge_file", {
name: "merge.wstl",
content: " ",
bucket: bucket.name,
});
const recon = new gcp.healthcare.PipelineJob("recon", {
name: "example_recon_pipeline_job",
location: "us-central1",
dataset: dataset.id,
disableLineage: true,
reconciliationPipelineJob: {
mergeConfig: {
description: "sample description for reconciliation rules",
whistleConfigSource: {
uri: pulumi.interpolate`gs://${bucket.name}/${mergeFile.name}`,
importUriPrefix: pulumi.interpolate`gs://${bucket.name}`,
},
},
matchingUriPrefix: pulumi.interpolate`gs://${bucket.name}`,
fhirStoreDestination: pulumi.interpolate`${dataset.id}/fhirStores/${destFhirstore.name}`,
},
});
const sourceFhirstore = new gcp.healthcare.FhirStore("source_fhirstore", {
name: "source_fhir_store",
dataset: dataset.id,
version: "R4",
enableUpdateCreate: true,
disableReferentialIntegrity: true,
});
const mappingFile = new gcp.storage.BucketObject("mapping_file", {
name: "mapping.wstl",
content: " ",
bucket: bucket.name,
});
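// With reconciliationDestination set, the mapped output feeds the reconciliation
// pipeline instead of being written directly to a FHIR store, so the recon job
// above must exist first (hence the dependsOn option).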
const example_mapping_pipeline = new gcp.healthcare.PipelineJob("example-mapping-pipeline", {
name: "example_mapping_pipeline_job",
location: "us-central1",
dataset: dataset.id,
disableLineage: true,
labels: {
example_label_key: "example_label_value",
},
mappingPipelineJob: {
mappingConfig: {
whistleConfigSource: {
uri: pulumi.interpolate`gs://${bucket.name}/${mappingFile.name}`,
importUriPrefix: pulumi.interpolate`gs://${bucket.name}`,
},
description: "example description for mapping configuration",
},
fhirStreamingSource: {
fhirStore: pulumi.interpolate`${dataset.id}/fhirStores/${sourceFhirstore.name}`,
description: "example description for streaming fhirstore",
},
reconciliationDestination: true,
},
}, {
dependsOn: [recon],
});
const hsa = new gcp.storage.BucketIAMMember("hsa", {
bucket: bucket.name,
role: "roles/storage.objectUser",
member: project.then(project => `serviceAccount:service-${project.number}@gcp-sa-healthcare.iam.gserviceaccount.com`),
});
import pulumi
import pulumi_gcp as gcp
project = gcp.organizations.get_project()
dataset = gcp.healthcare.Dataset("dataset",
name="example_dataset",
location="us-central1")
dest_fhirstore = gcp.healthcare.FhirStore("dest_fhirstore",
name="dest_fhir_store",
dataset=dataset.id,
version="R4",
enable_update_create=True,
disable_referential_integrity=True)
bucket = gcp.storage.Bucket("bucket",
name="example_bucket_name",
location="us-central1",
uniform_bucket_level_access=True)
merge_file = gcp.storage.BucketObject("merge_file",
name="merge.wstl",
content=" ",
bucket=bucket.name)
recon = gcp.healthcare.PipelineJob("recon",
name="example_recon_pipeline_job",
location="us-central1",
dataset=dataset.id,
disable_lineage=True,
reconciliation_pipeline_job={
"merge_config": {
"description": "sample description for reconciliation rules",
"whistle_config_source": {
"uri": pulumi.Output.all(
bucketName=bucket.name,
mergeFileName=merge_file.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['mergeFileName']}")
,
"import_uri_prefix": bucket.name.apply(lambda name: f"gs://{name}"),
},
},
"matching_uri_prefix": bucket.name.apply(lambda name: f"gs://{name}"),
"fhir_store_destination": pulumi.Output.all(
id=dataset.id,
name=dest_fhirstore.name
).apply(lambda resolved_outputs: f"{resolved_outputs['id']}/fhirStores/{resolved_outputs['name']}")
,
})
source_fhirstore = gcp.healthcare.FhirStore("source_fhirstore",
name="source_fhir_store",
dataset=dataset.id,
version="R4",
enable_update_create=True,
disable_referential_integrity=True)
mapping_file = gcp.storage.BucketObject("mapping_file",
name="mapping.wstl",
content=" ",
bucket=bucket.name)
example_mapping_pipeline = gcp.healthcare.PipelineJob("example-mapping-pipeline",
name="example_mapping_pipeline_job",
location="us-central1",
dataset=dataset.id,
disable_lineage=True,
labels={
"example_label_key": "example_label_value",
},
mapping_pipeline_job={
"mapping_config": {
"whistle_config_source": {
"uri": pulumi.Output.all(
bucketName=bucket.name,
mappingFileName=mapping_file.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucketName']}/{resolved_outputs['mappingFileName']}")
,
"import_uri_prefix": bucket.name.apply(lambda name: f"gs://{name}"),
},
"description": "example description for mapping configuration",
},
"fhir_streaming_source": {
"fhir_store": pulumi.Output.all(
id=dataset.id,
name=source_fhirstore.name
).apply(lambda resolved_outputs: f"{resolved_outputs['id']}/fhirStores/{resolved_outputs['name']}")
,
"description": "example description for streaming fhirstore",
},
"reconciliation_destination": True,
},
opts = pulumi.ResourceOptions(depends_on=[recon]))
hsa = gcp.storage.BucketIAMMember("hsa",
bucket=bucket.name,
role="roles/storage.objectUser",
member=f"serviceAccount:service-{project.number}@gcp-sa-healthcare.iam.gserviceaccount.com")
package main
import (
"fmt"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/healthcare"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
if err != nil {
return err
}
dataset, err := healthcare.NewDataset(ctx, "dataset", &healthcare.DatasetArgs{
Name: pulumi.String("example_dataset"),
Location: pulumi.String("us-central1"),
})
if err != nil {
return err
}
destFhirstore, err := healthcare.NewFhirStore(ctx, "dest_fhirstore", &healthcare.FhirStoreArgs{
Name: pulumi.String("dest_fhir_store"),
Dataset: dataset.ID(),
Version: pulumi.String("R4"),
EnableUpdateCreate: pulumi.Bool(true),
DisableReferentialIntegrity: pulumi.Bool(true),
})
if err != nil {
return err
}
bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
Name: pulumi.String("example_bucket_name"),
Location: pulumi.String("us-central1"),
UniformBucketLevelAccess: pulumi.Bool(true),
})
if err != nil {
return err
}
mergeFile, err := storage.NewBucketObject(ctx, "merge_file", &storage.BucketObjectArgs{
Name: pulumi.String("merge.wstl"),
Content: pulumi.String(" "),
Bucket: bucket.Name,
})
if err != nil {
return err
}
recon, err := healthcare.NewPipelineJob(ctx, "recon", &healthcare.PipelineJobArgs{
Name: pulumi.String("example_recon_pipeline_job"),
Location: pulumi.String("us-central1"),
Dataset: dataset.ID(),
DisableLineage: pulumi.Bool(true),
ReconciliationPipelineJob: &healthcare.PipelineJobReconciliationPipelineJobArgs{
MergeConfig: &healthcare.PipelineJobReconciliationPipelineJobMergeConfigArgs{
Description: pulumi.String("sample description for reconciliation rules"),
WhistleConfigSource: &healthcare.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs{
Uri: pulumi.All(bucket.Name, mergeFile.Name).ApplyT(func(_args []interface{}) (string, error) {
bucketName := _args[0].(string)
mergeFileName := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucketName, mergeFileName), nil
}).(pulumi.StringOutput),
ImportUriPrefix: bucket.Name.ApplyT(func(name string) (string, error) {
return fmt.Sprintf("gs://%v", name), nil
}).(pulumi.StringOutput),
},
},
MatchingUriPrefix: bucket.Name.ApplyT(func(name string) (string, error) {
return fmt.Sprintf("gs://%v", name), nil
}).(pulumi.StringOutput),
FhirStoreDestination: pulumi.All(dataset.ID(), destFhirstore.Name).ApplyT(func(_args []interface{}) (string, error) {
id := _args[0].(string)
name := _args[1].(string)
return fmt.Sprintf("%v/fhirStores/%v", id, name), nil
}).(pulumi.StringOutput),
},
})
if err != nil {
return err
}
sourceFhirstore, err := healthcare.NewFhirStore(ctx, "source_fhirstore", &healthcare.FhirStoreArgs{
Name: pulumi.String("source_fhir_store"),
Dataset: dataset.ID(),
Version: pulumi.String("R4"),
EnableUpdateCreate: pulumi.Bool(true),
DisableReferentialIntegrity: pulumi.Bool(true),
})
if err != nil {
return err
}
mappingFile, err := storage.NewBucketObject(ctx, "mapping_file", &storage.BucketObjectArgs{
Name: pulumi.String("mapping.wstl"),
Content: pulumi.String(" "),
Bucket: bucket.Name,
})
if err != nil {
return err
}
_, err = healthcare.NewPipelineJob(ctx, "example-mapping-pipeline", &healthcare.PipelineJobArgs{
Name: pulumi.String("example_mapping_pipeline_job"),
Location: pulumi.String("us-central1"),
Dataset: dataset.ID(),
DisableLineage: pulumi.Bool(true),
Labels: pulumi.StringMap{
"example_label_key": pulumi.String("example_label_value"),
},
MappingPipelineJob: &healthcare.PipelineJobMappingPipelineJobArgs{
MappingConfig: &healthcare.PipelineJobMappingPipelineJobMappingConfigArgs{
WhistleConfigSource: &healthcare.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs{
Uri: pulumi.All(bucket.Name, mappingFile.Name).ApplyT(func(_args []interface{}) (string, error) {
bucketName := _args[0].(string)
mappingFileName := _args[1].(string)
return fmt.Sprintf("gs://%v/%v", bucketName, mappingFileName), nil
}).(pulumi.StringOutput),
ImportUriPrefix: bucket.Name.ApplyT(func(name string) (string, error) {
return fmt.Sprintf("gs://%v", name), nil
}).(pulumi.StringOutput),
},
Description: pulumi.String("example description for mapping configuration"),
},
FhirStreamingSource: &healthcare.PipelineJobMappingPipelineJobFhirStreamingSourceArgs{
FhirStore: pulumi.All(dataset.ID(), sourceFhirstore.Name).ApplyT(func(_args []interface{}) (string, error) {
id := _args[0].(string)
name := _args[1].(string)
return fmt.Sprintf("%v/fhirStores/%v", id, name), nil
}).(pulumi.StringOutput),
Description: pulumi.String("example description for streaming fhirstore"),
},
ReconciliationDestination: pulumi.Bool(true),
},
}, pulumi.DependsOn([]pulumi.Resource{
recon,
}))
if err != nil {
return err
}
_, err = storage.NewBucketIAMMember(ctx, "hsa", &storage.BucketIAMMemberArgs{
Bucket: bucket.Name,
Role: pulumi.String("roles/storage.objectUser"),
Member: pulumi.Sprintf("serviceAccount:service-%v@gcp-sa-healthcare.iam.gserviceaccount.com", project.Number),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var project = Gcp.Organizations.GetProject.Invoke();
var dataset = new Gcp.Healthcare.Dataset("dataset", new()
{
Name = "example_dataset",
Location = "us-central1",
});
var destFhirstore = new Gcp.Healthcare.FhirStore("dest_fhirstore", new()
{
Name = "dest_fhir_store",
Dataset = dataset.Id,
Version = "R4",
EnableUpdateCreate = true,
DisableReferentialIntegrity = true,
});
var bucket = new Gcp.Storage.Bucket("bucket", new()
{
Name = "example_bucket_name",
Location = "us-central1",
UniformBucketLevelAccess = true,
});
var mergeFile = new Gcp.Storage.BucketObject("merge_file", new()
{
Name = "merge.wstl",
Content = " ",
Bucket = bucket.Name,
});
var recon = new Gcp.Healthcare.PipelineJob("recon", new()
{
Name = "example_recon_pipeline_job",
Location = "us-central1",
Dataset = dataset.Id,
DisableLineage = true,
ReconciliationPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobArgs
{
MergeConfig = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobMergeConfigArgs
{
Description = "sample description for reconciliation rules",
WhistleConfigSource = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs
{
Uri = Output.Tuple(bucket.Name, mergeFile.Name).Apply(values =>
{
var bucketName = values.Item1;
var mergeFileName = values.Item2;
return $"gs://{bucketName}/{mergeFileName}";
}),
ImportUriPrefix = bucket.Name.Apply(name => $"gs://{name}"),
},
},
MatchingUriPrefix = bucket.Name.Apply(name => $"gs://{name}"),
FhirStoreDestination = Output.Tuple(dataset.Id, destFhirstore.Name).Apply(values =>
{
var id = values.Item1;
var name = values.Item2;
return $"{id}/fhirStores/{name}";
}),
},
});
var sourceFhirstore = new Gcp.Healthcare.FhirStore("source_fhirstore", new()
{
Name = "source_fhir_store",
Dataset = dataset.Id,
Version = "R4",
EnableUpdateCreate = true,
DisableReferentialIntegrity = true,
});
var mappingFile = new Gcp.Storage.BucketObject("mapping_file", new()
{
Name = "mapping.wstl",
Content = " ",
Bucket = bucket.Name,
});
var example_mapping_pipeline = new Gcp.Healthcare.PipelineJob("example-mapping-pipeline", new()
{
Name = "example_mapping_pipeline_job",
Location = "us-central1",
Dataset = dataset.Id,
DisableLineage = true,
Labels =
{
{ "example_label_key", "example_label_value" },
},
MappingPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobArgs
{
MappingConfig = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobMappingConfigArgs
{
WhistleConfigSource = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs
{
Uri = Output.Tuple(bucket.Name, mappingFile.Name).Apply(values =>
{
var bucketName = values.Item1;
var mappingFileName = values.Item2;
return $"gs://{bucketName}/{mappingFileName}";
}),
ImportUriPrefix = bucket.Name.Apply(name => $"gs://{name}"),
},
Description = "example description for mapping configuration",
},
FhirStreamingSource = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobFhirStreamingSourceArgs
{
FhirStore = Output.Tuple(dataset.Id, sourceFhirstore.Name).Apply(values =>
{
var id = values.Item1;
var name = values.Item2;
return $"{id}/fhirStores/{name}";
}),
Description = "example description for streaming fhirstore",
},
ReconciliationDestination = true,
},
}, new CustomResourceOptions
{
DependsOn =
{
recon,
},
});
var hsa = new Gcp.Storage.BucketIAMMember("hsa", new()
{
Bucket = bucket.Name,
Role = "roles/storage.objectUser",
Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@gcp-sa-healthcare.iam.gserviceaccount.com",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.healthcare.Dataset;
import com.pulumi.gcp.healthcare.DatasetArgs;
import com.pulumi.gcp.healthcare.FhirStore;
import com.pulumi.gcp.healthcare.FhirStoreArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.healthcare.PipelineJob;
import com.pulumi.gcp.healthcare.PipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobReconciliationPipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobReconciliationPipelineJobMergeConfigArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobMappingConfigArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs;
import com.pulumi.gcp.healthcare.inputs.PipelineJobMappingPipelineJobFhirStreamingSourceArgs;
import com.pulumi.gcp.storage.BucketIAMMember;
import com.pulumi.gcp.storage.BucketIAMMemberArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
final var project = OrganizationsFunctions.getProject();
var dataset = new Dataset("dataset", DatasetArgs.builder()
.name("example_dataset")
.location("us-central1")
.build());
var destFhirstore = new FhirStore("destFhirstore", FhirStoreArgs.builder()
.name("dest_fhir_store")
.dataset(dataset.id())
.version("R4")
.enableUpdateCreate(true)
.disableReferentialIntegrity(true)
.build());
var bucket = new Bucket("bucket", BucketArgs.builder()
.name("example_bucket_name")
.location("us-central1")
.uniformBucketLevelAccess(true)
.build());
var mergeFile = new BucketObject("mergeFile", BucketObjectArgs.builder()
.name("merge.wstl")
.content(" ")
.bucket(bucket.name())
.build());
var recon = new PipelineJob("recon", PipelineJobArgs.builder()
.name("example_recon_pipeline_job")
.location("us-central1")
.dataset(dataset.id())
.disableLineage(true)
.reconciliationPipelineJob(PipelineJobReconciliationPipelineJobArgs.builder()
.mergeConfig(PipelineJobReconciliationPipelineJobMergeConfigArgs.builder()
.description("sample description for reconciliation rules")
.whistleConfigSource(PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs.builder()
.uri(Output.tuple(bucket.name(), mergeFile.name()).applyValue(values -> {
var bucketName = values.t1;
var mergeFileName = values.t2;
return String.format("gs://%s/%s", bucketName,mergeFileName);
}))
.importUriPrefix(bucket.name().applyValue(name -> String.format("gs://%s", name)))
.build())
.build())
.matchingUriPrefix(bucket.name().applyValue(name -> String.format("gs://%s", name)))
.fhirStoreDestination(Output.tuple(dataset.id(), destFhirstore.name()).applyValue(values -> {
var id = values.t1;
var name = values.t2;
return String.format("%s/fhirStores/%s", id,name);
}))
.build())
.build());
var sourceFhirstore = new FhirStore("sourceFhirstore", FhirStoreArgs.builder()
.name("source_fhir_store")
.dataset(dataset.id())
.version("R4")
.enableUpdateCreate(true)
.disableReferentialIntegrity(true)
.build());
var mappingFile = new BucketObject("mappingFile", BucketObjectArgs.builder()
.name("mapping.wstl")
.content(" ")
.bucket(bucket.name())
.build());
var example_mapping_pipeline = new PipelineJob("example-mapping-pipeline", PipelineJobArgs.builder()
.name("example_mapping_pipeline_job")
.location("us-central1")
.dataset(dataset.id())
.disableLineage(true)
.labels(Map.of("example_label_key", "example_label_value"))
.mappingPipelineJob(PipelineJobMappingPipelineJobArgs.builder()
.mappingConfig(PipelineJobMappingPipelineJobMappingConfigArgs.builder()
.whistleConfigSource(PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs.builder()
.uri(Output.tuple(bucket.name(), mappingFile.name()).applyValue(values -> {
var bucketName = values.t1;
var mappingFileName = values.t2;
return String.format("gs://%s/%s", bucketName,mappingFileName);
}))
.importUriPrefix(bucket.name().applyValue(name -> String.format("gs://%s", name)))
.build())
.description("example description for mapping configuration")
.build())
.fhirStreamingSource(PipelineJobMappingPipelineJobFhirStreamingSourceArgs.builder()
.fhirStore(Output.tuple(dataset.id(), sourceFhirstore.name()).applyValue(values -> {
var id = values.t1;
var name = values.t2;
return String.format("%s/fhirStores/%s", id,name);
}))
.description("example description for streaming fhirstore")
.build())
.reconciliationDestination(true)
.build())
.build(), CustomResourceOptions.builder()
.dependsOn(recon)
.build());
var hsa = new BucketIAMMember("hsa", BucketIAMMemberArgs.builder()
.bucket(bucket.name())
.role("roles/storage.objectUser")
.member(project.applyValue(getProjectResult -> String.format("serviceAccount:service-%s@gcp-sa-healthcare.iam.gserviceaccount.com", getProjectResult.number())))
.build());
}
}
resources:
recon:
type: gcp:healthcare:PipelineJob
properties:
name: example_recon_pipeline_job
location: us-central1
dataset: ${dataset.id}
disableLineage: true
reconciliationPipelineJob:
mergeConfig:
description: sample description for reconciliation rules
whistleConfigSource:
uri: gs://${bucket.name}/${mergeFile.name}
importUriPrefix: gs://${bucket.name}
matchingUriPrefix: gs://${bucket.name}
fhirStoreDestination: ${dataset.id}/fhirStores/${destFhirstore.name}
example-mapping-pipeline:
type: gcp:healthcare:PipelineJob
properties:
name: example_mapping_pipeline_job
location: us-central1
dataset: ${dataset.id}
disableLineage: true
labels:
example_label_key: example_label_value
mappingPipelineJob:
mappingConfig:
whistleConfigSource:
uri: gs://${bucket.name}/${mappingFile.name}
importUriPrefix: gs://${bucket.name}
description: example description for mapping configuration
fhirStreamingSource:
fhirStore: ${dataset.id}/fhirStores/${sourceFhirstore.name}
description: example description for streaming fhirstore
reconciliationDestination: true
options:
dependsOn:
- ${recon}
dataset:
type: gcp:healthcare:Dataset
properties:
name: example_dataset
location: us-central1
sourceFhirstore:
type: gcp:healthcare:FhirStore
name: source_fhirstore
properties:
name: source_fhir_store
dataset: ${dataset.id}
version: R4
enableUpdateCreate: true
disableReferentialIntegrity: true
destFhirstore:
type: gcp:healthcare:FhirStore
name: dest_fhirstore
properties:
name: dest_fhir_store
dataset: ${dataset.id}
version: R4
enableUpdateCreate: true
disableReferentialIntegrity: true
bucket:
type: gcp:storage:Bucket
properties:
name: example_bucket_name
location: us-central1
uniformBucketLevelAccess: true
mappingFile:
type: gcp:storage:BucketObject
name: mapping_file
properties:
name: mapping.wstl
content: ' '
bucket: ${bucket.name}
mergeFile:
type: gcp:storage:BucketObject
name: merge_file
properties:
name: merge.wstl
content: ' '
bucket: ${bucket.name}
hsa:
type: gcp:storage:BucketIAMMember
properties:
bucket: ${bucket.name}
role: roles/storage.objectUser
member: serviceAccount:service-${project.number}@gcp-sa-healthcare.iam.gserviceaccount.com
variables:
project:
fn::invoke:
Function: gcp:organizations:getProject
Arguments: {}
Create PipelineJob Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new PipelineJob(name: string, args: PipelineJobArgs, opts?: CustomResourceOptions);
@overload
def PipelineJob(resource_name: str,
args: PipelineJobArgs,
opts: Optional[ResourceOptions] = None)
@overload
def PipelineJob(resource_name: str,
opts: Optional[ResourceOptions] = None,
dataset: Optional[str] = None,
location: Optional[str] = None,
backfill_pipeline_job: Optional[PipelineJobBackfillPipelineJobArgs] = None,
disable_lineage: Optional[bool] = None,
labels: Optional[Mapping[str, str]] = None,
mapping_pipeline_job: Optional[PipelineJobMappingPipelineJobArgs] = None,
name: Optional[str] = None,
reconciliation_pipeline_job: Optional[PipelineJobReconciliationPipelineJobArgs] = None)
func NewPipelineJob(ctx *Context, name string, args PipelineJobArgs, opts ...ResourceOption) (*PipelineJob, error)
public PipelineJob(string name, PipelineJobArgs args, CustomResourceOptions? opts = null)
public PipelineJob(String name, PipelineJobArgs args)
public PipelineJob(String name, PipelineJobArgs args, CustomResourceOptions options)
type: gcp:healthcare:PipelineJob
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args PipelineJobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args PipelineJobArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args PipelineJobArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args PipelineJobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args PipelineJobArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var pipelineJobResource = new Gcp.Healthcare.PipelineJob("pipelineJobResource", new()
{
Dataset = "string",
Location = "string",
BackfillPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobBackfillPipelineJobArgs
{
MappingPipelineJob = "string",
},
DisableLineage = false,
Labels =
{
{ "string", "string" },
},
MappingPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobArgs
{
MappingConfig = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobMappingConfigArgs
{
Description = "string",
WhistleConfigSource = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs
{
ImportUriPrefix = "string",
Uri = "string",
},
},
FhirStoreDestination = "string",
FhirStreamingSource = new Gcp.Healthcare.Inputs.PipelineJobMappingPipelineJobFhirStreamingSourceArgs
{
FhirStore = "string",
Description = "string",
},
ReconciliationDestination = false,
},
Name = "string",
ReconciliationPipelineJob = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobArgs
{
MatchingUriPrefix = "string",
MergeConfig = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobMergeConfigArgs
{
WhistleConfigSource = new Gcp.Healthcare.Inputs.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs
{
ImportUriPrefix = "string",
Uri = "string",
},
Description = "string",
},
FhirStoreDestination = "string",
},
});
example, err := healthcare.NewPipelineJob(ctx, "pipelineJobResource", &healthcare.PipelineJobArgs{
Dataset: pulumi.String("string"),
Location: pulumi.String("string"),
BackfillPipelineJob: &healthcare.PipelineJobBackfillPipelineJobArgs{
MappingPipelineJob: pulumi.String("string"),
},
DisableLineage: pulumi.Bool(false),
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
MappingPipelineJob: &healthcare.PipelineJobMappingPipelineJobArgs{
MappingConfig: &healthcare.PipelineJobMappingPipelineJobMappingConfigArgs{
Description: pulumi.String("string"),
WhistleConfigSource: &healthcare.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs{
ImportUriPrefix: pulumi.String("string"),
Uri: pulumi.String("string"),
},
},
FhirStoreDestination: pulumi.String("string"),
FhirStreamingSource: &healthcare.PipelineJobMappingPipelineJobFhirStreamingSourceArgs{
FhirStore: pulumi.String("string"),
Description: pulumi.String("string"),
},
ReconciliationDestination: pulumi.Bool(false),
},
Name: pulumi.String("string"),
ReconciliationPipelineJob: &healthcare.PipelineJobReconciliationPipelineJobArgs{
MatchingUriPrefix: pulumi.String("string"),
MergeConfig: &healthcare.PipelineJobReconciliationPipelineJobMergeConfigArgs{
WhistleConfigSource: &healthcare.PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs{
ImportUriPrefix: pulumi.String("string"),
Uri: pulumi.String("string"),
},
Description: pulumi.String("string"),
},
FhirStoreDestination: pulumi.String("string"),
},
})
var pipelineJobResource = new PipelineJob("pipelineJobResource", PipelineJobArgs.builder()
.dataset("string")
.location("string")
.backfillPipelineJob(PipelineJobBackfillPipelineJobArgs.builder()
.mappingPipelineJob("string")
.build())
.disableLineage(false)
.labels(Map.of("string", "string"))
.mappingPipelineJob(PipelineJobMappingPipelineJobArgs.builder()
.mappingConfig(PipelineJobMappingPipelineJobMappingConfigArgs.builder()
.description("string")
.whistleConfigSource(PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs.builder()
.importUriPrefix("string")
.uri("string")
.build())
.build())
.fhirStoreDestination("string")
.fhirStreamingSource(PipelineJobMappingPipelineJobFhirStreamingSourceArgs.builder()
.fhirStore("string")
.description("string")
.build())
.reconciliationDestination(false)
.build())
.name("string")
.reconciliationPipelineJob(PipelineJobReconciliationPipelineJobArgs.builder()
.matchingUriPrefix("string")
.mergeConfig(PipelineJobReconciliationPipelineJobMergeConfigArgs.builder()
.whistleConfigSource(PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs.builder()
.importUriPrefix("string")
.uri("string")
.build())
.description("string")
.build())
.fhirStoreDestination("string")
.build())
.build());
pipeline_job_resource = gcp.healthcare.PipelineJob("pipelineJobResource",
dataset="string",
location="string",
backfill_pipeline_job={
"mapping_pipeline_job": "string",
},
disable_lineage=False,
labels={
"string": "string",
},
mapping_pipeline_job={
"mapping_config": {
"description": "string",
"whistle_config_source": {
"import_uri_prefix": "string",
"uri": "string",
},
},
"fhir_store_destination": "string",
"fhir_streaming_source": {
"fhir_store": "string",
"description": "string",
},
"reconciliation_destination": False,
},
name="string",
reconciliation_pipeline_job={
"matching_uri_prefix": "string",
"merge_config": {
"whistle_config_source": {
"import_uri_prefix": "string",
"uri": "string",
},
"description": "string",
},
"fhir_store_destination": "string",
})
const pipelineJobResource = new gcp.healthcare.PipelineJob("pipelineJobResource", {
dataset: "string",
location: "string",
backfillPipelineJob: {
mappingPipelineJob: "string",
},
disableLineage: false,
labels: {
string: "string",
},
mappingPipelineJob: {
mappingConfig: {
description: "string",
whistleConfigSource: {
importUriPrefix: "string",
uri: "string",
},
},
fhirStoreDestination: "string",
fhirStreamingSource: {
fhirStore: "string",
description: "string",
},
reconciliationDestination: false,
},
name: "string",
reconciliationPipelineJob: {
matchingUriPrefix: "string",
mergeConfig: {
whistleConfigSource: {
importUriPrefix: "string",
uri: "string",
},
description: "string",
},
fhirStoreDestination: "string",
},
});
type: gcp:healthcare:PipelineJob
properties:
backfillPipelineJob:
mappingPipelineJob: string
dataset: string
disableLineage: false
labels:
string: string
location: string
mappingPipelineJob:
fhirStoreDestination: string
fhirStreamingSource:
description: string
fhirStore: string
mappingConfig:
description: string
whistleConfigSource:
importUriPrefix: string
uri: string
reconciliationDestination: false
name: string
reconciliationPipelineJob:
fhirStoreDestination: string
matchingUriPrefix: string
mergeConfig:
description: string
whistleConfigSource:
importUriPrefix: string
uri: string
PipelineJob Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
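For example, the two resources below are equivalent; this is a minimal Python sketch using placeholder bucket, dataset, and FHIR store paths rather than values from this page's examples.
import pulumi_gcp as gcp

# Placeholder paths for illustration only.
dataset_id = "projects/my-project/locations/us-central1/datasets/example_dataset"
source_store = f"{dataset_id}/fhirStores/source_fhir_store"

# 1) Nested inputs passed as typed args classes:
job_a = gcp.healthcare.PipelineJob("job-a",
    location="us-central1",
    dataset=dataset_id,
    mapping_pipeline_job=gcp.healthcare.PipelineJobMappingPipelineJobArgs(
        mapping_config=gcp.healthcare.PipelineJobMappingPipelineJobMappingConfigArgs(
            whistle_config_source=gcp.healthcare.PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs(
                uri="gs://example-bucket/mapping.wstl",
                import_uri_prefix="gs://example-bucket")),
        fhir_streaming_source=gcp.healthcare.PipelineJobMappingPipelineJobFhirStreamingSourceArgs(
            fhir_store=source_store)))

# 2) The same nested inputs passed as dictionary literals with snake_case keys:
job_b = gcp.healthcare.PipelineJob("job-b",
    location="us-central1",
    dataset=dataset_id,
    mapping_pipeline_job={
        "mapping_config": {
            "whistle_config_source": {
                "uri": "gs://example-bucket/mapping.wstl",
                "import_uri_prefix": "gs://example-bucket",
            },
        },
        "fhir_streaming_source": {"fhir_store": source_store},
    })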
The PipelineJob resource accepts the following input properties:
- Dataset string
- Healthcare Dataset under which the Pipeline Job is to run
- Location string
- Location where the Pipeline Job is to run
- BackfillPipelineJob PipelineJobBackfillPipelineJob
- Specifies the backfill configuration. Structure is documented below.
- DisableLineage bool
- If true, disables writing lineage for the pipeline.
- Labels Dictionary<string, string>
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- MappingPipelineJob PipelineJobMappingPipelineJob
- Specifies mapping configuration. Structure is documented below.
- Name string
- Specifies the name of the pipeline job. This field is user-assigned.
- ReconciliationPipelineJob PipelineJobReconciliationPipelineJob
- Specifies reconciliation configuration. Structure is documented below.
- Dataset string
- Healthcare Dataset under which the Pipeline Job is to run
- Location string
- Location where the Pipeline Job is to run
- BackfillPipelineJob PipelineJobBackfillPipelineJobArgs
- Specifies the backfill configuration. Structure is documented below.
- DisableLineage bool
- If true, disables writing lineage for the pipeline.
- Labels map[string]string
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- MappingPipelineJob PipelineJobMappingPipelineJobArgs
- Specifies mapping configuration. Structure is documented below.
- Name string
- Specifies the name of the pipeline job. This field is user-assigned.
- ReconciliationPipelineJob PipelineJobReconciliationPipelineJobArgs
- Specifies reconciliation configuration. Structure is documented below.
- dataset String
- Healthcare Dataset under which the Pipeline Job is to run
- location String
- Location where the Pipeline Job is to run
- backfillPipelineJob PipelineJobBackfillPipelineJob
- Specifies the backfill configuration. Structure is documented below.
- disableLineage Boolean
- If true, disables writing lineage for the pipeline.
- labels Map<String,String>
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- mappingPipelineJob PipelineJobMappingPipelineJob
- Specifies mapping configuration. Structure is documented below.
- name String
- Specifies the name of the pipeline job. This field is user-assigned.
- reconciliationPipelineJob PipelineJobReconciliationPipelineJob
- Specifies reconciliation configuration. Structure is documented below.
- dataset string
- Healthcare Dataset under which the Pipeline Job is to run
- location string
- Location where the Pipeline Job is to run
- backfillPipelineJob PipelineJobBackfillPipelineJob
- Specifies the backfill configuration. Structure is documented below.
- disableLineage boolean
- If true, disables writing lineage for the pipeline.
- labels {[key: string]: string}
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- mappingPipelineJob PipelineJobMappingPipelineJob
- Specifies mapping configuration. Structure is documented below.
- name string
- Specifies the name of the pipeline job. This field is user-assigned.
- reconciliationPipelineJob PipelineJobReconciliationPipelineJob
- Specifies reconciliation configuration. Structure is documented below.
- dataset str
- Healthcare Dataset under which the Pipeline Job is to run
- location str
- Location where the Pipeline Job is to run
- backfill_pipeline_job PipelineJobBackfillPipelineJobArgs
- Specifies the backfill configuration. Structure is documented below.
- disable_lineage bool
- If true, disables writing lineage for the pipeline.
- labels Mapping[str, str]
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- mapping_pipeline_job PipelineJobMappingPipelineJobArgs
- Specifies mapping configuration. Structure is documented below.
- name str
- Specifies the name of the pipeline job. This field is user-assigned.
- reconciliation_pipeline_job PipelineJobReconciliationPipelineJobArgs
- Specifies reconciliation configuration. Structure is documented below.
- dataset String
- Healthcare Dataset under which the Pipeline Job is to run
- location String
- Location where the Pipeline Job is to run
- backfillPipelineJob Property Map
- Specifies the backfill configuration. Structure is documented below.
- disableLineage Boolean
- If true, disables writing lineage for the pipeline.
- labels Map<String>
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- mappingPipelineJob Property Map
- Specifies mapping configuration. Structure is documented below.
- name String
- Specifies the name of the pipeline job. This field is user-assigned.
- reconciliationPipelineJob Property Map
- Specifies reconciliation configuration. Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the PipelineJob resource produces the following output properties:
- EffectiveLabels Dictionary<string, string>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- SelfLink string
- The fully qualified name of this pipeline job
- EffectiveLabels map[string]string
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Id string
- The provider-assigned unique ID for this managed resource.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- SelfLink string
- The fully qualified name of this pipeline job
- effectiveLabels Map<String,String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- selfLink String
- The fully qualified name of this pipeline job
- effectiveLabels {[key: string]: string}
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id string
- The provider-assigned unique ID for this managed resource.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- selfLink string
- The fully qualified name of this pipeline job
- effective_labels Mapping[str, str]
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id str
- The provider-assigned unique ID for this managed resource.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- self_link str
- The fully qualified name of this pipeline job
- effectiveLabels Map<String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- id String
- The provider-assigned unique ID for this managed resource.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- selfLink String
- The fully qualified name of this pipeline job
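Since all inputs are also outputs, the computed properties above can be exported like any other value; a short Python sketch, assuming the example_pipeline resource declared in the examples above:
import pulumi

# `example_pipeline` is assumed to be a gcp.healthcare.PipelineJob declared earlier.
pulumi.export("pipelineJobSelfLink", example_pipeline.self_link)
pulumi.export("pipelineJobEffectiveLabels", example_pipeline.effective_labels)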
Look up Existing PipelineJob Resource
Get an existing PipelineJob resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: PipelineJobState, opts?: CustomResourceOptions): PipelineJob
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
backfill_pipeline_job: Optional[PipelineJobBackfillPipelineJobArgs] = None,
dataset: Optional[str] = None,
disable_lineage: Optional[bool] = None,
effective_labels: Optional[Mapping[str, str]] = None,
labels: Optional[Mapping[str, str]] = None,
location: Optional[str] = None,
mapping_pipeline_job: Optional[PipelineJobMappingPipelineJobArgs] = None,
name: Optional[str] = None,
pulumi_labels: Optional[Mapping[str, str]] = None,
reconciliation_pipeline_job: Optional[PipelineJobReconciliationPipelineJobArgs] = None,
self_link: Optional[str] = None) -> PipelineJob
func GetPipelineJob(ctx *Context, name string, id IDInput, state *PipelineJobState, opts ...ResourceOption) (*PipelineJob, error)
public static PipelineJob Get(string name, Input<string> id, PipelineJobState? state, CustomResourceOptions? opts = null)
public static PipelineJob get(String name, Output<String> id, PipelineJobState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- BackfillPipelineJob PipelineJobBackfillPipelineJob
- Specifies the backfill configuration. Structure is documented below.
- Dataset string
- Healthcare Dataset under which the Pipeline Job is to run
- DisableLineage bool
- If true, disables writing lineage for the pipeline.
- EffectiveLabels Dictionary<string, string>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Labels Dictionary<string, string>
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- Location string
- Location where the Pipeline Job is to run
- MappingPipelineJob PipelineJobMappingPipelineJob
- Specifies mapping configuration. Structure is documented below.
- Name string
- Specifies the name of the pipeline job. This field is user-assigned.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- ReconciliationPipelineJob PipelineJobReconciliationPipelineJob
- Specifies reconciliation configuration. Structure is documented below.
- SelfLink string
- The fully qualified name of this pipeline job
- BackfillPipelineJob PipelineJobBackfillPipelineJobArgs
- Specifies the backfill configuration. Structure is documented below.
- Dataset string
- Healthcare Dataset under which the Pipeline Job is to run
- DisableLineage bool
- If true, disables writing lineage for the pipeline.
- EffectiveLabels map[string]string
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Labels map[string]string
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- Location string
- Location where the Pipeline Job is to run
- MappingPipelineJob PipelineJobMappingPipelineJobArgs
- Specifies mapping configuration. Structure is documented below.
- Name string
- Specifies the name of the pipeline job. This field is user-assigned.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- ReconciliationPipelineJob PipelineJobReconciliationPipelineJobArgs
- Specifies reconciliation configuration. Structure is documented below.
- SelfLink string
- The fully qualified name of this pipeline job
- backfillPipelineJob PipelineJobBackfillPipelineJob
- Specifies the backfill configuration. Structure is documented below.
- dataset String
- Healthcare Dataset under which the Pipeline Job is to run
- disableLineage Boolean
- If true, disables writing lineage for the pipeline.
- effectiveLabels Map<String,String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels Map<String,String>
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- location String
- Location where the Pipeline Job is to run
- mappingPipelineJob PipelineJobMappingPipelineJob
- Specifies mapping configuration. Structure is documented below.
- name String
- Specifies the name of the pipeline job. This field is user-assigned.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciliationPipelineJob PipelineJobReconciliationPipelineJob
- Specifies reconciliation configuration. Structure is documented below.
- selfLink String
- The fully qualified name of this pipeline job
- backfillPipelineJob PipelineJobBackfillPipelineJob
- Specifies the backfill configuration. Structure is documented below.
- dataset string
- Healthcare Dataset under which the Pipeline Job is to run
- disableLineage boolean
- If true, disables writing lineage for the pipeline.
- effectiveLabels {[key: string]: string}
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels {[key: string]: string}
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- location string
- Location where the Pipeline Job is to run
- mappingPipelineJob PipelineJobMappingPipelineJob
- Specifies mapping configuration. Structure is documented below.
- name string
- Specifies the name of the pipeline job. This field is user-assigned.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciliationPipelineJob PipelineJobReconciliationPipelineJob
- Specifies reconciliation configuration. Structure is documented below.
- selfLink string
- The fully qualified name of this pipeline job
- backfill_pipeline_job PipelineJobBackfillPipelineJobArgs
- Specifies the backfill configuration. Structure is documented below.
- dataset str
- Healthcare Dataset under which the Pipeline Job is to run
- disable_lineage bool
- If true, disables writing lineage for the pipeline.
- effective_labels Mapping[str, str]
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels Mapping[str, str]
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- location str
- Location where the Pipeline Job is to run
- mapping_pipeline_job PipelineJobMappingPipelineJobArgs
- Specifies mapping configuration. Structure is documented below.
- name str
- Specifies the name of the pipeline job. This field is user-assigned.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciliation_pipeline_job PipelineJobReconciliationPipelineJobArgs
- Specifies reconciliation configuration. Structure is documented below.
- self_link str
- The fully qualified name of this pipeline job
- backfillPipelineJob Property Map
- Specifies the backfill configuration. Structure is documented below.
- dataset String
- Healthcare Dataset under which the Pipeline Job is to run
- disableLineage Boolean
- If true, disables writing lineage for the pipeline.
- effectiveLabels Map<String>
- All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- labels Map<String>
User-supplied key-value pairs used to organize Pipeline Jobs. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}-]{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}-]{0,63} No more than 64 labels can be associated with a given pipeline. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field
effective_labels
for all of the labels present on the resource.
- location String
- Location where the Pipeline Job is to run
- mappingPipelineJob Property Map
- Specifies mapping configuration. Structure is documented below.
- name String
- Specifies the name of the pipeline job. This field is user-assigned.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- reconciliationPipelineJob Property Map
- Specifies reconciliation configuration. Structure is documented below.
- selfLink String
- The fully qualified name of this pipeline job
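As an illustration, a Python lookup might pass an ID in the {{dataset}}/pipelineJobs/{{name}} form documented in the Import section below; every name here is a placeholder:
import pulumi
import pulumi_gcp as gcp

# Look up an existing pipeline job by its ID (placeholder values shown).
existing = gcp.healthcare.PipelineJob.get("existing-pipeline",
    id="projects/my-project/locations/us-central1/datasets/example_dataset/pipelineJobs/example_pipeline_job")
pulumi.export("existingPipelineJobName", existing.name)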
Supporting Types
PipelineJobBackfillPipelineJob, PipelineJobBackfillPipelineJobArgs
- MappingPipelineJob string
- Specifies the mapping pipeline job to backfill; the name format should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.
- MappingPipelineJob string
- Specifies the mapping pipeline job to backfill; the name format should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.
- mappingPipelineJob String
- Specifies the mapping pipeline job to backfill; the name format should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.
- mappingPipelineJob string
- Specifies the mapping pipeline job to backfill; the name format should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.
- mapping_pipeline_job str
- Specifies the mapping pipeline job to backfill; the name format should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.
- mappingPipelineJob String
- Specifies the mapping pipeline job to backfill; the name format should follow: projects/{projectId}/locations/{locationId}/datasets/{datasetId}/pipelineJobs/{pipelineJobId}.
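Concretely, a backfill job just points at an existing mapping pipeline job by this full resource name; a minimal Python sketch, with placeholder project, dataset, and job names standing in for real resources:
import pulumi_gcp as gcp

# Placeholders standing in for a dataset and a mapping pipeline job created elsewhere.
dataset_id = "projects/my-project/locations/us-central1/datasets/example_dataset"
mapping_job_path = f"{dataset_id}/pipelineJobs/example_mapping_pipeline_job"

backfill = gcp.healthcare.PipelineJob("backfill-pipeline",
    name="example_backfill_pipeline_job",
    location="us-central1",
    dataset=dataset_id,
    backfill_pipeline_job={"mapping_pipeline_job": mapping_job_path})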
PipelineJobMappingPipelineJob, PipelineJobMappingPipelineJobArgs
- MappingConfig PipelineJobMappingPipelineJobMappingConfig
- The location of the mapping configuration. Structure is documented below.
- FhirStoreDestination string
- If set, the mapping pipeline will write snapshots to this FHIR store without assigning stable IDs. You must grant your pipeline project's Cloud Healthcare Service Agent service account healthcare.fhirResources.executeBundle and healthcare.fhirResources.create permissions on the destination store. The destination store must set disableReferentialIntegrity to true. The destination store must use FHIR version R4. Format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.
- FhirStreamingSource PipelineJobMappingPipelineJobFhirStreamingSource
- A streaming FHIR data source. Structure is documented below.
- ReconciliationDestination bool
- If set to true, a mapping pipeline will send output snapshots to the reconciliation pipeline in its dataset. A reconciliation pipeline must exist in this dataset before a mapping pipeline with a reconciliation destination can be created.
- MappingConfig PipelineJobMappingPipelineJobMappingConfig
- The location of the mapping configuration. Structure is documented below.
- FhirStoreDestination string
- If set, the mapping pipeline will write snapshots to this FHIR store without assigning stable IDs. You must grant your pipeline project's Cloud Healthcare Service Agent service account healthcare.fhirResources.executeBundle and healthcare.fhirResources.create permissions on the destination store. The destination store must set disableReferentialIntegrity to true. The destination store must use FHIR version R4. Format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.
- FhirStreamingSource PipelineJobMappingPipelineJobFhirStreamingSource
- A streaming FHIR data source. Structure is documented below.
- ReconciliationDestination bool
- If set to true, a mapping pipeline will send output snapshots to the reconciliation pipeline in its dataset. A reconciliation pipeline must exist in this dataset before a mapping pipeline with a reconciliation destination can be created.
- mappingConfig PipelineJobMappingPipelineJobMappingConfig
- The location of the mapping configuration. Structure is documented below.
- fhirStoreDestination String
- If set, the mapping pipeline will write snapshots to this FHIR store without assigning stable IDs. You must grant your pipeline project's Cloud Healthcare Service Agent service account healthcare.fhirResources.executeBundle and healthcare.fhirResources.create permissions on the destination store. The destination store must set disableReferentialIntegrity to true. The destination store must use FHIR version R4. Format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.
- fhirStreamingSource PipelineJobMappingPipelineJobFhirStreamingSource
- A streaming FHIR data source. Structure is documented below.
- reconciliationDestination Boolean
- If set to true, a mapping pipeline will send output snapshots to the reconciliation pipeline in its dataset. A reconciliation pipeline must exist in this dataset before a mapping pipeline with a reconciliation destination can be created.
- mappingConfig PipelineJobMappingPipelineJobMappingConfig
- The location of the mapping configuration. Structure is documented below.
- fhirStoreDestination string
- If set, the mapping pipeline will write snapshots to this FHIR store without assigning stable IDs. You must grant your pipeline project's Cloud Healthcare Service Agent service account healthcare.fhirResources.executeBundle and healthcare.fhirResources.create permissions on the destination store. The destination store must set disableReferentialIntegrity to true. The destination store must use FHIR version R4. Format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.
- fhirStreamingSource PipelineJobMappingPipelineJobFhirStreamingSource
- A streaming FHIR data source. Structure is documented below.
- reconciliationDestination boolean
- If set to true, a mapping pipeline will send output snapshots to the reconciliation pipeline in its dataset. A reconciliation pipeline must exist in this dataset before a mapping pipeline with a reconciliation destination can be created.
- mapping_config PipelineJobMappingPipelineJobMappingConfig
- The location of the mapping configuration. Structure is documented below.
- fhir_store_destination str
- If set, the mapping pipeline will write snapshots to this FHIR store without assigning stable IDs. You must grant your pipeline project's Cloud Healthcare Service Agent service account healthcare.fhirResources.executeBundle and healthcare.fhirResources.create permissions on the destination store. The destination store must set disableReferentialIntegrity to true. The destination store must use FHIR version R4. Format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.
- fhir_streaming_source PipelineJobMappingPipelineJobFhirStreamingSource
- A streaming FHIR data source. Structure is documented below.
- reconciliation_destination bool
- If set to true, a mapping pipeline will send output snapshots to the reconciliation pipeline in its dataset. A reconciliation pipeline must exist in this dataset before a mapping pipeline with a reconciliation destination can be created.
- mappingConfig Property Map
- The location of the mapping configuration. Structure is documented below.
- fhirStoreDestination String
- If set, the mapping pipeline will write snapshots to this FHIR store without assigning stable IDs. You must grant your pipeline project's Cloud Healthcare Service Agent service account healthcare.fhirResources.executeBundle and healthcare.fhirResources.create permissions on the destination store. The destination store must set disableReferentialIntegrity to true. The destination store must use FHIR version R4. Format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{fhirStoreID}.
- fhirStreamingSource Property Map
- A streaming FHIR data source. Structure is documented below.
- reconciliationDestination Boolean
- If set to true, a mapping pipeline will send output snapshots to the reconciliation pipeline in its dataset. A reconciliation pipeline must exist in this dataset before a mapping pipeline with a reconciliation destination can be created.
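The Service Agent permission note on fhirStoreDestination can be satisfied with an IAM grant on the destination store; this is a sketch only, with placeholder names. The role below is an assumption: substitute any predefined or custom role that actually carries healthcare.fhirResources.executeBundle and healthcare.fhirResources.create.
import pulumi_gcp as gcp

project = gcp.organizations.get_project()

# Assumed role; confirm it grants healthcare.fhirResources.executeBundle
# and healthcare.fhirResources.create before relying on it.
dest_store_iam = gcp.healthcare.FhirStoreIamMember("dest-store-agent",
    fhir_store_id="projects/my-project/locations/us-central1/datasets/example_dataset/fhirStores/dest_fhir_store",
    role="roles/healthcare.fhirResourceEditor",
    member=f"serviceAccount:service-{project.number}@gcp-sa-healthcare.iam.gserviceaccount.com")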
PipelineJobMappingPipelineJobFhirStreamingSource, PipelineJobMappingPipelineJobFhirStreamingSourceArgs
- FhirStore string
- The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.
- Description string
- Describes the streaming FHIR data source.
- FhirStore string
- The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.
- Description string
- Describes the streaming FHIR data source.
- fhirStore String
- The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.
- description String
- Describes the streaming FHIR data source.
- fhirStore string
- The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.
- description string
- Describes the streaming FHIR data source.
- fhir_store str
- The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.
- description str
- Describes the streaming FHIR data source.
- fhirStore String
- The path to the FHIR store in the format projects/{projectId}/locations/{locationId}/datasets/{datasetId}/fhirStores/{fhirStoreId}.
- description String
- Describes the streaming FHIR data source.
PipelineJobMappingPipelineJobMappingConfig, PipelineJobMappingPipelineJobMappingConfigArgs
- Description string
- Describes the mapping configuration.
- WhistleConfigSource PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- Description string
- Describes the mapping configuration.
- WhistleConfigSource PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description String
- Describes the mapping configuration.
- whistleConfigSource PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description string
- Describes the mapping configuration.
- whistleConfigSource PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description str
- Describes the mapping configuration.
- whistle_config_source PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description String
- Describes the mapping configuration.
- whistleConfigSource Property Map
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
PipelineJobMappingPipelineJobMappingConfigWhistleConfigSource, PipelineJobMappingPipelineJobMappingConfigWhistleConfigSourceArgs
- ImportUriPrefix string
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- Uri string
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- ImportUriPrefix string
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- Uri string
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- importUriPrefix String
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri String
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- importUriPrefix string
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri string
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- import_uri_prefix str
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri str
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- importUriPrefix String
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri String
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
PipelineJobReconciliationPipelineJob, PipelineJobReconciliationPipelineJobArgs
- MatchingUriPrefix string
- Specifies the top-level directory of the matching configs used in all mapping pipelines, which extract properties for resources to be matched on. Example: gs://{bucket-id}/{path/to/matching/configs}
- MergeConfig PipelineJobReconciliationPipelineJobMergeConfig
- Specifies the location of the reconciliation configuration. Structure is documented below.
- FhirStoreDestination string
- The harmonized FHIR store to write harmonized FHIR resources to, in the format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}
- MatchingUriPrefix string
- Specifies the top-level directory of the matching configs used in all mapping pipelines, which extract properties for resources to be matched on. Example: gs://{bucket-id}/{path/to/matching/configs}
- MergeConfig PipelineJobReconciliationPipelineJobMergeConfig
- Specifies the location of the reconciliation configuration. Structure is documented below.
- FhirStoreDestination string
- The harmonized FHIR store to write harmonized FHIR resources to, in the format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}
- matchingUriPrefix String
- Specifies the top-level directory of the matching configs used in all mapping pipelines, which extract properties for resources to be matched on. Example: gs://{bucket-id}/{path/to/matching/configs}
- mergeConfig PipelineJobReconciliationPipelineJobMergeConfig
- Specifies the location of the reconciliation configuration. Structure is documented below.
- fhirStoreDestination String
- The harmonized FHIR store to write harmonized FHIR resources to, in the format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}
- matchingUriPrefix string
- Specifies the top-level directory of the matching configs used in all mapping pipelines, which extract properties for resources to be matched on. Example: gs://{bucket-id}/{path/to/matching/configs}
- mergeConfig PipelineJobReconciliationPipelineJobMergeConfig
- Specifies the location of the reconciliation configuration. Structure is documented below.
- fhirStoreDestination string
- The harmonized FHIR store to write harmonized FHIR resources to, in the format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}
- matching_uri_prefix str
- Specifies the top-level directory of the matching configs used in all mapping pipelines, which extract properties for resources to be matched on. Example: gs://{bucket-id}/{path/to/matching/configs}
- merge_config PipelineJobReconciliationPipelineJobMergeConfig
- Specifies the location of the reconciliation configuration. Structure is documented below.
- fhir_store_destination str
- The harmonized FHIR store to write harmonized FHIR resources to, in the format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}
- matchingUriPrefix String
- Specifies the top-level directory of the matching configs used in all mapping pipelines, which extract properties for resources to be matched on. Example: gs://{bucket-id}/{path/to/matching/configs}
- mergeConfig Property Map
- Specifies the location of the reconciliation configuration. Structure is documented below.
- fhirStoreDestination String
- The harmonized FHIR store to write harmonized FHIR resources to, in the format: projects/{projectID}/locations/{locationID}/datasets/{datasetName}/fhirStores/{id}
PipelineJobReconciliationPipelineJobMergeConfig, PipelineJobReconciliationPipelineJobMergeConfigArgs
- WhistleConfigSource PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- Description string
- Describes the mapping configuration.
- WhistleConfigSource PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- Description string
- Describes the mapping configuration.
- whistleConfigSource PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description String
- Describes the mapping configuration.
- whistleConfigSource PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description string
- Describes the mapping configuration.
- whistle_config_source PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description str
- Describes the mapping configuration.
- whistleConfigSource Property Map
- Specifies the path to the mapping configuration for the harmonization pipeline. Structure is documented below.
- description String
- Describes the mapping configuration.
PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSource, PipelineJobReconciliationPipelineJobMergeConfigWhistleConfigSourceArgs
- ImportUriPrefix string
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- Uri string
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- ImportUriPrefix string
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- Uri string
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- importUriPrefix String
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri String
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- importUriPrefix string
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri string
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- import_uri_prefix str
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri str
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
- importUriPrefix String
- Directory path where all the Whistle files are located. Example: gs://{bucket-id}/{path/to/import-root/dir}
- uri String
- Main configuration file which has the entrypoint or the root function. Example: gs://{bucket-id}/{path/to/import-root/dir}/entrypoint-file-name.wstl.
Import
PipelineJob can be imported using any of these accepted formats:
{{dataset}}/pipelineJobs/{{name}}
{{dataset}}/pipelineJobs?pipelineJobId={{name}}
{{name}}
When using the pulumi import command, PipelineJob can be imported using one of the formats above. For example:
$ pulumi import gcp:healthcare/pipelineJob:PipelineJob default {{dataset}}/pipelineJobs/{{name}}
$ pulumi import gcp:healthcare/pipelineJob:PipelineJob default {{dataset}}/pipelineJobs?pipelineJobId={{name}}
$ pulumi import gcp:healthcare/pipelineJob:PipelineJob default {{name}}
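For example, with placeholder project, dataset, and pipeline job names, the first format expands to the dataset's full resource path:
$ pulumi import gcp:healthcare/pipelineJob:PipelineJob default projects/my-project/locations/us-central1/datasets/example_dataset/pipelineJobs/example_pipeline_job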
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.