mongodbatlas.DataLakePipeline
Explore with Pulumi AI
WARNING: Data Lake is deprecated. To learn more, see https://dochub.mongodb.org/core/data-lake-deprecation
mongodbatlas.DataLakePipeline provides a Data Lake Pipeline resource.
NOTE: Groups and projects are synonymous terms. You may find
group_id
in the official documentation.
Example Usage
S
Coming soon!
Coming soon!
Coming soon!
Coming soon!
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.mongodbatlas.AdvancedCluster;
import com.pulumi.mongodbatlas.AdvancedClusterArgs;
import com.pulumi.mongodbatlas.DataLakePipeline;
import com.pulumi.mongodbatlas.DataLakePipelineArgs;
import com.pulumi.mongodbatlas.Project;
import com.pulumi.mongodbatlas.ProjectArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecRegionConfigArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecRegionConfigElectableSpecsArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSinkArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSinkPartitionFieldArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSourceArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineTransformationArgs;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    /**
     * Declares a project, a backup-enabled cluster in that project, and a
     * Data Lake Pipeline that exports the cluster's backup snapshots on demand.
     */
    public static void stack(Context ctx) {
        // Project that owns both the cluster and the pipeline.
        var projectTest = new Project("projectTest", ProjectArgs.builder()
            .name("NAME OF THE PROJECT")
            .orgId("ORGANIZATION ID")
            .build());

        // Cluster with cloud backup enabled; its snapshots feed the pipeline source.
        var automatedBackupTest = new AdvancedCluster("automatedBackupTest", AdvancedClusterArgs.builder()
            // Fixed: previously referenced an undefined variable `projectId`;
            // the Project resource's ID is the Atlas project ID.
            .projectId(projectTest.id())
            .name("automated-backup-test")
            .clusterType("REPLICASET")
            .backupEnabled(true) // enable cloud backup snapshots
            .replicationSpecs(AdvancedClusterReplicationSpecArgs.builder()
                .regionConfigs(AdvancedClusterReplicationSpecRegionConfigArgs.builder()
                    .priority(7)
                    .providerName("GCP")
                    .regionName("US_EAST_4")
                    .electableSpecs(AdvancedClusterReplicationSpecRegionConfigElectableSpecsArgs.builder()
                        .instanceSize("M10")
                        .nodeCount(3)
                        .build())
                    .build())
                .build())
            .build());

        var pipeline = new DataLakePipeline("pipeline", DataLakePipelineArgs.builder()
            // Fixed: Project exposes no `projectId()` output; use its resource ID.
            .projectId(projectTest.id())
            .name("DataLakePipelineName")
            .sink(DataLakePipelineSinkArgs.builder()
                .type("DLS")
                .partitionFields(DataLakePipelineSinkPartitionFieldArgs.builder()
                    // Fixed: the partition-field attribute is `fieldName`, not `name`
                    // (matches the constructor reference for DataLakePipelineSinkPartitionFieldArgs).
                    .fieldName("access")
                    .order(0)
                    .build())
                .build())
            .source(DataLakePipelineSourceArgs.builder()
                .type("ON_DEMAND_CPS")
                .clusterName(automatedBackupTest.name())
                .databaseName("sample_airbnb")
                .collectionName("listingsAndReviews")
                .build())
            .transformations(
                DataLakePipelineTransformationArgs.builder()
                    .field("test")
                    .type("EXCLUDE")
                    .build(),
                DataLakePipelineTransformationArgs.builder()
                    .field("test22")
                    .type("EXCLUDE")
                    .build())
            .build());
    }
}
resources:
  projectTest:
    type: mongodbatlas:Project
    properties:
      name: NAME OF THE PROJECT
      orgId: ORGANIZATION ID
  automatedBackupTest:
    type: mongodbatlas:AdvancedCluster
    name: automated_backup_test
    properties:
      # Fixed: was ${projectId}, which is not declared anywhere in this program;
      # the Project resource's ID is the Atlas project ID.
      projectId: ${projectTest.id}
      name: automated-backup-test
      clusterType: REPLICASET
      backupEnabled: true # enable cloud backup snapshots
      replicationSpecs:
        - regionConfigs:
            - priority: 7
              providerName: GCP
              regionName: US_EAST_4
              electableSpecs:
                instanceSize: M10
                nodeCount: 3
  pipeline:
    type: mongodbatlas:DataLakePipeline
    properties:
      # Fixed: Project exposes no `projectId` output; use its resource ID.
      projectId: ${projectTest.id}
      name: DataLakePipelineName
      sink:
        type: DLS
        partitionFields:
          # Fixed: the partition-field attribute is fieldName, not name.
          - fieldName: access
            order: 0
      source:
        type: ON_DEMAND_CPS
        clusterName: ${automatedBackupTest.name}
        databaseName: sample_airbnb
        collectionName: listingsAndReviews
      transformations:
        - field: test
          type: EXCLUDE
        - field: test22
          type: EXCLUDE
Create DataLakePipeline Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DataLakePipeline(name: string, args: DataLakePipelineArgs, opts?: CustomResourceOptions);
@overload
def DataLakePipeline(resource_name: str,
args: DataLakePipelineArgs,
opts: Optional[ResourceOptions] = None)
@overload
def DataLakePipeline(resource_name: str,
opts: Optional[ResourceOptions] = None,
project_id: Optional[str] = None,
name: Optional[str] = None,
sink: Optional[DataLakePipelineSinkArgs] = None,
source: Optional[DataLakePipelineSourceArgs] = None,
transformations: Optional[Sequence[DataLakePipelineTransformationArgs]] = None)
func NewDataLakePipeline(ctx *Context, name string, args DataLakePipelineArgs, opts ...ResourceOption) (*DataLakePipeline, error)
public DataLakePipeline(string name, DataLakePipelineArgs args, CustomResourceOptions? opts = null)
public DataLakePipeline(String name, DataLakePipelineArgs args)
public DataLakePipeline(String name, DataLakePipelineArgs args, CustomResourceOptions options)
type: mongodbatlas:DataLakePipeline
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DataLakePipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DataLakePipelineArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DataLakePipelineArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DataLakePipelineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DataLakePipelineArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var dataLakePipelineResource = new Mongodbatlas.DataLakePipeline("dataLakePipelineResource", new()
{
ProjectId = "string",
Name = "string",
Sink = new Mongodbatlas.Inputs.DataLakePipelineSinkArgs
{
PartitionFields = new[]
{
new Mongodbatlas.Inputs.DataLakePipelineSinkPartitionFieldArgs
{
FieldName = "string",
Order = 0,
},
},
Provider = "string",
Region = "string",
Type = "string",
},
Source = new Mongodbatlas.Inputs.DataLakePipelineSourceArgs
{
ClusterName = "string",
CollectionName = "string",
DatabaseName = "string",
PolicyItemId = "string",
ProjectId = "string",
Type = "string",
},
Transformations = new[]
{
new Mongodbatlas.Inputs.DataLakePipelineTransformationArgs
{
Field = "string",
Type = "string",
},
},
});
example, err := mongodbatlas.NewDataLakePipeline(ctx, "dataLakePipelineResource", &mongodbatlas.DataLakePipelineArgs{
ProjectId: pulumi.String("string"),
Name: pulumi.String("string"),
Sink: &mongodbatlas.DataLakePipelineSinkArgs{
PartitionFields: mongodbatlas.DataLakePipelineSinkPartitionFieldArray{
&mongodbatlas.DataLakePipelineSinkPartitionFieldArgs{
FieldName: pulumi.String("string"),
Order: pulumi.Int(0),
},
},
Provider: pulumi.String("string"),
Region: pulumi.String("string"),
Type: pulumi.String("string"),
},
Source: &mongodbatlas.DataLakePipelineSourceArgs{
ClusterName: pulumi.String("string"),
CollectionName: pulumi.String("string"),
DatabaseName: pulumi.String("string"),
PolicyItemId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
Type: pulumi.String("string"),
},
Transformations: mongodbatlas.DataLakePipelineTransformationArray{
&mongodbatlas.DataLakePipelineTransformationArgs{
Field: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
})
var dataLakePipelineResource = new DataLakePipeline("dataLakePipelineResource", DataLakePipelineArgs.builder()
.projectId("string")
.name("string")
.sink(DataLakePipelineSinkArgs.builder()
.partitionFields(DataLakePipelineSinkPartitionFieldArgs.builder()
.fieldName("string")
.order(0)
.build())
.provider("string")
.region("string")
.type("string")
.build())
.source(DataLakePipelineSourceArgs.builder()
.clusterName("string")
.collectionName("string")
.databaseName("string")
.policyItemId("string")
.projectId("string")
.type("string")
.build())
.transformations(DataLakePipelineTransformationArgs.builder()
.field("string")
.type("string")
.build())
.build());
data_lake_pipeline_resource = mongodbatlas.DataLakePipeline("dataLakePipelineResource",
project_id="string",
name="string",
sink={
"partition_fields": [{
"field_name": "string",
"order": 0,
}],
"provider": "string",
"region": "string",
"type": "string",
},
source={
"cluster_name": "string",
"collection_name": "string",
"database_name": "string",
"policy_item_id": "string",
"project_id": "string",
"type": "string",
},
transformations=[{
"field": "string",
"type": "string",
}])
const dataLakePipelineResource = new mongodbatlas.DataLakePipeline("dataLakePipelineResource", {
projectId: "string",
name: "string",
sink: {
partitionFields: [{
fieldName: "string",
order: 0,
}],
provider: "string",
region: "string",
type: "string",
},
source: {
clusterName: "string",
collectionName: "string",
databaseName: "string",
policyItemId: "string",
projectId: "string",
type: "string",
},
transformations: [{
field: "string",
type: "string",
}],
});
type: mongodbatlas:DataLakePipeline
properties:
name: string
projectId: string
sink:
partitionFields:
- fieldName: string
order: 0
provider: string
region: string
type: string
source:
clusterName: string
collectionName: string
databaseName: string
policyItemId: string
projectId: string
type: string
transformations:
- field: string
type: string
DataLakePipeline Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The DataLakePipeline resource accepts the following input properties:
- Project
Id string - The unique ID for the project to create a data lake pipeline.
- Name string
- Name of the Atlas Data Lake Pipeline.
- Sink
Data
Lake Pipeline Sink - Source
Data
Lake Pipeline Source - Transformations
List<Data
Lake Pipeline Transformation> - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- Project
Id string - The unique ID for the project to create a data lake pipeline.
- Name string
- Name of the Atlas Data Lake Pipeline.
- Sink
Data
Lake Pipeline Sink Args - Source
Data
Lake Pipeline Source Args - Transformations
[]Data
Lake Pipeline Transformation Args - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- project
Id String - The unique ID for the project to create a data lake pipeline.
- name String
- Name of the Atlas Data Lake Pipeline.
- sink
Data
Lake Pipeline Sink - source
Data
Lake Pipeline Source - transformations
List<Data
Lake Pipeline Transformation> - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- project
Id string - The unique ID for the project to create a data lake pipeline.
- name string
- Name of the Atlas Data Lake Pipeline.
- sink
Data
Lake Pipeline Sink - source
Data
Lake Pipeline Source - transformations
Data
Lake Pipeline Transformation[] - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- project_
id str - The unique ID for the project to create a data lake pipeline.
- name str
- Name of the Atlas Data Lake Pipeline.
- sink
Data
Lake Pipeline Sink Args - source
Data
Lake Pipeline Source Args - transformations
Sequence[Data
Lake Pipeline Transformation Args] - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- project
Id String - The unique ID for the project to create a data lake pipeline.
- name String
- Name of the Atlas Data Lake Pipeline.
- sink Property Map
- source Property Map
- transformations List<Property Map>
- Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
Outputs
All input properties are implicitly available as output properties. Additionally, the DataLakePipeline resource produces the following output properties:
- Created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- Id string
- The provider-assigned unique ID for this managed resource.
- Ingestion
Schedules List<DataLake Pipeline Ingestion Schedule> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- Last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Snapshots
List<Data
Lake Pipeline Snapshot> - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- State string
- State of this Data Lake Pipeline.
- Created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- Id string
- The provider-assigned unique ID for this managed resource.
- Ingestion
Schedules []DataLake Pipeline Ingestion Schedule - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- Last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Snapshots
[]Data
Lake Pipeline Snapshot - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- State string
- State of this Data Lake Pipeline.
- created
Date String - Timestamp that indicates when the Data Lake Pipeline was created.
- id String
- The provider-assigned unique ID for this managed resource.
- ingestion
Schedules List<DataLake Pipeline Ingestion Schedule> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date String - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- snapshots
List<Data
Lake Pipeline Snapshot> - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- state String
- State of this Data Lake Pipeline.
- created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- id string
- The provider-assigned unique ID for this managed resource.
- ingestion
Schedules DataLake Pipeline Ingestion Schedule[] - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- snapshots
Data
Lake Pipeline Snapshot[] - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- state string
- State of this Data Lake Pipeline.
- created_
date str - Timestamp that indicates when the Data Lake Pipeline was created.
- id str
- The provider-assigned unique ID for this managed resource.
- ingestion_
schedules Sequence[DataLake Pipeline Ingestion Schedule] - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last_
updated_ date str - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- snapshots
Sequence[Data
Lake Pipeline Snapshot] - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- state str
- State of this Data Lake Pipeline.
- created
Date String - Timestamp that indicates when the Data Lake Pipeline was created.
- id String
- The provider-assigned unique ID for this managed resource.
- ingestion
Schedules List<Property Map> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date String - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- snapshots List<Property Map>
- List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- state String
- State of this Data Lake Pipeline.
Look up Existing DataLakePipeline Resource
Get an existing DataLakePipeline resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DataLakePipelineState, opts?: CustomResourceOptions): DataLakePipeline
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
created_date: Optional[str] = None,
ingestion_schedules: Optional[Sequence[DataLakePipelineIngestionScheduleArgs]] = None,
last_updated_date: Optional[str] = None,
name: Optional[str] = None,
project_id: Optional[str] = None,
sink: Optional[DataLakePipelineSinkArgs] = None,
snapshots: Optional[Sequence[DataLakePipelineSnapshotArgs]] = None,
source: Optional[DataLakePipelineSourceArgs] = None,
state: Optional[str] = None,
transformations: Optional[Sequence[DataLakePipelineTransformationArgs]] = None) -> DataLakePipeline
func GetDataLakePipeline(ctx *Context, name string, id IDInput, state *DataLakePipelineState, opts ...ResourceOption) (*DataLakePipeline, error)
public static DataLakePipeline Get(string name, Input<string> id, DataLakePipelineState? state, CustomResourceOptions? opts = null)
public static DataLakePipeline get(String name, Output<String> id, DataLakePipelineState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- Ingestion
Schedules List<DataLake Pipeline Ingestion Schedule> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- Last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Name string
- Name of the Atlas Data Lake Pipeline.
- Project
Id string - The unique ID for the project to create a data lake pipeline.
- Sink
Data
Lake Pipeline Sink - Snapshots
List<Data
Lake Pipeline Snapshot> - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- Number of bytes taken to store the backup snapshot.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- Source
Data
Lake Pipeline Source - State string
- State of this Data Lake Pipeline.
- Transformations
List<Data
Lake Pipeline Transformation> - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- Created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- Ingestion
Schedules []DataLake Pipeline Ingestion Schedule Args - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- Last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Name string
- Name of the Atlas Data Lake Pipeline.
- Project
Id string - The unique ID for the project to create a data lake pipeline.
- Sink
Data
Lake Pipeline Sink Args - Snapshots
[]Data
Lake Pipeline Snapshot Args - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- Number of bytes taken to store the backup snapshot.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- Source
Data
Lake Pipeline Source Args - State string
- State of this Data Lake Pipeline.
- Transformations
[]Data
Lake Pipeline Transformation Args - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created
Date String - Timestamp that indicates when the Data Lake Pipeline was created.
- ingestion
Schedules List<DataLake Pipeline Ingestion Schedule> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date String - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name String
- Name of the Atlas Data Lake Pipeline.
- project
Id String - The unique ID for the project to create a data lake pipeline.
- sink
Data
Lake Pipeline Sink - snapshots
List<Data
Lake Pipeline Snapshot> - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- Number of bytes taken to store the backup snapshot.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- source
Data
Lake Pipeline Source - state String
- State of this Data Lake Pipeline.
- transformations
List<Data
Lake Pipeline Transformation> - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- ingestion
Schedules DataLake Pipeline Ingestion Schedule[] - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name string
- Name of the Atlas Data Lake Pipeline.
- project
Id string - The unique ID for the project to create a data lake pipeline.
- sink
Data
Lake Pipeline Sink - snapshots
Data
Lake Pipeline Snapshot[] - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- Number of bytes taken to store the backup snapshot.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- source
Data
Lake Pipeline Source - state string
- State of this Data Lake Pipeline.
- transformations
Data
Lake Pipeline Transformation[] - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created_
date str - Timestamp that indicates when the Data Lake Pipeline was created.
- ingestion_
schedules Sequence[DataLake Pipeline Ingestion Schedule Args] - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last_
updated_ date str - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name str
- Name of the Atlas Data Lake Pipeline.
- project_
id str - The unique ID for the project to create a data lake pipeline.
- sink
Data
Lake Pipeline Sink Args - snapshots
Sequence[Data
Lake Pipeline Snapshot Args] - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- Number of bytes taken to store the backup snapshot.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- source
Data
Lake Pipeline Source Args - state str
- State of this Data Lake Pipeline.
- transformations
Sequence[Data
Lake Pipeline Transformation Args] - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created
Date String - Timestamp that indicates when the Data Lake Pipeline was created.
- ingestion
Schedules List<Property Map> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date String - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name String
- Name of the Atlas Data Lake Pipeline.
- project
Id String - The unique ID for the project to create a data lake pipeline.
- sink Property Map
- snapshots List<Property Map>
- List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- Number of bytes taken to store the backup snapshot.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- source Property Map
- state String
- State of this Data Lake Pipeline.
- transformations List<Property Map>
- Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
Supporting Types
DataLakePipelineIngestionSchedule, DataLakePipelineIngestionScheduleArgs
- Frequency
Interval int - Frequency
Type string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Retention
Unit string - Retention
Value int
- Frequency
Interval int - Frequency
Type string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Retention
Unit string - Retention
Value int
- frequency
Interval Integer - frequency
Type String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention
Unit String - retention
Value Integer
- frequency
Interval number - frequency
Type string - id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention
Unit string - retention
Value number
- frequency_
interval int - frequency_
type str - id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention_
unit str - retention_
value int
- frequency
Interval Number - frequency
Type String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention
Unit String - retention
Value Number
DataLakePipelineSink, DataLakePipelineSinkArgs
- Partition
Fields List<DataLake Pipeline Sink Partition Field> - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- Partition
Fields []DataLake Pipeline Sink Partition Field - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- partition
Fields List<DataLake Pipeline Sink Partition Field> - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider String
- Target cloud provider for this Data Lake Pipeline.
- region String
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type String
- Type of ingestion source of this Data Lake Pipeline.
- partition
Fields DataLake Pipeline Sink Partition Field[] - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider string
- Target cloud provider for this Data Lake Pipeline.
- region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type string
- Type of ingestion source of this Data Lake Pipeline.
- partition_
fields Sequence[DataLake Pipeline Sink Partition Field] - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider str
- Target cloud provider for this Data Lake Pipeline.
- region str
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type str
- Type of ingestion source of this Data Lake Pipeline.
- partition
Fields List<Property Map> - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider String
- Target cloud provider for this Data Lake Pipeline.
- region String
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type String
- Type of ingestion source of this Data Lake Pipeline.
DataLakePipelineSinkPartitionField, DataLakePipelineSinkPartitionFieldArgs
- field_
name str - order int
DataLakePipelineSnapshot, DataLakePipelineSnapshotArgs
- Copy
Region string - Created
At string - Expires
At string - Frequency
Yype string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Master
Key string - Mongod
Version string - Policies List<string>
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Replica
Set Name string - Size int
- Snapshot
Type string - Status string
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- Copy
Region string - Created
At string - Expires
At string - Frequency
Yype string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Master
Key string - Mongod
Version string - Policies []string
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Replica
Set Name string - Size int
- Snapshot
Type string - Status string
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- copy
Region String - created
At String - expires
At String - frequency
Yype String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master
Key String - mongod
Version String - policies List<String>
- provider String
- Target cloud provider for this Data Lake Pipeline.
- replica
Set Name String - size Integer
- snapshot
Type String - status String
- type String
- Type of ingestion source of this Data Lake Pipeline.
- copy
Region string - created
At string - expires
At string - frequency
Yype string - id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master
Key string - mongod
Version string - policies string[]
- provider string
- Target cloud provider for this Data Lake Pipeline.
- replica
Set Name string - size number
- snapshot
Type string - status string
- type string
- Type of ingestion source of this Data Lake Pipeline.
- copy_
region str - created_
at str - expires_
at str - frequency_
yype str - id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master_
key str - mongod_
version str - policies Sequence[str]
- provider str
- Target cloud provider for this Data Lake Pipeline.
- replica_
set_ name str - size int
- snapshot_
type str - status str
- type str
- Type of ingestion source of this Data Lake Pipeline.
- copy
Region String - created
At String - expires
At String - frequency
Yype String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master
Key String - mongod
Version String - policies List<String>
- provider String
- Target cloud provider for this Data Lake Pipeline.
- replica
Set Name String - size Number
- snapshot
Type String - status String
- type String
- Type of ingestion source of this Data Lake Pipeline.
DataLakePipelineSource, DataLakePipelineSourceArgs
- Cluster
Name string - Human-readable name that identifies the cluster.
- Collection
Name string - Human-readable name that identifies the collection.
- Database
Name string - Human-readable name that identifies the database.
- Policy
Item Id string - Project
Id string - The unique ID for the project to create a data lake pipeline.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- Cluster
Name string - Human-readable name that identifies the cluster.
- Collection
Name string - Human-readable name that identifies the collection.
- Database
Name string - Human-readable name that identifies the database.
- Policy
Item Id string - Project
Id string - The unique ID for the project to create a data lake pipeline.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- cluster
Name String - Human-readable name that identifies the cluster.
- collection
Name String - Human-readable name that identifies the collection.
- database
Name String - Human-readable name that identifies the database.
- policy
Item Id String - project
Id String - The unique ID for the project to create a data lake pipeline.
- type String
- Type of ingestion source of this Data Lake Pipeline.
- cluster
Name string - Human-readable name that identifies the cluster.
- collection
Name string - Human-readable name that identifies the collection.
- database
Name string - Human-readable name that identifies the database.
- policy
Item Id string - project
Id string - The unique ID for the project to create a data lake pipeline.
- type string
- Type of ingestion source of this Data Lake Pipeline.
- cluster_
name str - Human-readable name that identifies the cluster.
- collection_
name str - Human-readable name that identifies the collection.
- database_
name str - Human-readable name that identifies the database.
- policy_
item_ id str - project_
id str - The unique ID for the project to create a data lake pipeline.
- type str
- Type of ingestion source of this Data Lake Pipeline.
- cluster
Name String - Human-readable name that identifies the cluster.
- collection
Name String - Human-readable name that identifies the collection.
- database
Name String - Human-readable name that identifies the database.
- policy
Item Id String - project
Id String - The unique ID for the project to create a data lake pipeline.
- type String
- Type of ingestion source of this Data Lake Pipeline.
DataLakePipelineTransformation, DataLakePipelineTransformationArgs
Import
Data Lake Pipeline can be imported using the project ID and the name of the data lake pipeline, in the format project_id--name, e.g.
$ pulumi import mongodbatlas:index/dataLakePipeline:DataLakePipeline example 1112222b3bf99403840e8934--test-data-lake-pipeline-test
See MongoDB Atlas API Documentation for more information.
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- MongoDB Atlas pulumi/pulumi-mongodbatlas
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
mongodbatlas
Terraform Provider.