We recommend new projects start with resources from the AWS provider.
aws-native.databrew.Job
Explore with Pulumi AI
We recommend new projects start with resources from the AWS provider.
Resource schema for AWS::DataBrew::Job.
Example Usage
Example
using System.Collections.Generic;
using System.Linq;
using Pulumi;
// Alias the AWS Native provider namespace for brevity.
using AwsNative = Pulumi.AwsNative;
return await Deployment.RunAsync(() =>
{
// Create a DataBrew profile job that profiles the full dataset
// and writes its report to the given S3 location.
var myDataBrewProfileJob = new AwsNative.DataBrew.Job("myDataBrewProfileJob", new()
{
// PROFILE (as opposed to RECIPE) — the job analyzes the dataset.
Type = AwsNative.DataBrew.JobType.Profile,
Name = "job-test",
DatasetName = "dataset-test",
// IAM role DataBrew assumes to read the dataset and write output.
RoleArn = "arn:aws:iam::1234567891011:role/PassRoleAdmin",
JobSample = new AwsNative.DataBrew.Inputs.JobSampleArgs
{
// Profile every row rather than a sample.
Mode = AwsNative.DataBrew.JobSampleMode.FullDataset,
},
// S3 bucket/key where the profile report is written.
OutputLocation = new AwsNative.DataBrew.Inputs.JobOutputLocationArgs
{
Bucket = "test-output",
Key = "job-output.json",
},
// Tags applied at creation time only (CreateOnlyTag).
Tags = new[]
{
new AwsNative.Inputs.CreateOnlyTagArgs
{
Key = "key00AtCreate",
Value = "value001AtCreate",
},
},
});
});
package main
import (
awsnative "github.com/pulumi/pulumi-aws-native/sdk/go/aws"
"github.com/pulumi/pulumi-aws-native/sdk/go/aws/databrew"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := databrew.NewJob(ctx, "myDataBrewProfileJob", &databrew.JobArgs{
Type: databrew.JobTypeProfile,
Name: pulumi.String("job-test"),
DatasetName: pulumi.String("dataset-test"),
RoleArn: pulumi.String("arn:aws:iam::1234567891011:role/PassRoleAdmin"),
JobSample: &databrew.JobSampleArgs{
Mode: databrew.JobSampleModeFullDataset,
},
OutputLocation: &databrew.JobOutputLocationArgs{
Bucket: pulumi.String("test-output"),
Key: pulumi.String("job-output.json"),
},
Tags: aws.CreateOnlyTagArray{
&aws.CreateOnlyTagArgs{
Key: pulumi.String("key00AtCreate"),
Value: pulumi.String("value001AtCreate"),
},
},
})
if err != nil {
return err
}
return nil
})
}
Coming soon!
import pulumi
import pulumi_aws_native as aws_native

# Create a DataBrew profile job that profiles the full dataset
# and writes its report to the given S3 location.
my_data_brew_profile_job = aws_native.databrew.Job("myDataBrewProfileJob",
    # PROFILE (as opposed to RECIPE) — the job analyzes the dataset.
    type=aws_native.databrew.JobType.PROFILE,
    name="job-test",
    dataset_name="dataset-test",
    # IAM role DataBrew assumes to read the dataset and write output.
    role_arn="arn:aws:iam::1234567891011:role/PassRoleAdmin",
    job_sample={
        # Profile every row rather than a sample.
        "mode": aws_native.databrew.JobSampleMode.FULL_DATASET,
    },
    # S3 bucket/key where the profile report is written.
    output_location={
        "bucket": "test-output",
        "key": "job-output.json",
    },
    # Tags applied at creation time only (CreateOnlyTag).
    tags=[{
        "key": "key00AtCreate",
        "value": "value001AtCreate",
    }])
import * as pulumi from "@pulumi/pulumi";
import * as aws_native from "@pulumi/aws-native";

// Create a DataBrew profile job that profiles the full dataset
// and writes its report to the given S3 location.
const myDataBrewProfileJob = new aws_native.databrew.Job("myDataBrewProfileJob", {
    // PROFILE (as opposed to RECIPE) — the job analyzes the dataset.
    type: aws_native.databrew.JobType.Profile,
    name: "job-test",
    datasetName: "dataset-test",
    // IAM role DataBrew assumes to read the dataset and write output.
    roleArn: "arn:aws:iam::1234567891011:role/PassRoleAdmin",
    jobSample: {
        // Profile every row rather than a sample.
        mode: aws_native.databrew.JobSampleMode.FullDataset,
    },
    // S3 bucket/key where the profile report is written.
    outputLocation: {
        bucket: "test-output",
        key: "job-output.json",
    },
    // Tags applied at creation time only (CreateOnlyTag).
    tags: [{
        key: "key00AtCreate",
        value: "value001AtCreate",
    }],
});
Coming soon!
Create Job Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Job(name: string, args: JobArgs, opts?: CustomResourceOptions);
@overload
def Job(resource_name: str,
args: JobArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Job(resource_name: str,
opts: Optional[ResourceOptions] = None,
role_arn: Optional[str] = None,
type: Optional[JobType] = None,
job_sample: Optional[JobSampleArgs] = None,
outputs: Optional[Sequence[JobOutputArgs]] = None,
encryption_mode: Optional[JobEncryptionMode] = None,
data_catalog_outputs: Optional[Sequence[JobDataCatalogOutputArgs]] = None,
log_subscription: Optional[JobLogSubscription] = None,
max_capacity: Optional[int] = None,
max_retries: Optional[int] = None,
name: Optional[str] = None,
output_location: Optional[JobOutputLocationArgs] = None,
encryption_key_arn: Optional[str] = None,
profile_configuration: Optional[JobProfileConfigurationArgs] = None,
project_name: Optional[str] = None,
recipe: Optional[JobRecipeArgs] = None,
dataset_name: Optional[str] = None,
tags: Optional[Sequence[_root_inputs.CreateOnlyTagArgs]] = None,
timeout: Optional[int] = None,
database_outputs: Optional[Sequence[JobDatabaseOutputArgs]] = None,
validation_configurations: Optional[Sequence[JobValidationConfigurationArgs]] = None)
func NewJob(ctx *Context, name string, args JobArgs, opts ...ResourceOption) (*Job, error)
public Job(string name, JobArgs args, CustomResourceOptions? opts = null)
type: aws-native:databrew:Job
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args JobArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Job Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Job resource accepts the following input properties:
- Role
Arn string - Role arn
- Type
Pulumi.
Aws Native. Data Brew. Job Type - Job type
- Data
Catalog Outputs List<Pulumi.Aws Native. Data Brew. Inputs. Job Data Catalog Output> - One or more artifacts that represent the AWS Glue Data Catalog output from running the job.
- Database
Outputs List<Pulumi.Aws Native. Data Brew. Inputs. Job Database Output> - Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.
- Dataset
Name string - Dataset name
- Encryption
Key Arn string - Encryption Key Arn
- Encryption
Mode Pulumi.Aws Native. Data Brew. Job Encryption Mode - Encryption mode
- Job
Sample Pulumi.Aws Native. Data Brew. Inputs. Job Sample - Job Sample
- Log
Subscription Pulumi.Aws Native. Data Brew. Job Log Subscription - Log subscription
- Max
Capacity int - Max capacity
- Max
Retries int - Max retries
- Name string
- Job name
- Output
Location Pulumi.Aws Native. Data Brew. Inputs. Job Output Location - Output location
- Outputs
List<Pulumi.
Aws Native. Data Brew. Inputs. Job Output> - One or more artifacts that represent output from running the job.
- Profile
Configuration Pulumi.Aws Native. Data Brew. Inputs. Job Profile Configuration - Profile Job configuration
- Project
Name string - Project name
- Recipe
Pulumi.
Aws Native. Data Brew. Inputs. Job Recipe - A series of data transformation steps that the job runs.
- Tags
List<Pulumi.
Aws Native. Inputs. Create Only Tag> - Metadata tags that have been applied to the job.
- Timeout int
- Timeout
- Validation
Configurations List<Pulumi.Aws Native. Data Brew. Inputs. Job Validation Configuration> - Data quality rules configuration
- Role
Arn string - Role arn
- Type
Job
Type - Job type
- Data
Catalog Outputs []Job Data Catalog Output Args - One or more artifacts that represent the AWS Glue Data Catalog output from running the job.
- Database
Outputs []JobDatabase Output Args - Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.
- Dataset
Name string - Dataset name
- Encryption
Key stringArn - Encryption Key Arn
- Encryption
Mode JobEncryption Mode - Encryption mode
- Job
Sample JobSample Args - Job Sample
- Log
Subscription JobLog Subscription - Log subscription
- Max
Capacity int - Max capacity
- Max
Retries int - Max retries
- Name string
- Job name
- Output
Location JobOutput Location Args - Output location
- Outputs
[]Job
Output Type Args - One or more artifacts that represent output from running the job.
- Profile
Configuration JobProfile Configuration Args - Profile Job configuration
- Project
Name string - Project name
- Recipe
Job
Recipe Args - A series of data transformation steps that the job runs.
- Create
Only Tag Args - Metadata tags that have been applied to the job.
- Timeout int
- Timeout
- Validation
Configurations []JobValidation Configuration Args - Data quality rules configuration
- role
Arn String - Role arn
- type
Job
Type - Job type
- data
Catalog List<JobOutputs Data Catalog Output> - One or more artifacts that represent the AWS Glue Data Catalog output from running the job.
- database
Outputs List<JobDatabase Output> - Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.
- dataset
Name String - Dataset name
- encryption
Key StringArn - Encryption Key Arn
- encryption
Mode JobEncryption Mode - Encryption mode
- job
Sample JobSample - Job Sample
- log
Subscription JobLog Subscription - Log subscription
- max
Capacity Integer - Max capacity
- max
Retries Integer - Max retries
- name String
- Job name
- output
Location JobOutput Location - Output location
- outputs
List<Job
Output> - One or more artifacts that represent output from running the job.
- profile
Configuration JobProfile Configuration - Profile Job configuration
- project
Name String - Project name
- recipe
Job
Recipe - A series of data transformation steps that the job runs.
- List<Create
Only Tag> - Metadata tags that have been applied to the job.
- timeout Integer
- Timeout
- validation
Configurations List<JobValidation Configuration> - Data quality rules configuration
- role
Arn string - Role arn
- type
Job
Type - Job type
- data
Catalog JobOutputs Data Catalog Output[] - One or more artifacts that represent the AWS Glue Data Catalog output from running the job.
- database
Outputs JobDatabase Output[] - Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.
- dataset
Name string - Dataset name
- encryption
Key stringArn - Encryption Key Arn
- encryption
Mode JobEncryption Mode - Encryption mode
- job
Sample JobSample - Job Sample
- log
Subscription JobLog Subscription - Log subscription
- max
Capacity number - Max capacity
- max
Retries number - Max retries
- name string
- Job name
- output
Location JobOutput Location - Output location
- outputs
Job
Output[] - One or more artifacts that represent output from running the job.
- profile
Configuration JobProfile Configuration - Profile Job configuration
- project
Name string - Project name
- recipe
Job
Recipe - A series of data transformation steps that the job runs.
- Create
Only Tag[] - Metadata tags that have been applied to the job.
- timeout number
- Timeout
- validation
Configurations JobValidation Configuration[] - Data quality rules configuration
- role_
arn str - Role arn
- type
Job
Type - Job type
- data_
catalog_ Sequence[Joboutputs Data Catalog Output Args] - One or more artifacts that represent the AWS Glue Data Catalog output from running the job.
- database_
outputs Sequence[JobDatabase Output Args] - Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.
- dataset_
name str - Dataset name
- encryption_
key_ strarn - Encryption Key Arn
- encryption_
mode JobEncryption Mode - Encryption mode
- job_
sample JobSample Args - Job Sample
- log_
subscription JobLog Subscription - Log subscription
- max_
capacity int - Max capacity
- max_
retries int - Max retries
- name str
- Job name
- output_
location JobOutput Location Args - Output location
- outputs
Sequence[Job
Output Args] - One or more artifacts that represent output from running the job.
- profile_
configuration JobProfile Configuration Args - Profile Job configuration
- project_
name str - Project name
- recipe
Job
Recipe Args - A series of data transformation steps that the job runs.
- Sequence[Create
Only Tag Args] - Metadata tags that have been applied to the job.
- timeout int
- Timeout
- validation_
configurations Sequence[JobValidation Configuration Args] - Data quality rules configuration
- role
Arn String - Role arn
- type "PROFILE" | "RECIPE"
- Job type
- data
Catalog List<Property Map>Outputs - One or more artifacts that represent the AWS Glue Data Catalog output from running the job.
- database
Outputs List<Property Map> - Represents a list of JDBC database output objects which defines the output destination for a DataBrew recipe job to write into.
- dataset
Name String - Dataset name
- encryption
Key StringArn - Encryption Key Arn
- encryption
Mode "SSE-KMS" | "SSE-S3" - Encryption mode
- job
Sample Property Map - Job Sample
- log
Subscription "ENABLE" | "DISABLE" - Log subscription
- max
Capacity Number - Max capacity
- max
Retries Number - Max retries
- name String
- Job name
- output
Location Property Map - Output location
- outputs List<Property Map>
- One or more artifacts that represent output from running the job.
- profile
Configuration Property Map - Profile Job configuration
- project
Name String - Project name
- recipe Property Map
- A series of data transformation steps that the job runs.
- List<Property Map>
- Metadata tags that have been applied to the job.
- timeout Number
- Timeout
- validation
Configurations List<Property Map> - Data quality rules configuration
Outputs
All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Supporting Types
CreateOnlyTag, CreateOnlyTagArgs
JobAllowedStatistics, JobAllowedStatisticsArgs
- Statistics List<string>
- One or more column statistics to allow for columns that contain detected entities.
- Statistics []string
- One or more column statistics to allow for columns that contain detected entities.
- statistics List<String>
- One or more column statistics to allow for columns that contain detected entities.
- statistics string[]
- One or more column statistics to allow for columns that contain detected entities.
- statistics Sequence[str]
- One or more column statistics to allow for columns that contain detected entities.
- statistics List<String>
- One or more column statistics to allow for columns that contain detected entities.
JobColumnSelector, JobColumnSelectorArgs
JobColumnStatisticsConfiguration, JobColumnStatisticsConfigurationArgs
- Statistics
Pulumi.
Aws Native. Data Brew. Inputs. Job Statistics Configuration - Configuration for evaluations. Statistics can be used to select evaluations and override parameters of evaluations.
- Selectors
List<Pulumi.
Aws Native. Data Brew. Inputs. Job Column Selector> - List of column selectors. Selectors can be used to select columns from the dataset. When selectors are undefined, configuration will be applied to all supported columns.
- Statistics
Job
Statistics Configuration - Configuration for evaluations. Statistics can be used to select evaluations and override parameters of evaluations.
- Selectors
[]Job
Column Selector - List of column selectors. Selectors can be used to select columns from the dataset. When selectors are undefined, configuration will be applied to all supported columns.
- statistics
Job
Statistics Configuration - Configuration for evaluations. Statistics can be used to select evaluations and override parameters of evaluations.
- selectors
List<Job
Column Selector> - List of column selectors. Selectors can be used to select columns from the dataset. When selectors are undefined, configuration will be applied to all supported columns.
- statistics
Job
Statistics Configuration - Configuration for evaluations. Statistics can be used to select evaluations and override parameters of evaluations.
- selectors
Job
Column Selector[] - List of column selectors. Selectors can be used to select columns from the dataset. When selectors are undefined, configuration will be applied to all supported columns.
- statistics
Job
Statistics Configuration - Configuration for evaluations. Statistics can be used to select evaluations and override parameters of evaluations.
- selectors
Sequence[Job
Column Selector] - List of column selectors. Selectors can be used to select columns from the dataset. When selectors are undefined, configuration will be applied to all supported columns.
- statistics Property Map
- Configuration for evaluations. Statistics can be used to select evaluations and override parameters of evaluations.
- selectors List<Property Map>
- List of column selectors. Selectors can be used to select columns from the dataset. When selectors are undefined, configuration will be applied to all supported columns.
JobCsvOutputOptions, JobCsvOutputOptionsArgs
- Delimiter string
- A single character that specifies the delimiter used to create CSV job output.
- Delimiter string
- A single character that specifies the delimiter used to create CSV job output.
- delimiter String
- A single character that specifies the delimiter used to create CSV job output.
- delimiter string
- A single character that specifies the delimiter used to create CSV job output.
- delimiter str
- A single character that specifies the delimiter used to create CSV job output.
- delimiter String
- A single character that specifies the delimiter used to create CSV job output.
JobDataCatalogOutput, JobDataCatalogOutputArgs
- Database
Name string - The name of a database in the Data Catalog.
- Table
Name string - The name of a table in the Data Catalog.
- Catalog
Id string - The unique identifier of the AWS account that holds the Data Catalog that stores the data.
- Database
Options Pulumi.Aws Native. Data Brew. Inputs. Job Database Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- Overwrite bool
- A value that, if true, means that any data in the location specified for output is overwritten with new output. Not supported with DatabaseOptions.
- S3Options
Pulumi.
Aws Native. Data Brew. Inputs. Job S3Table Output Options - Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.
- Database
Name string - The name of a database in the Data Catalog.
- Table
Name string - The name of a table in the Data Catalog.
- Catalog
Id string - The unique identifier of the AWS account that holds the Data Catalog that stores the data.
- Database
Options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- Overwrite bool
- A value that, if true, means that any data in the location specified for output is overwritten with new output. Not supported with DatabaseOptions.
- S3Options
Job
S3Table Output Options - Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.
- database
Name String - The name of a database in the Data Catalog.
- table
Name String - The name of a table in the Data Catalog.
- catalog
Id String - The unique identifier of the AWS account that holds the Data Catalog that stores the data.
- database
Options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- overwrite Boolean
- A value that, if true, means that any data in the location specified for output is overwritten with new output. Not supported with DatabaseOptions.
- s3Options
Job
S3Table Output Options - Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.
- database
Name string - The name of a database in the Data Catalog.
- table
Name string - The name of a table in the Data Catalog.
- catalog
Id string - The unique identifier of the AWS account that holds the Data Catalog that stores the data.
- database
Options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- overwrite boolean
- A value that, if true, means that any data in the location specified for output is overwritten with new output. Not supported with DatabaseOptions.
- s3Options
Job
S3Table Output Options - Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.
- database_
name str - The name of a database in the Data Catalog.
- table_
name str - The name of a table in the Data Catalog.
- catalog_
id str - The unique identifier of the AWS account that holds the Data Catalog that stores the data.
- database_
options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- overwrite bool
- A value that, if true, means that any data in the location specified for output is overwritten with new output. Not supported with DatabaseOptions.
- s3_
options JobS3Table Output Options - Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.
- database
Name String - The name of a database in the Data Catalog.
- table
Name String - The name of a table in the Data Catalog.
- catalog
Id String - The unique identifier of the AWS account that holds the Data Catalog that stores the data.
- database
Options Property Map - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- overwrite Boolean
- A value that, if true, means that any data in the location specified for output is overwritten with new output. Not supported with DatabaseOptions.
- s3Options Property Map
- Represents options that specify how and where DataBrew writes the Amazon S3 output generated by recipe jobs.
JobDatabaseOutput, JobDatabaseOutputArgs
- Database
Options Pulumi.Aws Native. Data Brew. Inputs. Job Database Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- Glue
Connection stringName - Glue connection name
- Database
Output Mode Pulumi.Aws Native. Data Brew. Job Database Output Database Output Mode - Database table name
- Database
Options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- Glue
Connection stringName - Glue connection name
- Database
Output JobMode Database Output Database Output Mode - Database table name
- database
Options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- glue
Connection StringName - Glue connection name
- database
Output JobMode Database Output Database Output Mode - Database table name
- database
Options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- glue
Connection stringName - Glue connection name
- database
Output JobMode Database Output Database Output Mode - Database table name
- database_
options JobDatabase Table Output Options - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- glue_
connection_ strname - Glue connection name
- database_
output_ Jobmode Database Output Database Output Mode - Database table name
- database
Options Property Map - Represents options that specify how and where DataBrew writes the database output generated by recipe jobs.
- glue
Connection StringName - Glue connection name
- database
Output "NEW_TABLE"Mode - Database table name
JobDatabaseOutputDatabaseOutputMode, JobDatabaseOutputDatabaseOutputModeArgs
- New
Table - NEW_TABLE
- Job
Database Output Database Output Mode New Table - NEW_TABLE
- New
Table - NEW_TABLE
- New
Table - NEW_TABLE
- NEW_TABLE
- NEW_TABLE
- "NEW_TABLE"
- NEW_TABLE
JobDatabaseTableOutputOptions, JobDatabaseTableOutputOptionsArgs
- Table
Name string - A prefix for the name of a table DataBrew will create in the database.
- Temp
Directory Pulumi.Aws Native. Data Brew. Inputs. Job S3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can store intermediate results.
- Table
Name string - A prefix for the name of a table DataBrew will create in the database.
- Temp
Directory JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can store intermediate results.
- table
Name String - A prefix for the name of a table DataBrew will create in the database.
- temp
Directory JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can store intermediate results.
- table
Name string - A prefix for the name of a table DataBrew will create in the database.
- temp
Directory JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can store intermediate results.
- table_
name str - A prefix for the name of a table DataBrew will create in the database.
- temp_
directory JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can store intermediate results.
- table
Name String - A prefix for the name of a table DataBrew will create in the database.
- temp
Directory Property Map - Represents an Amazon S3 location (bucket name and object key) where DataBrew can store intermediate results.
JobEncryptionMode, JobEncryptionModeArgs
- Sse
Kms - SSE-KMS
- Sse
S3 - SSE-S3
- Job
Encryption Mode Sse Kms - SSE-KMS
- Job
Encryption Mode Sse S3 - SSE-S3
- Sse
Kms - SSE-KMS
- Sse
S3 - SSE-S3
- Sse
Kms - SSE-KMS
- Sse
S3 - SSE-S3
- SSE_KMS
- SSE-KMS
- SSE_S3
- SSE-S3
- "SSE-KMS"
- SSE-KMS
- "SSE-S3"
- SSE-S3
JobEntityDetectorConfiguration, JobEntityDetectorConfigurationArgs
- Entity
Types List<string> Entity types to detect. Can be any of the following:
- USA_SSN
- USA_ITIN
- USA_PASSPORT_NUMBER
- PHONE_NUMBER
- USA_DRIVING_LICENSE
- BANK_ACCOUNT
- CREDIT_CARD
- IP_ADDRESS
- MAC_ADDRESS
- USA_DEA_NUMBER
- USA_HCPCS_CODE
- USA_NATIONAL_PROVIDER_IDENTIFIER
- USA_NATIONAL_DRUG_CODE
- USA_HEALTH_INSURANCE_CLAIM_NUMBER
- USA_MEDICARE_BENEFICIARY_IDENTIFIER
- USA_CPT_CODE
- PERSON_NAME
- DATE
The Entity type group USA_ALL is also supported, and includes all of the above entity types except PERSON_NAME and DATE.
- Allowed
Statistics Pulumi.Aws Native. Data Brew. Inputs. Job Allowed Statistics - Configuration of statistics that are allowed to be run on columns that contain detected entities. When undefined, no statistics will be computed on columns that contain detected entities.
- Entity
Types []string Entity types to detect. Can be any of the following:
- USA_SSN
- USA_ITIN
- USA_PASSPORT_NUMBER
- PHONE_NUMBER
- USA_DRIVING_LICENSE
- BANK_ACCOUNT
- CREDIT_CARD
- IP_ADDRESS
- MAC_ADDRESS
- USA_DEA_NUMBER
- USA_HCPCS_CODE
- USA_NATIONAL_PROVIDER_IDENTIFIER
- USA_NATIONAL_DRUG_CODE
- USA_HEALTH_INSURANCE_CLAIM_NUMBER
- USA_MEDICARE_BENEFICIARY_IDENTIFIER
- USA_CPT_CODE
- PERSON_NAME
- DATE
The Entity type group USA_ALL is also supported, and includes all of the above entity types except PERSON_NAME and DATE.
- Allowed
Statistics JobAllowed Statistics - Configuration of statistics that are allowed to be run on columns that contain detected entities. When undefined, no statistics will be computed on columns that contain detected entities.
- entity
Types List<String> Entity types to detect. Can be any of the following:
- USA_SSN
- USA_ITIN
- USA_PASSPORT_NUMBER
- PHONE_NUMBER
- USA_DRIVING_LICENSE
- BANK_ACCOUNT
- CREDIT_CARD
- IP_ADDRESS
- MAC_ADDRESS
- USA_DEA_NUMBER
- USA_HCPCS_CODE
- USA_NATIONAL_PROVIDER_IDENTIFIER
- USA_NATIONAL_DRUG_CODE
- USA_HEALTH_INSURANCE_CLAIM_NUMBER
- USA_MEDICARE_BENEFICIARY_IDENTIFIER
- USA_CPT_CODE
- PERSON_NAME
- DATE
The Entity type group USA_ALL is also supported, and includes all of the above entity types except PERSON_NAME and DATE.
- allowed
Statistics JobAllowed Statistics - Configuration of statistics that are allowed to be run on columns that contain detected entities. When undefined, no statistics will be computed on columns that contain detected entities.
- entity
Types string[] Entity types to detect. Can be any of the following:
- USA_SSN
- USA_ITIN
- USA_PASSPORT_NUMBER
- PHONE_NUMBER
- USA_DRIVING_LICENSE
- BANK_ACCOUNT
- CREDIT_CARD
- IP_ADDRESS
- MAC_ADDRESS
- USA_DEA_NUMBER
- USA_HCPCS_CODE
- USA_NATIONAL_PROVIDER_IDENTIFIER
- USA_NATIONAL_DRUG_CODE
- USA_HEALTH_INSURANCE_CLAIM_NUMBER
- USA_MEDICARE_BENEFICIARY_IDENTIFIER
- USA_CPT_CODE
- PERSON_NAME
- DATE
The Entity type group USA_ALL is also supported, and includes all of the above entity types except PERSON_NAME and DATE.
- allowed
Statistics JobAllowed Statistics - Configuration of statistics that are allowed to be run on columns that contain detected entities. When undefined, no statistics will be computed on columns that contain detected entities.
- entity_
types Sequence[str] Entity types to detect. Can be any of the following:
- USA_SSN
- USA_ITIN
- USA_PASSPORT_NUMBER
- PHONE_NUMBER
- USA_DRIVING_LICENSE
- BANK_ACCOUNT
- CREDIT_CARD
- IP_ADDRESS
- MAC_ADDRESS
- USA_DEA_NUMBER
- USA_HCPCS_CODE
- USA_NATIONAL_PROVIDER_IDENTIFIER
- USA_NATIONAL_DRUG_CODE
- USA_HEALTH_INSURANCE_CLAIM_NUMBER
- USA_MEDICARE_BENEFICIARY_IDENTIFIER
- USA_CPT_CODE
- PERSON_NAME
- DATE
The Entity type group USA_ALL is also supported, and includes all of the above entity types except PERSON_NAME and DATE.
- allowed_
statistics JobAllowed Statistics - Configuration of statistics that are allowed to be run on columns that contain detected entities. When undefined, no statistics will be computed on columns that contain detected entities.
- entity
Types List<String> Entity types to detect. Can be any of the following:
- USA_SSN
- USA_ITIN
- USA_PASSPORT_NUMBER
- PHONE_NUMBER
- USA_DRIVING_LICENSE
- BANK_ACCOUNT
- CREDIT_CARD
- IP_ADDRESS
- MAC_ADDRESS
- USA_DEA_NUMBER
- USA_HCPCS_CODE
- USA_NATIONAL_PROVIDER_IDENTIFIER
- USA_NATIONAL_DRUG_CODE
- USA_HEALTH_INSURANCE_CLAIM_NUMBER
- USA_MEDICARE_BENEFICIARY_IDENTIFIER
- USA_CPT_CODE
- PERSON_NAME
- DATE
The Entity type group USA_ALL is also supported, and includes all of the above entity types except PERSON_NAME and DATE.
- allowed
Statistics Property Map - Configuration of statistics that are allowed to be run on columns that contain detected entities. When undefined, no statistics will be computed on columns that contain detected entities.
JobLogSubscription, JobLogSubscriptionArgs
- Enable
- ENABLE
- Disable
- DISABLE
- Job
Log Subscription Enable - ENABLE
- Job
Log Subscription Disable - DISABLE
- Enable
- ENABLE
- Disable
- DISABLE
- Enable
- ENABLE
- Disable
- DISABLE
- ENABLE
- ENABLE
- DISABLE
- DISABLE
- "ENABLE"
- ENABLE
- "DISABLE"
- DISABLE
JobOutput, JobOutputArgs
- Location Pulumi.AwsNative.DataBrew.Inputs.JobS3Location - The location in Amazon S3 where the job writes its output.
- CompressionFormat Pulumi.AwsNative.DataBrew.JobOutputCompressionFormat - The compression algorithm used to compress the output text of the job.
- Format Pulumi.AwsNative.DataBrew.JobOutputFormat - The data format of the output of the job.
- FormatOptions Pulumi.AwsNative.DataBrew.Inputs.JobOutputFormatOptions - Represents options that define how DataBrew formats job output files.
- MaxOutputFiles int - The maximum number of files to be generated by the job and written to the output folder.
- Overwrite bool - A value that, if true, means that any data in the location specified for output is overwritten with new output.
- PartitionColumns List<string> - The names of one or more partition columns for the output of the job.
- Location JobS3Location - The location in Amazon S3 where the job writes its output.
- CompressionFormat JobOutputCompressionFormat - The compression algorithm used to compress the output text of the job.
- Format JobOutputFormat - The data format of the output of the job.
- FormatOptions JobOutputFormatOptions - Represents options that define how DataBrew formats job output files.
- MaxOutputFiles int - The maximum number of files to be generated by the job and written to the output folder.
- Overwrite bool - A value that, if true, means that any data in the location specified for output is overwritten with new output.
- PartitionColumns []string - The names of one or more partition columns for the output of the job.
- location JobS3Location - The location in Amazon S3 where the job writes its output.
- compressionFormat JobOutputCompressionFormat - The compression algorithm used to compress the output text of the job.
- format JobOutputFormat - The data format of the output of the job.
- formatOptions JobOutputFormatOptions - Represents options that define how DataBrew formats job output files.
- maxOutputFiles Integer - The maximum number of files to be generated by the job and written to the output folder.
- overwrite Boolean - A value that, if true, means that any data in the location specified for output is overwritten with new output.
- partitionColumns List<String> - The names of one or more partition columns for the output of the job.
- location JobS3Location - The location in Amazon S3 where the job writes its output.
- compressionFormat JobOutputCompressionFormat - The compression algorithm used to compress the output text of the job.
- format JobOutputFormat - The data format of the output of the job.
- formatOptions JobOutputFormatOptions - Represents options that define how DataBrew formats job output files.
- maxOutputFiles number - The maximum number of files to be generated by the job and written to the output folder.
- overwrite boolean - A value that, if true, means that any data in the location specified for output is overwritten with new output.
- partitionColumns string[] - The names of one or more partition columns for the output of the job.
- location JobS3Location - The location in Amazon S3 where the job writes its output.
- compression_format JobOutputCompressionFormat - The compression algorithm used to compress the output text of the job.
- format JobOutputFormat - The data format of the output of the job.
- format_options JobOutputFormatOptions - Represents options that define how DataBrew formats job output files.
- max_output_files int - The maximum number of files to be generated by the job and written to the output folder.
- overwrite bool - A value that, if true, means that any data in the location specified for output is overwritten with new output.
- partition_columns Sequence[str] - The names of one or more partition columns for the output of the job.
- location Property Map - The location in Amazon S3 where the job writes its output.
- compressionFormat "GZIP" | "LZ4" | "SNAPPY" | "BZIP2" | "DEFLATE" | "LZO" | "BROTLI" | "ZSTD" | "ZLIB" - The compression algorithm used to compress the output text of the job.
- format "CSV" | "JSON" | "PARQUET" | "GLUEPARQUET" | "AVRO" | "ORC" | "XML" | "TABLEAUHYPER" - The data format of the output of the job.
- formatOptions Property Map - Represents options that define how DataBrew formats job output files.
- maxOutputFiles Number - The maximum number of files to be generated by the job and written to the output folder.
- overwrite Boolean - A value that, if true, means that any data in the location specified for output is overwritten with new output.
- partitionColumns List<String> - The names of one or more partition columns for the output of the job.
JobOutputCompressionFormat, JobOutputCompressionFormatArgs
- Gzip
- GZIP
- Lz4
- LZ4
- Snappy
- SNAPPY
- Bzip2
- BZIP2
- Deflate
- DEFLATE
- Lzo
- LZO
- Brotli
- BROTLI
- Zstd
- ZSTD
- Zlib
- ZLIB
- Job
Output Compression Format Gzip - GZIP
- Job
Output Compression Format Lz4 - LZ4
- Job
Output Compression Format Snappy - SNAPPY
- Job
Output Compression Format Bzip2 - BZIP2
- Job
Output Compression Format Deflate - DEFLATE
- Job
Output Compression Format Lzo - LZO
- Job
Output Compression Format Brotli - BROTLI
- Job
Output Compression Format Zstd - ZSTD
- Job
Output Compression Format Zlib - ZLIB
- Gzip
- GZIP
- Lz4
- LZ4
- Snappy
- SNAPPY
- Bzip2
- BZIP2
- Deflate
- DEFLATE
- Lzo
- LZO
- Brotli
- BROTLI
- Zstd
- ZSTD
- Zlib
- ZLIB
- Gzip
- GZIP
- Lz4
- LZ4
- Snappy
- SNAPPY
- Bzip2
- BZIP2
- Deflate
- DEFLATE
- Lzo
- LZO
- Brotli
- BROTLI
- Zstd
- ZSTD
- Zlib
- ZLIB
- GZIP
- GZIP
- LZ4
- LZ4
- SNAPPY
- SNAPPY
- BZIP2
- BZIP2
- DEFLATE
- DEFLATE
- LZO
- LZO
- BROTLI
- BROTLI
- ZSTD
- ZSTD
- ZLIB
- ZLIB
- "GZIP"
- GZIP
- "LZ4"
- LZ4
- "SNAPPY"
- SNAPPY
- "BZIP2"
- BZIP2
- "DEFLATE"
- DEFLATE
- "LZO"
- LZO
- "BROTLI"
- BROTLI
- "ZSTD"
- ZSTD
- "ZLIB"
- ZLIB
JobOutputFormat, JobOutputFormatArgs
- Csv
- CSV
- Json
- JSON
- Parquet
- PARQUET
- Glueparquet
- GLUEPARQUET
- Avro
- AVRO
- Orc
- ORC
- Xml
- XML
- Tableauhyper
- TABLEAUHYPER
- Job
Output Format Csv - CSV
- Job
Output Format Json - JSON
- Job
Output Format Parquet - PARQUET
- Job
Output Format Glueparquet - GLUEPARQUET
- Job
Output Format Avro - AVRO
- Job
Output Format Orc - ORC
- Job
Output Format Xml - XML
- Job
Output Format Tableauhyper - TABLEAUHYPER
- Csv
- CSV
- Json
- JSON
- Parquet
- PARQUET
- Glueparquet
- GLUEPARQUET
- Avro
- AVRO
- Orc
- ORC
- Xml
- XML
- Tableauhyper
- TABLEAUHYPER
- Csv
- CSV
- Json
- JSON
- Parquet
- PARQUET
- Glueparquet
- GLUEPARQUET
- Avro
- AVRO
- Orc
- ORC
- Xml
- XML
- Tableauhyper
- TABLEAUHYPER
- CSV
- CSV
- JSON
- JSON
- PARQUET
- PARQUET
- GLUEPARQUET
- GLUEPARQUET
- AVRO
- AVRO
- ORC
- ORC
- XML
- XML
- TABLEAUHYPER
- TABLEAUHYPER
- "CSV"
- CSV
- "JSON"
- JSON
- "PARQUET"
- PARQUET
- "GLUEPARQUET"
- GLUEPARQUET
- "AVRO"
- AVRO
- "ORC"
- ORC
- "XML"
- XML
- "TABLEAUHYPER"
- TABLEAUHYPER
JobOutputFormatOptions, JobOutputFormatOptionsArgs
- Csv Pulumi.AwsNative.DataBrew.Inputs.JobCsvOutputOptions - Represents a set of options that define the structure of comma-separated value (CSV) job output.
- Csv JobCsvOutputOptions - Represents a set of options that define the structure of comma-separated value (CSV) job output.
- csv JobCsvOutputOptions - Represents a set of options that define the structure of comma-separated value (CSV) job output.
- csv JobCsvOutputOptions - Represents a set of options that define the structure of comma-separated value (CSV) job output.
- csv JobCsvOutputOptions - Represents a set of options that define the structure of comma-separated value (CSV) job output.
- csv Property Map
- Represents a set of options that define the structure of comma-separated value (CSV) job output.
JobOutputLocation, JobOutputLocationArgs
- Bucket string
- The Amazon S3 bucket name.
- BucketOwner string
- Key string
- The unique name of the object in the bucket.
- Bucket string
- The Amazon S3 bucket name.
- BucketOwner string
- Key string
- The unique name of the object in the bucket.
- bucket String
- The Amazon S3 bucket name.
- bucketOwner String
- key String
- The unique name of the object in the bucket.
- bucket string
- The Amazon S3 bucket name.
- bucketOwner string
- key string
- The unique name of the object in the bucket.
- bucket str
- The Amazon S3 bucket name.
- bucket_owner str
- key str
- The unique name of the object in the bucket.
- bucket String
- The Amazon S3 bucket name.
- bucketOwner String
- key String
- The unique name of the object in the bucket.
JobProfileConfiguration, JobProfileConfigurationArgs
- ColumnStatisticsConfigurations List<Pulumi.AwsNative.DataBrew.Inputs.JobColumnStatisticsConfiguration> - List of configurations for column evaluations. ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns. When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.
- DatasetStatisticsConfiguration Pulumi.AwsNative.DataBrew.Inputs.JobStatisticsConfiguration - Configuration for inter-column evaluations. Configuration can be used to select evaluations and override parameters of evaluations. When configuration is undefined, the profile job will run all supported inter-column evaluations.
- EntityDetectorConfiguration Pulumi.AwsNative.DataBrew.Inputs.JobEntityDetectorConfiguration - Configuration of entity detection for a profile job. When undefined, entity detection is disabled.
- ProfileColumns List<Pulumi.AwsNative.DataBrew.Inputs.JobColumnSelector> - List of column selectors. ProfileColumns can be used to select columns from the dataset. When ProfileColumns is undefined, the profile job will profile all supported columns.
- ColumnStatisticsConfigurations []JobColumnStatisticsConfiguration - List of configurations for column evaluations. ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns. When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.
- DatasetStatisticsConfiguration JobStatisticsConfiguration - Configuration for inter-column evaluations. Configuration can be used to select evaluations and override parameters of evaluations. When configuration is undefined, the profile job will run all supported inter-column evaluations.
- EntityDetectorConfiguration JobEntityDetectorConfiguration - Configuration of entity detection for a profile job. When undefined, entity detection is disabled.
- ProfileColumns []JobColumnSelector - List of column selectors. ProfileColumns can be used to select columns from the dataset. When ProfileColumns is undefined, the profile job will profile all supported columns.
- columnStatisticsConfigurations List<JobColumnStatisticsConfiguration> - List of configurations for column evaluations. ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns. When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.
- datasetStatisticsConfiguration JobStatisticsConfiguration - Configuration for inter-column evaluations. Configuration can be used to select evaluations and override parameters of evaluations. When configuration is undefined, the profile job will run all supported inter-column evaluations.
- entityDetectorConfiguration JobEntityDetectorConfiguration - Configuration of entity detection for a profile job. When undefined, entity detection is disabled.
- profileColumns List<JobColumnSelector> - List of column selectors. ProfileColumns can be used to select columns from the dataset. When ProfileColumns is undefined, the profile job will profile all supported columns.
- columnStatisticsConfigurations JobColumnStatisticsConfiguration[] - List of configurations for column evaluations. ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns. When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.
- datasetStatisticsConfiguration JobStatisticsConfiguration - Configuration for inter-column evaluations. Configuration can be used to select evaluations and override parameters of evaluations. When configuration is undefined, the profile job will run all supported inter-column evaluations.
- entityDetectorConfiguration JobEntityDetectorConfiguration - Configuration of entity detection for a profile job. When undefined, entity detection is disabled.
- profileColumns JobColumnSelector[] - List of column selectors. ProfileColumns can be used to select columns from the dataset. When ProfileColumns is undefined, the profile job will profile all supported columns.
- column_statistics_configurations Sequence[JobColumnStatisticsConfiguration] - List of configurations for column evaluations. ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns. When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.
- dataset_statistics_configuration JobStatisticsConfiguration - Configuration for inter-column evaluations. Configuration can be used to select evaluations and override parameters of evaluations. When configuration is undefined, the profile job will run all supported inter-column evaluations.
- entity_detector_configuration JobEntityDetectorConfiguration - Configuration of entity detection for a profile job. When undefined, entity detection is disabled.
- profile_columns Sequence[JobColumnSelector] - List of column selectors. ProfileColumns can be used to select columns from the dataset. When ProfileColumns is undefined, the profile job will profile all supported columns.
- columnStatisticsConfigurations List<Property Map> - List of configurations for column evaluations. ColumnStatisticsConfigurations are used to select evaluations and override parameters of evaluations for particular columns. When ColumnStatisticsConfigurations is undefined, the profile job will profile all supported columns and run all supported evaluations.
- datasetStatisticsConfiguration Property Map - Configuration for inter-column evaluations. Configuration can be used to select evaluations and override parameters of evaluations. When configuration is undefined, the profile job will run all supported inter-column evaluations.
- entityDetectorConfiguration Property Map - Configuration of entity detection for a profile job. When undefined, entity detection is disabled.
- profileColumns List<Property Map> - List of column selectors. ProfileColumns can be used to select columns from the dataset. When ProfileColumns is undefined, the profile job will profile all supported columns.
JobRecipe, JobRecipeArgs
JobS3Location, JobS3LocationArgs
- Bucket string
- The Amazon S3 bucket name.
- Bucket
Owner string - The AWS account ID of the bucket owner.
- Key string
- The unique name of the object in the bucket.
- Bucket string
- The Amazon S3 bucket name.
- Bucket
Owner string - The AWS account ID of the bucket owner.
- Key string
- The unique name of the object in the bucket.
- bucket String
- The Amazon S3 bucket name.
- bucket
Owner String - The AWS account ID of the bucket owner.
- key String
- The unique name of the object in the bucket.
- bucket string
- The Amazon S3 bucket name.
- bucket
Owner string - The AWS account ID of the bucket owner.
- key string
- The unique name of the object in the bucket.
- bucket str
- The Amazon S3 bucket name.
- bucket_
owner str - The AWS account ID of the bucket owner.
- key str
- The unique name of the object in the bucket.
- bucket String
- The Amazon S3 bucket name.
- bucket
Owner String - The AWS account ID of the bucket owner.
- key String
- The unique name of the object in the bucket.
JobS3TableOutputOptions, JobS3TableOutputOptionsArgs
- Location Pulumi.AwsNative.DataBrew.Inputs.JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job.
- Location JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job.
- location JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job.
- location JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job.
- location JobS3Location - Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job.
- location Property Map
- Represents an Amazon S3 location (bucket name and object key) where DataBrew can write output from a job.
JobSample, JobSampleArgs
- Mode Pulumi.AwsNative.DataBrew.JobSampleMode - A value that determines whether the profile job is run on the entire dataset or a specified number of rows. This value must be one of the following:
- FULL_DATASET - The profile job is run on the entire dataset.
- CUSTOM_ROWS - The profile job is run on the number of rows specified in the
Size
parameter.
- Size int
The
Size
parameter is only required when the mode is CUSTOM_ROWS. The profile job is run on the specified number of rows. The maximum value for size is Long.MAX_VALUE.Long.MAX_VALUE = 9223372036854775807
- Mode JobSampleMode - A value that determines whether the profile job is run on the entire dataset or a specified number of rows. This value must be one of the following:
- FULL_DATASET - The profile job is run on the entire dataset.
- CUSTOM_ROWS - The profile job is run on the number of rows specified in the
Size
parameter.
- Size int
The
Size
parameter is only required when the mode is CUSTOM_ROWS. The profile job is run on the specified number of rows. The maximum value for size is Long.MAX_VALUE.Long.MAX_VALUE = 9223372036854775807
- mode JobSampleMode - A value that determines whether the profile job is run on the entire dataset or a specified number of rows. This value must be one of the following:
- FULL_DATASET - The profile job is run on the entire dataset.
- CUSTOM_ROWS - The profile job is run on the number of rows specified in the
Size
parameter.
- size Integer
The
Size
parameter is only required when the mode is CUSTOM_ROWS. The profile job is run on the specified number of rows. The maximum value for size is Long.MAX_VALUE.Long.MAX_VALUE = 9223372036854775807
- mode JobSampleMode - A value that determines whether the profile job is run on the entire dataset or a specified number of rows. This value must be one of the following:
- FULL_DATASET - The profile job is run on the entire dataset.
- CUSTOM_ROWS - The profile job is run on the number of rows specified in the
Size
parameter.
- size number
The
Size
parameter is only required when the mode is CUSTOM_ROWS. The profile job is run on the specified number of rows. The maximum value for size is Long.MAX_VALUE.Long.MAX_VALUE = 9223372036854775807
- mode JobSampleMode - A value that determines whether the profile job is run on the entire dataset or a specified number of rows. This value must be one of the following:
- FULL_DATASET - The profile job is run on the entire dataset.
- CUSTOM_ROWS - The profile job is run on the number of rows specified in the
Size
parameter.
- size int
The
Size
parameter is only required when the mode is CUSTOM_ROWS. The profile job is run on the specified number of rows. The maximum value for size is Long.MAX_VALUE.Long.MAX_VALUE = 9223372036854775807
- mode "FULL_DATASET" | "CUSTOM_ROWS"
- A value that determines whether the profile job is run on the entire dataset or a specified number of rows. This value must be one of the following:
- FULL_DATASET - The profile job is run on the entire dataset.
- CUSTOM_ROWS - The profile job is run on the number of rows specified in the
Size
parameter.
- size Number
The
Size
parameter is only required when the mode is CUSTOM_ROWS. The profile job is run on the specified number of rows. The maximum value for size is Long.MAX_VALUE.Long.MAX_VALUE = 9223372036854775807
JobSampleMode, JobSampleModeArgs
- Full
Dataset - FULL_DATASET
- Custom
Rows - CUSTOM_ROWS
- Job
Sample Mode Full Dataset - FULL_DATASET
- Job
Sample Mode Custom Rows - CUSTOM_ROWS
- Full
Dataset - FULL_DATASET
- Custom
Rows - CUSTOM_ROWS
- Full
Dataset - FULL_DATASET
- Custom
Rows - CUSTOM_ROWS
- FULL_DATASET
- FULL_DATASET
- CUSTOM_ROWS
- CUSTOM_ROWS
- "FULL_DATASET"
- FULL_DATASET
- "CUSTOM_ROWS"
- CUSTOM_ROWS
JobStatisticOverride, JobStatisticOverrideArgs
- Parameters Dictionary<string, string>
- A map that includes overrides of an evaluation’s parameters.
- Statistic string
- The name of an evaluation
- Parameters map[string]string
- A map that includes overrides of an evaluation’s parameters.
- Statistic string
- The name of an evaluation
- parameters Map<String,String>
- A map that includes overrides of an evaluation’s parameters.
- statistic String
- The name of an evaluation
- parameters {[key: string]: string}
- A map that includes overrides of an evaluation’s parameters.
- statistic string
- The name of an evaluation
- parameters Mapping[str, str]
- A map that includes overrides of an evaluation’s parameters.
- statistic str
- The name of an evaluation
- parameters Map<String>
- A map that includes overrides of an evaluation’s parameters.
- statistic String
- The name of an evaluation
JobStatisticsConfiguration, JobStatisticsConfigurationArgs
- IncludedStatistics List<string> - List of included evaluations. When the list is undefined, all supported evaluations will be included.
- Overrides List<Pulumi.AwsNative.DataBrew.Inputs.JobStatisticOverride> - List of overrides for evaluations.
- IncludedStatistics []string - List of included evaluations. When the list is undefined, all supported evaluations will be included.
- Overrides []JobStatisticOverride - List of overrides for evaluations.
- includedStatistics List<String> - List of included evaluations. When the list is undefined, all supported evaluations will be included.
- overrides List<JobStatisticOverride> - List of overrides for evaluations.
- includedStatistics string[] - List of included evaluations. When the list is undefined, all supported evaluations will be included.
- overrides JobStatisticOverride[] - List of overrides for evaluations.
- included_statistics Sequence[str] - List of included evaluations. When the list is undefined, all supported evaluations will be included.
- overrides Sequence[JobStatisticOverride] - List of overrides for evaluations.
- includedStatistics List<String> - List of included evaluations. When the list is undefined, all supported evaluations will be included.
- overrides List<Property Map>
- List of overrides for evaluations.
JobType, JobTypeArgs
- Profile
- PROFILE
- Recipe
- RECIPE
- Job
Type Profile - PROFILE
- Job
Type Recipe - RECIPE
- Profile
- PROFILE
- Recipe
- RECIPE
- Profile
- PROFILE
- Recipe
- RECIPE
- PROFILE
- PROFILE
- RECIPE
- RECIPE
- "PROFILE"
- PROFILE
- "RECIPE"
- RECIPE
JobValidationConfiguration, JobValidationConfigurationArgs
- RulesetArn string - Arn of the Ruleset
- ValidationMode Pulumi.AwsNative.DataBrew.JobValidationMode - Mode of data quality validation. Default mode is "CHECK_ALL" which verifies all rules defined in the selected ruleset.
- RulesetArn string - Arn of the Ruleset
- ValidationMode JobValidationMode - Mode of data quality validation. Default mode is "CHECK_ALL" which verifies all rules defined in the selected ruleset.
- rulesetArn String - Arn of the Ruleset
- validationMode JobValidationMode - Mode of data quality validation. Default mode is "CHECK_ALL" which verifies all rules defined in the selected ruleset.
- rulesetArn string - Arn of the Ruleset
- validationMode JobValidationMode - Mode of data quality validation. Default mode is "CHECK_ALL" which verifies all rules defined in the selected ruleset.
- ruleset_arn str - Arn of the Ruleset
- validation_mode JobValidationMode - Mode of data quality validation. Default mode is "CHECK_ALL" which verifies all rules defined in the selected ruleset.
- rulesetArn String - Arn of the Ruleset
- validationMode "CHECK_ALL" - Mode of data quality validation. Default mode is "CHECK_ALL" which verifies all rules defined in the selected ruleset.
JobValidationMode, JobValidationModeArgs
- Check
All - CHECK_ALL
- Job
Validation Mode Check All - CHECK_ALL
- Check
All - CHECK_ALL
- Check
All - CHECK_ALL
- CHECK_ALL
- CHECK_ALL
- "CHECK_ALL"
- CHECK_ALL
Package Details
- Repository
- AWS Native pulumi/pulumi-aws-native
- License
- Apache-2.0
We recommend new projects start with resources from the AWS provider.