gcp.bigquery.Table
Creates a table resource in a dataset for Google BigQuery. For more information see the official documentation and API.
Note: On newer versions of the provider, you must explicitly set deletion_protection=false (and run pulumi update to write the field to state) in order to destroy an instance. It is recommended to not set this field (or to set it to true) until you're ready to destroy.
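For instance, a minimal Python sketch of a table that stays destroyable (the dataset and table IDs below are placeholders, not from this page):
import pulumi_gcp as gcp

# Hypothetical scratch table: deletion_protection=False must be written to
# state (via a pulumi update) before a destroy can actually remove the table.
scratch_table = gcp.bigquery.Table("scratch",
    dataset_id="my_dataset",   # placeholder dataset ID
    table_id="scratch_table",  # placeholder table ID
    deletion_protection=False)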
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const _default = new gcp.bigquery.Dataset("default", {
datasetId: "foo",
friendlyName: "test",
description: "This is a test description",
location: "EU",
defaultTableExpirationMs: 3600000,
labels: {
env: "default",
},
});
const defaultTable = new gcp.bigquery.Table("default", {
datasetId: _default.datasetId,
tableId: "bar",
timePartitioning: {
type: "DAY",
},
labels: {
env: "default",
},
schema: `[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
`,
});
const sheet = new gcp.bigquery.Table("sheet", {
datasetId: _default.datasetId,
tableId: "sheet",
externalDataConfiguration: {
autodetect: true,
sourceFormat: "GOOGLE_SHEETS",
googleSheetsOptions: {
skipLeadingRows: 1,
},
sourceUris: ["https://docs.google.com/spreadsheets/d/123456789012345"],
},
});
import pulumi
import pulumi_gcp as gcp
default = gcp.bigquery.Dataset("default",
dataset_id="foo",
friendly_name="test",
description="This is a test description",
location="EU",
default_table_expiration_ms=3600000,
labels={
"env": "default",
})
default_table = gcp.bigquery.Table("default",
dataset_id=default.dataset_id,
table_id="bar",
time_partitioning={
"type": "DAY",
},
labels={
"env": "default",
},
schema="""[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
""")
sheet = gcp.bigquery.Table("sheet",
dataset_id=default.dataset_id,
table_id="sheet",
external_data_configuration={
"autodetect": True,
"source_format": "GOOGLE_SHEETS",
"google_sheets_options": {
"skip_leading_rows": 1,
},
"source_uris": ["https://docs.google.com/spreadsheets/d/123456789012345"],
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_default, err := bigquery.NewDataset(ctx, "default", &bigquery.DatasetArgs{
DatasetId: pulumi.String("foo"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("EU"),
DefaultTableExpirationMs: pulumi.Int(3600000),
Labels: pulumi.StringMap{
"env": pulumi.String("default"),
},
})
if err != nil {
return err
}
_, err = bigquery.NewTable(ctx, "default", &bigquery.TableArgs{
DatasetId: _default.DatasetId,
TableId: pulumi.String("bar"),
TimePartitioning: &bigquery.TableTimePartitioningArgs{
Type: pulumi.String("DAY"),
},
Labels: pulumi.StringMap{
"env": pulumi.String("default"),
},
Schema: pulumi.String(`[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
`),
})
if err != nil {
return err
}
_, err = bigquery.NewTable(ctx, "sheet", &bigquery.TableArgs{
DatasetId: _default.DatasetId,
TableId: pulumi.String("sheet"),
ExternalDataConfiguration: &bigquery.TableExternalDataConfigurationArgs{
Autodetect: pulumi.Bool(true),
SourceFormat: pulumi.String("GOOGLE_SHEETS"),
GoogleSheetsOptions: &bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs{
SkipLeadingRows: pulumi.Int(1),
},
SourceUris: pulumi.StringArray{
pulumi.String("https://docs.google.com/spreadsheets/d/123456789012345"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var @default = new Gcp.BigQuery.Dataset("default", new()
{
DatasetId = "foo",
FriendlyName = "test",
Description = "This is a test description",
Location = "EU",
DefaultTableExpirationMs = 3600000,
Labels =
{
{ "env", "default" },
},
});
var defaultTable = new Gcp.BigQuery.Table("default", new()
{
DatasetId = @default.DatasetId,
TableId = "bar",
TimePartitioning = new Gcp.BigQuery.Inputs.TableTimePartitioningArgs
{
Type = "DAY",
},
Labels =
{
{ "env", "default" },
},
Schema = @"[
{
""name"": ""permalink"",
""type"": ""STRING"",
""mode"": ""NULLABLE"",
""description"": ""The Permalink""
},
{
""name"": ""state"",
""type"": ""STRING"",
""mode"": ""NULLABLE"",
""description"": ""State where the head office is located""
}
]
",
});
var sheet = new Gcp.BigQuery.Table("sheet", new()
{
DatasetId = @default.DatasetId,
TableId = "sheet",
ExternalDataConfiguration = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationArgs
{
Autodetect = true,
SourceFormat = "GOOGLE_SHEETS",
GoogleSheetsOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs
{
SkipLeadingRows = 1,
},
SourceUris = new[]
{
"https://docs.google.com/spreadsheets/d/123456789012345",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.inputs.TableTimePartitioningArgs;
import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationArgs;
import com.pulumi.gcp.bigquery.inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var default_ = new Dataset("default", DatasetArgs.builder()
.datasetId("foo")
.friendlyName("test")
.description("This is a test description")
.location("EU")
.defaultTableExpirationMs(3600000)
.labels(Map.of("env", "default"))
.build());
var defaultTable = new Table("defaultTable", TableArgs.builder()
.datasetId(default_.datasetId())
.tableId("bar")
.timePartitioning(TableTimePartitioningArgs.builder()
.type("DAY")
.build())
.labels(Map.of("env", "default"))
.schema("""
[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
""")
.build());
var sheet = new Table("sheet", TableArgs.builder()
.datasetId(default_.datasetId())
.tableId("sheet")
.externalDataConfiguration(TableExternalDataConfigurationArgs.builder()
.autodetect(true)
.sourceFormat("GOOGLE_SHEETS")
.googleSheetsOptions(TableExternalDataConfigurationGoogleSheetsOptionsArgs.builder()
.skipLeadingRows(1)
.build())
.sourceUris("https://docs.google.com/spreadsheets/d/123456789012345")
.build())
.build());
}
}
resources:
default:
type: gcp:bigquery:Dataset
properties:
datasetId: foo
friendlyName: test
description: This is a test description
location: EU
defaultTableExpirationMs: 3600000
labels:
env: default
defaultTable:
type: gcp:bigquery:Table
name: default
properties:
datasetId: ${default.datasetId}
tableId: bar
timePartitioning:
type: DAY
labels:
env: default
schema: |
[
{
"name": "permalink",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Permalink"
},
{
"name": "state",
"type": "STRING",
"mode": "NULLABLE",
"description": "State where the head office is located"
}
]
sheet:
type: gcp:bigquery:Table
properties:
datasetId: ${default.datasetId}
tableId: sheet
externalDataConfiguration:
autodetect: true
sourceFormat: GOOGLE_SHEETS
googleSheetsOptions:
skipLeadingRows: 1
sourceUris:
- https://docs.google.com/spreadsheets/d/123456789012345
Create Table Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Table(name: string, args: TableArgs, opts?: CustomResourceOptions);
@overload
def Table(resource_name: str,
args: TableArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Table(resource_name: str,
opts: Optional[ResourceOptions] = None,
dataset_id: Optional[str] = None,
table_id: Optional[str] = None,
materialized_view: Optional[TableMaterializedViewArgs] = None,
max_staleness: Optional[str] = None,
description: Optional[str] = None,
encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
expiration_time: Optional[int] = None,
external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
friendly_name: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
biglake_configuration: Optional[TableBiglakeConfigurationArgs] = None,
deletion_protection: Optional[bool] = None,
project: Optional[str] = None,
range_partitioning: Optional[TableRangePartitioningArgs] = None,
require_partition_filter: Optional[bool] = None,
resource_tags: Optional[Mapping[str, str]] = None,
schema: Optional[str] = None,
table_constraints: Optional[TableTableConstraintsArgs] = None,
clusterings: Optional[Sequence[str]] = None,
table_replication_info: Optional[TableTableReplicationInfoArgs] = None,
time_partitioning: Optional[TableTimePartitioningArgs] = None,
view: Optional[TableViewArgs] = None)
func NewTable(ctx *Context, name string, args TableArgs, opts ...ResourceOption) (*Table, error)
public Table(string name, TableArgs args, CustomResourceOptions? opts = null)
type: gcp:bigquery:Table
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args TableArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
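As an illustrative sketch (not taken from this page), the opts parameter accepts standard resource options such as protect or depends_on; in Python:
import pulumi
import pulumi_gcp as gcp

# Placeholder dataset and table IDs; protect=True makes Pulumi refuse to
# delete this resource until the option is removed.
audit_table = gcp.bigquery.Table("audit",
    dataset_id="my_dataset",
    table_id="audit_events",
    opts=pulumi.ResourceOptions(protect=True))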
Constructor example
The following reference example uses placeholder values for all input properties.
var gcpTableResource = new Gcp.BigQuery.Table("gcpTableResource", new()
{
DatasetId = "string",
TableId = "string",
MaterializedView = new Gcp.BigQuery.Inputs.TableMaterializedViewArgs
{
Query = "string",
AllowNonIncrementalDefinition = false,
EnableRefresh = false,
RefreshIntervalMs = 0,
},
MaxStaleness = "string",
Description = "string",
EncryptionConfiguration = new Gcp.BigQuery.Inputs.TableEncryptionConfigurationArgs
{
KmsKeyName = "string",
KmsKeyVersion = "string",
},
ExpirationTime = 0,
ExternalDataConfiguration = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationArgs
{
Autodetect = false,
SourceUris = new[]
{
"string",
},
JsonExtension = "string",
JsonOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationJsonOptionsArgs
{
Encoding = "string",
},
ConnectionId = "string",
CsvOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationCsvOptionsArgs
{
Quote = "string",
AllowJaggedRows = false,
AllowQuotedNewlines = false,
Encoding = "string",
FieldDelimiter = "string",
SkipLeadingRows = 0,
},
FileSetSpecType = "string",
GoogleSheetsOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationGoogleSheetsOptionsArgs
{
Range = "string",
SkipLeadingRows = 0,
},
HivePartitioningOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationHivePartitioningOptionsArgs
{
Mode = "string",
RequirePartitionFilter = false,
SourceUriPrefix = "string",
},
IgnoreUnknownValues = false,
BigtableOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationBigtableOptionsArgs
{
ColumnFamilies = new[]
{
new Gcp.BigQuery.Inputs.TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs
{
Columns = new[]
{
new Gcp.BigQuery.Inputs.TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs
{
Encoding = "string",
FieldName = "string",
OnlyReadLatest = false,
QualifierEncoded = "string",
QualifierString = "string",
Type = "string",
},
},
Encoding = "string",
FamilyId = "string",
OnlyReadLatest = false,
Type = "string",
},
},
IgnoreUnspecifiedColumnFamilies = false,
OutputColumnFamiliesAsJson = false,
ReadRowkeyAsString = false,
},
Compression = "string",
MaxBadRecords = 0,
MetadataCacheMode = "string",
ObjectMetadata = "string",
ParquetOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationParquetOptionsArgs
{
EnableListInference = false,
EnumAsString = false,
},
ReferenceFileSchemaUri = "string",
Schema = "string",
SourceFormat = "string",
AvroOptions = new Gcp.BigQuery.Inputs.TableExternalDataConfigurationAvroOptionsArgs
{
UseAvroLogicalTypes = false,
},
},
FriendlyName = "string",
Labels =
{
{ "string", "string" },
},
BiglakeConfiguration = new Gcp.BigQuery.Inputs.TableBiglakeConfigurationArgs
{
ConnectionId = "string",
FileFormat = "string",
StorageUri = "string",
TableFormat = "string",
},
DeletionProtection = false,
Project = "string",
RangePartitioning = new Gcp.BigQuery.Inputs.TableRangePartitioningArgs
{
Field = "string",
Range = new Gcp.BigQuery.Inputs.TableRangePartitioningRangeArgs
{
End = 0,
Interval = 0,
Start = 0,
},
},
RequirePartitionFilter = false,
ResourceTags =
{
{ "string", "string" },
},
Schema = "string",
TableConstraints = new Gcp.BigQuery.Inputs.TableTableConstraintsArgs
{
ForeignKeys = new[]
{
new Gcp.BigQuery.Inputs.TableTableConstraintsForeignKeyArgs
{
ColumnReferences = new Gcp.BigQuery.Inputs.TableTableConstraintsForeignKeyColumnReferencesArgs
{
ReferencedColumn = "string",
ReferencingColumn = "string",
},
ReferencedTable = new Gcp.BigQuery.Inputs.TableTableConstraintsForeignKeyReferencedTableArgs
{
DatasetId = "string",
ProjectId = "string",
TableId = "string",
},
Name = "string",
},
},
PrimaryKey = new Gcp.BigQuery.Inputs.TableTableConstraintsPrimaryKeyArgs
{
Columns = new[]
{
"string",
},
},
},
Clusterings = new[]
{
"string",
},
TableReplicationInfo = new Gcp.BigQuery.Inputs.TableTableReplicationInfoArgs
{
SourceDatasetId = "string",
SourceProjectId = "string",
SourceTableId = "string",
ReplicationIntervalMs = 0,
},
TimePartitioning = new Gcp.BigQuery.Inputs.TableTimePartitioningArgs
{
Type = "string",
ExpirationMs = 0,
Field = "string",
},
View = new Gcp.BigQuery.Inputs.TableViewArgs
{
Query = "string",
UseLegacySql = false,
},
});
example, err := bigquery.NewTable(ctx, "gcpTableResource", &bigquery.TableArgs{
DatasetId: pulumi.String("string"),
TableId: pulumi.String("string"),
MaterializedView: &bigquery.TableMaterializedViewArgs{
Query: pulumi.String("string"),
AllowNonIncrementalDefinition: pulumi.Bool(false),
EnableRefresh: pulumi.Bool(false),
RefreshIntervalMs: pulumi.Int(0),
},
MaxStaleness: pulumi.String("string"),
Description: pulumi.String("string"),
EncryptionConfiguration: &bigquery.TableEncryptionConfigurationArgs{
KmsKeyName: pulumi.String("string"),
KmsKeyVersion: pulumi.String("string"),
},
ExpirationTime: pulumi.Int(0),
ExternalDataConfiguration: &bigquery.TableExternalDataConfigurationArgs{
Autodetect: pulumi.Bool(false),
SourceUris: pulumi.StringArray{
pulumi.String("string"),
},
JsonExtension: pulumi.String("string"),
JsonOptions: &bigquery.TableExternalDataConfigurationJsonOptionsArgs{
Encoding: pulumi.String("string"),
},
ConnectionId: pulumi.String("string"),
CsvOptions: &bigquery.TableExternalDataConfigurationCsvOptionsArgs{
Quote: pulumi.String("string"),
AllowJaggedRows: pulumi.Bool(false),
AllowQuotedNewlines: pulumi.Bool(false),
Encoding: pulumi.String("string"),
FieldDelimiter: pulumi.String("string"),
SkipLeadingRows: pulumi.Int(0),
},
FileSetSpecType: pulumi.String("string"),
GoogleSheetsOptions: &bigquery.TableExternalDataConfigurationGoogleSheetsOptionsArgs{
Range: pulumi.String("string"),
SkipLeadingRows: pulumi.Int(0),
},
HivePartitioningOptions: &bigquery.TableExternalDataConfigurationHivePartitioningOptionsArgs{
Mode: pulumi.String("string"),
RequirePartitionFilter: pulumi.Bool(false),
SourceUriPrefix: pulumi.String("string"),
},
IgnoreUnknownValues: pulumi.Bool(false),
BigtableOptions: &bigquery.TableExternalDataConfigurationBigtableOptionsArgs{
ColumnFamilies: bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyArray{
&bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs{
Columns: bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArray{
&bigquery.TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs{
Encoding: pulumi.String("string"),
FieldName: pulumi.String("string"),
OnlyReadLatest: pulumi.Bool(false),
QualifierEncoded: pulumi.String("string"),
QualifierString: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
Encoding: pulumi.String("string"),
FamilyId: pulumi.String("string"),
OnlyReadLatest: pulumi.Bool(false),
Type: pulumi.String("string"),
},
},
IgnoreUnspecifiedColumnFamilies: pulumi.Bool(false),
OutputColumnFamiliesAsJson: pulumi.Bool(false),
ReadRowkeyAsString: pulumi.Bool(false),
},
Compression: pulumi.String("string"),
MaxBadRecords: pulumi.Int(0),
MetadataCacheMode: pulumi.String("string"),
ObjectMetadata: pulumi.String("string"),
ParquetOptions: &bigquery.TableExternalDataConfigurationParquetOptionsArgs{
EnableListInference: pulumi.Bool(false),
EnumAsString: pulumi.Bool(false),
},
ReferenceFileSchemaUri: pulumi.String("string"),
Schema: pulumi.String("string"),
SourceFormat: pulumi.String("string"),
AvroOptions: &bigquery.TableExternalDataConfigurationAvroOptionsArgs{
UseAvroLogicalTypes: pulumi.Bool(false),
},
},
FriendlyName: pulumi.String("string"),
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
BiglakeConfiguration: &bigquery.TableBiglakeConfigurationArgs{
ConnectionId: pulumi.String("string"),
FileFormat: pulumi.String("string"),
StorageUri: pulumi.String("string"),
TableFormat: pulumi.String("string"),
},
DeletionProtection: pulumi.Bool(false),
Project: pulumi.String("string"),
RangePartitioning: &bigquery.TableRangePartitioningArgs{
Field: pulumi.String("string"),
Range: &bigquery.TableRangePartitioningRangeArgs{
End: pulumi.Int(0),
Interval: pulumi.Int(0),
Start: pulumi.Int(0),
},
},
RequirePartitionFilter: pulumi.Bool(false),
ResourceTags: pulumi.StringMap{
"string": pulumi.String("string"),
},
Schema: pulumi.String("string"),
TableConstraints: &bigquery.TableTableConstraintsArgs{
ForeignKeys: bigquery.TableTableConstraintsForeignKeyArray{
&bigquery.TableTableConstraintsForeignKeyArgs{
ColumnReferences: &bigquery.TableTableConstraintsForeignKeyColumnReferencesArgs{
ReferencedColumn: pulumi.String("string"),
ReferencingColumn: pulumi.String("string"),
},
ReferencedTable: &bigquery.TableTableConstraintsForeignKeyReferencedTableArgs{
DatasetId: pulumi.String("string"),
ProjectId: pulumi.String("string"),
TableId: pulumi.String("string"),
},
Name: pulumi.String("string"),
},
},
PrimaryKey: &bigquery.TableTableConstraintsPrimaryKeyArgs{
Columns: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Clusterings: pulumi.StringArray{
pulumi.String("string"),
},
TableReplicationInfo: &bigquery.TableTableReplicationInfoArgs{
SourceDatasetId: pulumi.String("string"),
SourceProjectId: pulumi.String("string"),
SourceTableId: pulumi.String("string"),
ReplicationIntervalMs: pulumi.Int(0),
},
TimePartitioning: &bigquery.TableTimePartitioningArgs{
Type: pulumi.String("string"),
ExpirationMs: pulumi.Int(0),
Field: pulumi.String("string"),
},
View: &bigquery.TableViewArgs{
Query: pulumi.String("string"),
UseLegacySql: pulumi.Bool(false),
},
})
var gcpTableResource = new Table("gcpTableResource", TableArgs.builder()
.datasetId("string")
.tableId("string")
.materializedView(TableMaterializedViewArgs.builder()
.query("string")
.allowNonIncrementalDefinition(false)
.enableRefresh(false)
.refreshIntervalMs(0)
.build())
.maxStaleness("string")
.description("string")
.encryptionConfiguration(TableEncryptionConfigurationArgs.builder()
.kmsKeyName("string")
.kmsKeyVersion("string")
.build())
.expirationTime(0)
.externalDataConfiguration(TableExternalDataConfigurationArgs.builder()
.autodetect(false)
.sourceUris("string")
.jsonExtension("string")
.jsonOptions(TableExternalDataConfigurationJsonOptionsArgs.builder()
.encoding("string")
.build())
.connectionId("string")
.csvOptions(TableExternalDataConfigurationCsvOptionsArgs.builder()
.quote("string")
.allowJaggedRows(false)
.allowQuotedNewlines(false)
.encoding("string")
.fieldDelimiter("string")
.skipLeadingRows(0)
.build())
.fileSetSpecType("string")
.googleSheetsOptions(TableExternalDataConfigurationGoogleSheetsOptionsArgs.builder()
.range("string")
.skipLeadingRows(0)
.build())
.hivePartitioningOptions(TableExternalDataConfigurationHivePartitioningOptionsArgs.builder()
.mode("string")
.requirePartitionFilter(false)
.sourceUriPrefix("string")
.build())
.ignoreUnknownValues(false)
.bigtableOptions(TableExternalDataConfigurationBigtableOptionsArgs.builder()
.columnFamilies(TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs.builder()
.columns(TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs.builder()
.encoding("string")
.fieldName("string")
.onlyReadLatest(false)
.qualifierEncoded("string")
.qualifierString("string")
.type("string")
.build())
.encoding("string")
.familyId("string")
.onlyReadLatest(false)
.type("string")
.build())
.ignoreUnspecifiedColumnFamilies(false)
.outputColumnFamiliesAsJson(false)
.readRowkeyAsString(false)
.build())
.compression("string")
.maxBadRecords(0)
.metadataCacheMode("string")
.objectMetadata("string")
.parquetOptions(TableExternalDataConfigurationParquetOptionsArgs.builder()
.enableListInference(false)
.enumAsString(false)
.build())
.referenceFileSchemaUri("string")
.schema("string")
.sourceFormat("string")
.avroOptions(TableExternalDataConfigurationAvroOptionsArgs.builder()
.useAvroLogicalTypes(false)
.build())
.build())
.friendlyName("string")
.labels(Map.of("string", "string"))
.biglakeConfiguration(TableBiglakeConfigurationArgs.builder()
.connectionId("string")
.fileFormat("string")
.storageUri("string")
.tableFormat("string")
.build())
.deletionProtection(false)
.project("string")
.rangePartitioning(TableRangePartitioningArgs.builder()
.field("string")
.range(TableRangePartitioningRangeArgs.builder()
.end(0)
.interval(0)
.start(0)
.build())
.build())
.requirePartitionFilter(false)
.resourceTags(Map.of("string", "string"))
.schema("string")
.tableConstraints(TableTableConstraintsArgs.builder()
.foreignKeys(TableTableConstraintsForeignKeyArgs.builder()
.columnReferences(TableTableConstraintsForeignKeyColumnReferencesArgs.builder()
.referencedColumn("string")
.referencingColumn("string")
.build())
.referencedTable(TableTableConstraintsForeignKeyReferencedTableArgs.builder()
.datasetId("string")
.projectId("string")
.tableId("string")
.build())
.name("string")
.build())
.primaryKey(TableTableConstraintsPrimaryKeyArgs.builder()
.columns("string")
.build())
.build())
.clusterings("string")
.tableReplicationInfo(TableTableReplicationInfoArgs.builder()
.sourceDatasetId("string")
.sourceProjectId("string")
.sourceTableId("string")
.replicationIntervalMs(0)
.build())
.timePartitioning(TableTimePartitioningArgs.builder()
.type("string")
.expirationMs(0)
.field("string")
.build())
.view(TableViewArgs.builder()
.query("string")
.useLegacySql(false)
.build())
.build());
gcp_table_resource = gcp.bigquery.Table("gcpTableResource",
dataset_id="string",
table_id="string",
materialized_view={
"query": "string",
"allow_non_incremental_definition": False,
"enable_refresh": False,
"refresh_interval_ms": 0,
},
max_staleness="string",
description="string",
encryption_configuration={
"kms_key_name": "string",
"kms_key_version": "string",
},
expiration_time=0,
external_data_configuration={
"autodetect": False,
"source_uris": ["string"],
"json_extension": "string",
"json_options": {
"encoding": "string",
},
"connection_id": "string",
"csv_options": {
"quote": "string",
"allow_jagged_rows": False,
"allow_quoted_newlines": False,
"encoding": "string",
"field_delimiter": "string",
"skip_leading_rows": 0,
},
"file_set_spec_type": "string",
"google_sheets_options": {
"range": "string",
"skip_leading_rows": 0,
},
"hive_partitioning_options": {
"mode": "string",
"require_partition_filter": False,
"source_uri_prefix": "string",
},
"ignore_unknown_values": False,
"bigtable_options": {
"column_families": [{
"columns": [{
"encoding": "string",
"field_name": "string",
"only_read_latest": False,
"qualifier_encoded": "string",
"qualifier_string": "string",
"type": "string",
}],
"encoding": "string",
"family_id": "string",
"only_read_latest": False,
"type": "string",
}],
"ignore_unspecified_column_families": False,
"output_column_families_as_json": False,
"read_rowkey_as_string": False,
},
"compression": "string",
"max_bad_records": 0,
"metadata_cache_mode": "string",
"object_metadata": "string",
"parquet_options": {
"enable_list_inference": False,
"enum_as_string": False,
},
"reference_file_schema_uri": "string",
"schema": "string",
"source_format": "string",
"avro_options": {
"use_avro_logical_types": False,
},
},
friendly_name="string",
labels={
"string": "string",
},
biglake_configuration={
"connection_id": "string",
"file_format": "string",
"storage_uri": "string",
"table_format": "string",
},
deletion_protection=False,
project="string",
range_partitioning={
"field": "string",
"range": {
"end": 0,
"interval": 0,
"start": 0,
},
},
require_partition_filter=False,
resource_tags={
"string": "string",
},
schema="string",
table_constraints={
"foreign_keys": [{
"column_references": {
"referenced_column": "string",
"referencing_column": "string",
},
"referenced_table": {
"dataset_id": "string",
"project_id": "string",
"table_id": "string",
},
"name": "string",
}],
"primary_key": {
"columns": ["string"],
},
},
clusterings=["string"],
table_replication_info={
"source_dataset_id": "string",
"source_project_id": "string",
"source_table_id": "string",
"replication_interval_ms": 0,
},
time_partitioning={
"type": "string",
"expiration_ms": 0,
"field": "string",
},
view={
"query": "string",
"use_legacy_sql": False,
})
const gcpTableResource = new gcp.bigquery.Table("gcpTableResource", {
datasetId: "string",
tableId: "string",
materializedView: {
query: "string",
allowNonIncrementalDefinition: false,
enableRefresh: false,
refreshIntervalMs: 0,
},
maxStaleness: "string",
description: "string",
encryptionConfiguration: {
kmsKeyName: "string",
kmsKeyVersion: "string",
},
expirationTime: 0,
externalDataConfiguration: {
autodetect: false,
sourceUris: ["string"],
jsonExtension: "string",
jsonOptions: {
encoding: "string",
},
connectionId: "string",
csvOptions: {
quote: "string",
allowJaggedRows: false,
allowQuotedNewlines: false,
encoding: "string",
fieldDelimiter: "string",
skipLeadingRows: 0,
},
fileSetSpecType: "string",
googleSheetsOptions: {
range: "string",
skipLeadingRows: 0,
},
hivePartitioningOptions: {
mode: "string",
requirePartitionFilter: false,
sourceUriPrefix: "string",
},
ignoreUnknownValues: false,
bigtableOptions: {
columnFamilies: [{
columns: [{
encoding: "string",
fieldName: "string",
onlyReadLatest: false,
qualifierEncoded: "string",
qualifierString: "string",
type: "string",
}],
encoding: "string",
familyId: "string",
onlyReadLatest: false,
type: "string",
}],
ignoreUnspecifiedColumnFamilies: false,
outputColumnFamiliesAsJson: false,
readRowkeyAsString: false,
},
compression: "string",
maxBadRecords: 0,
metadataCacheMode: "string",
objectMetadata: "string",
parquetOptions: {
enableListInference: false,
enumAsString: false,
},
referenceFileSchemaUri: "string",
schema: "string",
sourceFormat: "string",
avroOptions: {
useAvroLogicalTypes: false,
},
},
friendlyName: "string",
labels: {
string: "string",
},
biglakeConfiguration: {
connectionId: "string",
fileFormat: "string",
storageUri: "string",
tableFormat: "string",
},
deletionProtection: false,
project: "string",
rangePartitioning: {
field: "string",
range: {
end: 0,
interval: 0,
start: 0,
},
},
requirePartitionFilter: false,
resourceTags: {
string: "string",
},
schema: "string",
tableConstraints: {
foreignKeys: [{
columnReferences: {
referencedColumn: "string",
referencingColumn: "string",
},
referencedTable: {
datasetId: "string",
projectId: "string",
tableId: "string",
},
name: "string",
}],
primaryKey: {
columns: ["string"],
},
},
clusterings: ["string"],
tableReplicationInfo: {
sourceDatasetId: "string",
sourceProjectId: "string",
sourceTableId: "string",
replicationIntervalMs: 0,
},
timePartitioning: {
type: "string",
expirationMs: 0,
field: "string",
},
view: {
query: "string",
useLegacySql: false,
},
});
type: gcp:bigquery:Table
properties:
biglakeConfiguration:
connectionId: string
fileFormat: string
storageUri: string
tableFormat: string
clusterings:
- string
datasetId: string
deletionProtection: false
description: string
encryptionConfiguration:
kmsKeyName: string
kmsKeyVersion: string
expirationTime: 0
externalDataConfiguration:
autodetect: false
avroOptions:
useAvroLogicalTypes: false
bigtableOptions:
columnFamilies:
- columns:
- encoding: string
fieldName: string
onlyReadLatest: false
qualifierEncoded: string
qualifierString: string
type: string
encoding: string
familyId: string
onlyReadLatest: false
type: string
ignoreUnspecifiedColumnFamilies: false
outputColumnFamiliesAsJson: false
readRowkeyAsString: false
compression: string
connectionId: string
csvOptions:
allowJaggedRows: false
allowQuotedNewlines: false
encoding: string
fieldDelimiter: string
quote: string
skipLeadingRows: 0
fileSetSpecType: string
googleSheetsOptions:
range: string
skipLeadingRows: 0
hivePartitioningOptions:
mode: string
requirePartitionFilter: false
sourceUriPrefix: string
ignoreUnknownValues: false
jsonExtension: string
jsonOptions:
encoding: string
maxBadRecords: 0
metadataCacheMode: string
objectMetadata: string
parquetOptions:
enableListInference: false
enumAsString: false
referenceFileSchemaUri: string
schema: string
sourceFormat: string
sourceUris:
- string
friendlyName: string
labels:
string: string
materializedView:
allowNonIncrementalDefinition: false
enableRefresh: false
query: string
refreshIntervalMs: 0
maxStaleness: string
project: string
rangePartitioning:
field: string
range:
end: 0
interval: 0
start: 0
requirePartitionFilter: false
resourceTags:
string: string
schema: string
tableConstraints:
foreignKeys:
- columnReferences:
referencedColumn: string
referencingColumn: string
name: string
referencedTable:
datasetId: string
projectId: string
tableId: string
primaryKey:
columns:
- string
tableId: string
tableReplicationInfo:
replicationIntervalMs: 0
sourceDatasetId: string
sourceProjectId: string
sourceTableId: string
timePartitioning:
expirationMs: 0
field: string
type: string
view:
query: string
useLegacySql: false
Table Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
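For example, an object-typed input such as time_partitioning can be written in either form (a minimal sketch with placeholder IDs):
import pulumi_gcp as gcp

# Dictionary literal form
t1 = gcp.bigquery.Table("t1",
    dataset_id="my_dataset",
    table_id="t1",
    time_partitioning={"type": "DAY"})

# Argument class form
t2 = gcp.bigquery.Table("t2",
    dataset_id="my_dataset",
    table_id="t2",
    time_partitioning=gcp.bigquery.TableTimePartitioningArgs(type="DAY"))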
The Table resource accepts the following input properties:
- DatasetId string
- The dataset ID to create the table in. Changing this forces a new resource to be created.
- TableId string
- A unique ID for the resource. Changing this forces a new resource to be created.
- BiglakeConfiguration TableBiglakeConfiguration
- Specifies the configuration of a BigLake managed table. Structure is documented below.
- Clusterings List<string>
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- DeletionProtection bool
- Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
- Description string
- The field description.
- EncryptionConfiguration TableEncryptionConfiguration
- Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- ExpirationTime int
- The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- ExternalDataConfiguration TableExternalDataConfiguration
- Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- FriendlyName string
- A descriptive name for the table.
- Labels Dictionary<string, string>
- A mapping of labels to assign to the resource.
  Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- MaterializedView TableMaterializedView
- If specified, configures this table as a materialized view. Structure is documented below.
- MaxStaleness string
- The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- RangePartitioning TableRangePartitioning
- If specified, configures range-based partitioning for this table. Structure is documented below.
- RequirePartitionFilter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- ResourceTags Dictionary<string, string>
- The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- Schema string
- A JSON schema for the table.
- TableConstraints TableTableConstraints
- Defines the primary key and foreign keys. Structure is documented below.
- TableReplicationInfo TableTableReplicationInfo
- Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- TimePartitioning TableTimePartitioning
- If specified, configures time-based partitioning for this table. Structure is documented below.
- View TableView
- If specified, configures this table as a view. Structure is documented below.
- DatasetId string
- The dataset ID to create the table in. Changing this forces a new resource to be created.
- TableId string
- A unique ID for the resource. Changing this forces a new resource to be created.
- BiglakeConfiguration TableBiglakeConfigurationArgs
- Specifies the configuration of a BigLake managed table. Structure is documented below.
- Clusterings []string
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- DeletionProtection bool
- Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
- Description string
- The field description.
- EncryptionConfiguration TableEncryptionConfigurationArgs
- Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- ExpirationTime int
- The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- ExternalDataConfiguration TableExternalDataConfigurationArgs
- Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- FriendlyName string
- A descriptive name for the table.
- Labels map[string]string
- A mapping of labels to assign to the resource.
  Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- MaterializedView TableMaterializedViewArgs
- If specified, configures this table as a materialized view. Structure is documented below.
- MaxStaleness string
- The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- RangePartitioning TableRangePartitioningArgs
- If specified, configures range-based partitioning for this table. Structure is documented below.
- RequirePartitionFilter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- ResourceTags map[string]string
- The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- Schema string
- A JSON schema for the table.
- TableConstraints TableTableConstraintsArgs
- Defines the primary key and foreign keys. Structure is documented below.
- TableReplicationInfo TableTableReplicationInfoArgs
- Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- TimePartitioning TableTimePartitioningArgs
- If specified, configures time-based partitioning for this table. Structure is documented below.
- View TableViewArgs
- If specified, configures this table as a view. Structure is documented below.
- datasetId String
- The dataset ID to create the table in. Changing this forces a new resource to be created.
- tableId String
- A unique ID for the resource. Changing this forces a new resource to be created.
- biglakeConfiguration TableBiglakeConfiguration
- Specifies the configuration of a BigLake managed table. Structure is documented below.
- clusterings List<String>
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletionProtection Boolean
- Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
- description String
- The field description.
- encryptionConfiguration TableEncryptionConfiguration
- Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expirationTime Integer
- The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- externalDataConfiguration TableExternalDataConfiguration
- Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendlyName String
- A descriptive name for the table.
- labels Map<String,String>
- A mapping of labels to assign to the resource.
  Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- materializedView TableMaterializedView
- If specified, configures this table as a materialized view. Structure is documented below.
- maxStaleness String
- The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- rangePartitioning TableRangePartitioning
- If specified, configures range-based partitioning for this table. Structure is documented below.
- requirePartitionFilter Boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resourceTags Map<String,String>
- The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema String
- A JSON schema for the table.
- tableConstraints TableTableConstraints
- Defines the primary key and foreign keys. Structure is documented below.
- tableReplicationInfo TableTableReplicationInfo
- Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- timePartitioning TableTimePartitioning
- If specified, configures time-based partitioning for this table. Structure is documented below.
- view TableView
- If specified, configures this table as a view. Structure is documented below.
- datasetId string
- The dataset ID to create the table in. Changing this forces a new resource to be created.
- tableId string
- A unique ID for the resource. Changing this forces a new resource to be created.
- biglakeConfiguration TableBiglakeConfiguration
- Specifies the configuration of a BigLake managed table. Structure is documented below.
- clusterings string[]
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletionProtection boolean
- Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
- description string
- The field description.
- encryptionConfiguration TableEncryptionConfiguration
- Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expirationTime number
- The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- externalDataConfiguration TableExternalDataConfiguration
- Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendlyName string
- A descriptive name for the table.
- labels {[key: string]: string}
- A mapping of labels to assign to the resource.
  Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- materializedView TableMaterializedView
- If specified, configures this table as a materialized view. Structure is documented below.
- maxStaleness string
- The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- rangePartitioning TableRangePartitioning
- If specified, configures range-based partitioning for this table. Structure is documented below.
- requirePartitionFilter boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resourceTags {[key: string]: string}
- The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema string
- A JSON schema for the table.
- tableConstraints TableTableConstraints
- Defines the primary key and foreign keys. Structure is documented below.
- tableReplicationInfo TableTableReplicationInfo
- Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- timePartitioning TableTimePartitioning
- If specified, configures time-based partitioning for this table. Structure is documented below.
- view TableView
- If specified, configures this table as a view. Structure is documented below.
- dataset_id str
- The dataset ID to create the table in. Changing this forces a new resource to be created.
- table_id str
- A unique ID for the resource. Changing this forces a new resource to be created.
- biglake_configuration TableBiglakeConfigurationArgs
- Specifies the configuration of a BigLake managed table. Structure is documented below.
- clusterings Sequence[str]
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletion_protection bool
- Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
- description str
- The field description.
- encryption_configuration TableEncryptionConfigurationArgs
- Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expiration_time int
- The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external_data_configuration TableExternalDataConfigurationArgs
- Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly_name str
- A descriptive name for the table.
- labels Mapping[str, str]
- A mapping of labels to assign to the resource.
  Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- materialized_view TableMaterializedViewArgs
- If specified, configures this table as a materialized view. Structure is documented below.
- max_staleness str
- The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- range_partitioning TableRangePartitioningArgs
- If specified, configures range-based partitioning for this table. Structure is documented below.
- require_partition_filter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resource_tags Mapping[str, str]
- The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema str
- A JSON schema for the table.
- table_constraints TableTableConstraintsArgs
- Defines the primary key and foreign keys. Structure is documented below.
- table_replication_info TableTableReplicationInfoArgs
- Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- time_partitioning TableTimePartitioningArgs
- If specified, configures time-based partitioning for this table. Structure is documented below.
- view TableViewArgs
- If specified, configures this table as a view. Structure is documented below.
- datasetId String
- The dataset ID to create the table in. Changing this forces a new resource to be created.
- tableId String
- A unique ID for the resource. Changing this forces a new resource to be created.
- biglakeConfiguration Property Map
- Specifies the configuration of a BigLake managed table. Structure is documented below.
- clusterings List<String>
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- deletionProtection Boolean
- Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or update that would delete the instance will fail.
- description String
- The field description.
- encryptionConfiguration Property Map
- Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- expirationTime Number
- The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- externalDataConfiguration Property Map
- Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendlyName String
- A descriptive name for the table.
- labels Map<String>
- A mapping of labels to assign to the resource.
  Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- materializedView Property Map
- If specified, configures this table as a materialized view. Structure is documented below.
- maxStaleness String
- The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- rangePartitioning Property Map
- If specified, configures range-based partitioning for this table. Structure is documented below.
- requirePartitionFilter Boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resourceTags Map<String>
- The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema String
- A JSON schema for the table.
- tableConstraints Property Map
- Defines the primary key and foreign keys. Structure is documented below.
- tableReplicationInfo Property Map
- Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- timePartitioning Property Map
- If specified, configures time-based partitioning for this table. Structure is documented below.
- view Property Map
- If specified, configures this table as a view. Structure is documented below.
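To tie several of the inputs above together, here is a hedged Python sketch of a time-partitioned, clustered table that requires a partition filter (the dataset, table, and column names are placeholders):
import pulumi_gcp as gcp

events = gcp.bigquery.Table("events",
    dataset_id="my_dataset",
    table_id="events",
    time_partitioning=gcp.bigquery.TableTimePartitioningArgs(
        type="DAY",
        field="event_ts"),    # assumed TIMESTAMP column declared in the schema
    clusterings=[
        "customer_id",        # assumed columns, in descending priority order
        "event_type",
    ],
    require_partition_filter=True,
    schema="""[
  {"name": "event_ts", "type": "TIMESTAMP", "mode": "REQUIRED"},
  {"name": "customer_id", "type": "STRING", "mode": "NULLABLE"},
  {"name": "event_type", "type": "STRING", "mode": "NULLABLE"}
]""")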
Outputs
All input properties are implicitly available as output properties. Additionally, the Table resource produces the following output properties:
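For example (a minimal sketch reusing the default_table resource from the example usage above), these output properties can be exported from a program:
import pulumi

# Assuming default_table is the gcp.bigquery.Table defined earlier.
pulumi.export("table_self_link", default_table.self_link)
pulumi.export("table_creation_time", default_table.creation_time)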
- CreationTime int
- The time when this table was created, in milliseconds since the epoch.
- EffectiveLabels Dictionary<string, string>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
  schema - (Optional) A JSON schema for the table.
  Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
  Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- Etag string
- A hash of the resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- LastModifiedTime int
- The time when this table was last modified, in milliseconds since the epoch.
- Location string
- The geographic location where the table resides. This value is inherited from the dataset.
- NumBytes int
- The size of this table in bytes, excluding any data in the streaming buffer.
- NumLongTermBytes int
- The number of bytes in the table that are considered "long-term storage".
- NumRows int
- The number of rows of data in this table, excluding any data in the streaming buffer.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- SelfLink string
- The URI of the created resource.
- Type string
- Describes the table type.
- CreationTime int
- The time when this table was created, in milliseconds since the epoch.
- EffectiveLabels map[string]string
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
  schema - (Optional) A JSON schema for the table.
  Note: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
  Note: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- Etag string
- A hash of the resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- LastModifiedTime int
- The time when this table was last modified, in milliseconds since the epoch.
- Location string
- The geographic location where the table resides. This value is inherited from the dataset.
- NumBytes int
- The size of this table in bytes, excluding any data in the streaming buffer.
- NumLongTermBytes int
- The number of bytes in the table that are considered "long-term storage".
- NumRows int
- The number of rows of data in this table, excluding any data in the streaming buffer.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- SelfLink string
- The URI of the created resource.
- Type string
- Describes the table type.
- creation
Time Integer - The time when this table was created, in milliseconds since the epoch.
- effectiveLabels Map<String,String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- etag String - A hash of the resource.
- id String - The provider-assigned unique ID for this managed resource.
- lastModifiedTime Integer - The time when this table was last modified, in milliseconds since the epoch.
- location String - The geographic location where the table resides. This value is inherited from the dataset.
- numBytes Integer - The size of this table in bytes, excluding any data in the streaming buffer.
- numLongTermBytes Integer - The number of bytes in the table that are considered "long-term storage".
- numRows Integer - The number of rows of data in this table, excluding any data in the streaming buffer.
- pulumiLabels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- selfLink String - The URI of the created resource.
- type String
- Describes the table type.
- creation
Time number - The time when this table was created, in milliseconds since the epoch.
- effectiveLabels {[key: string]: string} - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- etag string - A hash of the resource.
- id string - The provider-assigned unique ID for this managed resource.
- lastModifiedTime number - The time when this table was last modified, in milliseconds since the epoch.
- location string - The geographic location where the table resides. This value is inherited from the dataset.
- numBytes number - The size of this table in bytes, excluding any data in the streaming buffer.
- numLongTermBytes number - The number of bytes in the table that are considered "long-term storage".
- numRows number - The number of rows of data in this table, excluding any data in the streaming buffer.
- pulumiLabels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- selfLink string - The URI of the created resource.
- type string
- Describes the table type.
- creation_
time int - The time when this table was created, in milliseconds since the epoch.
- effective_labels Mapping[str, str] - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- etag str - A hash of the resource.
- id str - The provider-assigned unique ID for this managed resource.
- last_modified_time int - The time when this table was last modified, in milliseconds since the epoch.
- location str - The geographic location where the table resides. This value is inherited from the dataset.
- num_bytes int - The size of this table in bytes, excluding any data in the streaming buffer.
- num_long_term_bytes int - The number of bytes in the table that are considered "long-term storage".
- num_rows int - The number of rows of data in this table, excluding any data in the streaming buffer.
- pulumi_labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- self_link str - The URI of the created resource.
- type str
- Describes the table type.
- creation
Time Number - The time when this table was created, in milliseconds since the epoch.
- effectiveLabels Map<String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- etag String - A hash of the resource.
- id String - The provider-assigned unique ID for this managed resource.
- lastModifiedTime Number - The time when this table was last modified, in milliseconds since the epoch.
- location String - The geographic location where the table resides. This value is inherited from the dataset.
- numBytes Number - The size of this table in bytes, excluding any data in the streaming buffer.
- numLongTermBytes Number - The number of bytes in the table that are considered "long-term storage".
- numRows Number - The number of rows of data in this table, excluding any data in the streaming buffer.
- pulumiLabels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- selfLink String - The URI of the created resource.
- type String
- Describes the table type.
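The output properties above can be read from a Table resource like any other Pulumi output. A minimal TypeScript sketch, using placeholder dataset and table names that are not part of this reference:
import * as gcp from "@pulumi/gcp";
// Placeholder names for illustration only.
const dataset = new gcp.bigquery.Dataset("example-dataset", {datasetId: "example_dataset"});
const table = new gcp.bigquery.Table("example-table", {
    datasetId: dataset.datasetId,
    tableId: "example_table",
    deletionProtection: false,
});
// Output properties such as selfLink, numRows, and creationTime resolve once the
// table exists and can be exported like any other output.
export const tableSelfLink = table.selfLink;
export const tableNumRows = table.numRows;
export const tableCreationTime = table.creationTime;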
Look up Existing Table Resource
Get an existing Table resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: TableState, opts?: CustomResourceOptions): Table
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
biglake_configuration: Optional[TableBiglakeConfigurationArgs] = None,
clusterings: Optional[Sequence[str]] = None,
creation_time: Optional[int] = None,
dataset_id: Optional[str] = None,
deletion_protection: Optional[bool] = None,
description: Optional[str] = None,
effective_labels: Optional[Mapping[str, str]] = None,
encryption_configuration: Optional[TableEncryptionConfigurationArgs] = None,
etag: Optional[str] = None,
expiration_time: Optional[int] = None,
external_data_configuration: Optional[TableExternalDataConfigurationArgs] = None,
friendly_name: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
last_modified_time: Optional[int] = None,
location: Optional[str] = None,
materialized_view: Optional[TableMaterializedViewArgs] = None,
max_staleness: Optional[str] = None,
num_bytes: Optional[int] = None,
num_long_term_bytes: Optional[int] = None,
num_rows: Optional[int] = None,
project: Optional[str] = None,
pulumi_labels: Optional[Mapping[str, str]] = None,
range_partitioning: Optional[TableRangePartitioningArgs] = None,
require_partition_filter: Optional[bool] = None,
resource_tags: Optional[Mapping[str, str]] = None,
schema: Optional[str] = None,
self_link: Optional[str] = None,
table_constraints: Optional[TableTableConstraintsArgs] = None,
table_id: Optional[str] = None,
table_replication_info: Optional[TableTableReplicationInfoArgs] = None,
time_partitioning: Optional[TableTimePartitioningArgs] = None,
type: Optional[str] = None,
view: Optional[TableViewArgs] = None) -> Table
func GetTable(ctx *Context, name string, id IDInput, state *TableState, opts ...ResourceOption) (*Table, error)
public static Table Get(string name, Input<string> id, TableState? state, CustomResourceOptions? opts = null)
public static Table get(String name, Output<String> id, TableState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
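As a rough TypeScript illustration of the lookup above, the sketch below reads an already-provisioned table into a program. The resource name and ID are placeholders, and the import-style ID form projects/<project>/datasets/<dataset_id>/tables/<table_id> is assumed:
import * as gcp from "@pulumi/gcp";
// Placeholder ID; the import-style form is assumed here.
const existing = gcp.bigquery.Table.get(
    "existing-table",
    "projects/my-project/datasets/example_dataset/tables/example_table",
);
// State properties listed below are then available as outputs.
export const existingTableLocation = existing.location;
export const existingTableType = existing.type;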
- Biglake
Configuration TableBiglake Configuration - Specifies the configuration of a BigLake managed table. Structure is documented below
- Clusterings List<string>
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- Creation
Time int - The time when this table was created, in milliseconds since the epoch.
- Dataset
Id string - The dataset ID to create the table in. Changing this forces a new resource to be created.
- DeletionProtection bool - Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or an update that would delete the instance will fail.
- Description string - The field description.
- EffectiveLabels Dictionary<string, string> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- EncryptionConfiguration TableEncryptionConfiguration - Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- Etag string
- A hash of the resource.
- Expiration
Time int - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- ExternalDataConfiguration TableExternalDataConfiguration - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- Friendly
Name string - A descriptive name for the table.
- Labels Dictionary<string, string>
A mapping of labels to assign to the resource.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- LastModifiedTime int - The time when this table was last modified, in milliseconds since the epoch.
- Location string - The geographic location where the table resides. This value is inherited from the dataset.
- MaterializedView TableMaterializedView - If specified, configures this table as a materialized view. Structure is documented below.
- MaxStaleness string - The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- NumBytes int - The size of this table in bytes, excluding any data in the streaming buffer.
- NumLongTermBytes int - The number of bytes in the table that are considered "long-term storage".
- NumRows int - The number of rows of data in this table, excluding any data in the streaming buffer.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PulumiLabels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- RangePartitioning TableRangePartitioning - If specified, configures range-based partitioning for this table. Structure is documented below.
- RequirePartitionFilter bool - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- ResourceTags Dictionary<string, string> - The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- Schema string
- A JSON schema for the table.
- Self
Link string - The URI of the created resource.
- Table
Constraints TableTable Constraints - Defines the primary key and foreign keys. Structure is documented below.
- Table
Id string - A unique ID for the resource. Changing this forces a new resource to be created.
- TableReplicationInfo TableTableReplicationInfo - Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- TimePartitioning TableTimePartitioning - If specified, configures time-based partitioning for this table. Structure is documented below.
- Type string - Describes the table type.
- View TableView - If specified, configures this table as a view. Structure is documented below.
- Biglake
Configuration TableBiglake Configuration Args - Specifies the configuration of a BigLake managed table. Structure is documented below
- Clusterings []string
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- Creation
Time int - The time when this table was created, in milliseconds since the epoch.
- Dataset
Id string - The dataset ID to create the table in. Changing this forces a new resource to be created.
- DeletionProtection bool - Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or an update that would delete the instance will fail.
- Description string - The field description.
- EffectiveLabels map[string]string - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- EncryptionConfiguration TableEncryptionConfigurationArgs - Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- Etag string
- A hash of the resource.
- Expiration
Time int - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- ExternalDataConfiguration TableExternalDataConfigurationArgs - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- Friendly
Name string - A descriptive name for the table.
- Labels map[string]string
A mapping of labels to assign to the resource.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- LastModifiedTime int - The time when this table was last modified, in milliseconds since the epoch.
- Location string - The geographic location where the table resides. This value is inherited from the dataset.
- MaterializedView TableMaterializedViewArgs - If specified, configures this table as a materialized view. Structure is documented below.
- MaxStaleness string - The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- NumBytes int - The size of this table in bytes, excluding any data in the streaming buffer.
- NumLongTermBytes int - The number of bytes in the table that are considered "long-term storage".
- NumRows int - The number of rows of data in this table, excluding any data in the streaming buffer.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- PulumiLabels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- RangePartitioning TableRangePartitioningArgs - If specified, configures range-based partitioning for this table. Structure is documented below.
- RequirePartitionFilter bool - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- ResourceTags map[string]string - The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- Schema string
- A JSON schema for the table.
- Self
Link string - The URI of the created resource.
- Table
Constraints TableTable Constraints Args - Defines the primary key and foreign keys. Structure is documented below.
- Table
Id string - A unique ID for the resource. Changing this forces a new resource to be created.
- TableReplicationInfo TableTableReplicationInfoArgs - Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- TimePartitioning TableTimePartitioningArgs - If specified, configures time-based partitioning for this table. Structure is documented below.
- Type string - Describes the table type.
- View TableViewArgs - If specified, configures this table as a view. Structure is documented below.
- biglake
Configuration TableBiglake Configuration - Specifies the configuration of a BigLake managed table. Structure is documented below
- clusterings List<String>
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation
Time Integer - The time when this table was created, in milliseconds since the epoch.
- dataset
Id String - The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletionProtection Boolean - Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or an update that would delete the instance will fail.
- description String - The field description.
- effectiveLabels Map<String,String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- encryptionConfiguration TableEncryptionConfiguration - Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag String
- A hash of the resource.
- expiration
Time Integer - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- externalDataConfiguration TableExternalDataConfiguration - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name String - A descriptive name for the table.
- labels Map<String,String>
A mapping of labels to assign to the resource.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- lastModifiedTime Integer - The time when this table was last modified, in milliseconds since the epoch.
- location String - The geographic location where the table resides. This value is inherited from the dataset.
- materializedView TableMaterializedView - If specified, configures this table as a materialized view. Structure is documented below.
- maxStaleness String - The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- numBytes Integer - The size of this table in bytes, excluding any data in the streaming buffer.
- numLongTermBytes Integer - The number of bytes in the table that are considered "long-term storage".
- numRows Integer - The number of rows of data in this table, excluding any data in the streaming buffer.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- rangePartitioning TableRangePartitioning - If specified, configures range-based partitioning for this table. Structure is documented below.
- requirePartitionFilter Boolean - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resourceTags Map<String,String> - The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema String
- A JSON schema for the table.
- self
Link String - The URI of the created resource.
- table
Constraints TableTable Constraints - Defines the primary key and foreign keys. Structure is documented below.
- table
Id String - A unique ID for the resource. Changing this forces a new resource to be created.
- tableReplicationInfo TableTableReplicationInfo - Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- timePartitioning TableTimePartitioning - If specified, configures time-based partitioning for this table. Structure is documented below.
- type String - Describes the table type.
- view TableView - If specified, configures this table as a view. Structure is documented below.
- biglake
Configuration TableBiglake Configuration - Specifies the configuration of a BigLake managed table. Structure is documented below
- clusterings string[]
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation
Time number - The time when this table was created, in milliseconds since the epoch.
- dataset
Id string - The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletionProtection boolean - Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or an update that would delete the instance will fail.
- description string - The field description.
- effectiveLabels {[key: string]: string} - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- encryptionConfiguration TableEncryptionConfiguration - Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag string
- A hash of the resource.
- expiration
Time number - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- externalDataConfiguration TableExternalDataConfiguration - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name string - A descriptive name for the table.
- labels {[key: string]: string}
A mapping of labels to assign to the resource.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- lastModifiedTime number - The time when this table was last modified, in milliseconds since the epoch.
- location string - The geographic location where the table resides. This value is inherited from the dataset.
- materializedView TableMaterializedView - If specified, configures this table as a materialized view. Structure is documented below.
- maxStaleness string - The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- numBytes number - The size of this table in bytes, excluding any data in the streaming buffer.
- numLongTermBytes number - The number of bytes in the table that are considered "long-term storage".
- numRows number - The number of rows of data in this table, excluding any data in the streaming buffer.
- project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- rangePartitioning TableRangePartitioning - If specified, configures range-based partitioning for this table. Structure is documented below.
- requirePartitionFilter boolean - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resourceTags {[key: string]: string} - The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema string
- A JSON schema for the table.
- self
Link string - The URI of the created resource.
- table
Constraints TableTable Constraints - Defines the primary key and foreign keys. Structure is documented below.
- table
Id string - A unique ID for the resource. Changing this forces a new resource to be created.
- tableReplicationInfo TableTableReplicationInfo - Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- timePartitioning TableTimePartitioning - If specified, configures time-based partitioning for this table. Structure is documented below.
- type string - Describes the table type.
- view TableView - If specified, configures this table as a view. Structure is documented below.
- biglake_
configuration TableBiglake Configuration Args - Specifies the configuration of a BigLake managed table. Structure is documented below
- clusterings Sequence[str]
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation_
time int - The time when this table was created, in milliseconds since the epoch.
- dataset_
id str - The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletion_protection bool - Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or an update that would delete the instance will fail.
- description str - The field description.
- effective_labels Mapping[str, str] - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- encryption_configuration TableEncryptionConfigurationArgs - Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag str
- A hash of the resource.
- expiration_
time int - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- external_data_configuration TableExternalDataConfigurationArgs - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly_
name str - A descriptive name for the table.
- labels Mapping[str, str]
A mapping of labels to assign to the resource.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- last_modified_time int - The time when this table was last modified, in milliseconds since the epoch.
- location str - The geographic location where the table resides. This value is inherited from the dataset.
- materialized_view TableMaterializedViewArgs - If specified, configures this table as a materialized view. Structure is documented below.
- max_staleness str - The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- num_bytes int - The size of this table in bytes, excluding any data in the streaming buffer.
- num_long_term_bytes int - The number of bytes in the table that are considered "long-term storage".
- num_rows int - The number of rows of data in this table, excluding any data in the streaming buffer.
- project str - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumi_labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- range_partitioning TableRangePartitioningArgs - If specified, configures range-based partitioning for this table. Structure is documented below.
- require_partition_filter bool - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resource_tags Mapping[str, str] - The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema str
- A JSON schema for the table.
- self_
link str - The URI of the created resource.
- table_
constraints TableTable Constraints Args - Defines the primary key and foreign keys. Structure is documented below.
- table_
id str - A unique ID for the resource. Changing this forces a new resource to be created.
- table_replication_info TableTableReplicationInfoArgs - Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- time_partitioning TableTimePartitioningArgs - If specified, configures time-based partitioning for this table. Structure is documented below.
- type str - Describes the table type.
- view TableViewArgs - If specified, configures this table as a view. Structure is documented below.
- biglake
Configuration Property Map - Specifies the configuration of a BigLake managed table. Structure is documented below
- clusterings List<String>
- Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
- creation
Time Number - The time when this table was created, in milliseconds since the epoch.
- dataset
Id String - The dataset ID to create the table in. Changing this forces a new resource to be created.
- deletionProtection Boolean - Whether or not to allow the provider to destroy the instance. Unless this field is set to false in state, a destroy or an update that would delete the instance will fail.
- description String - The field description.
- effectiveLabels Map<String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
schema - (Optional) A JSON schema for the table.
~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
~>NOTE: If you use external_data_configuration documented below and do not set external_data_configuration.connection_id, schemas must be specified with external_data_configuration.schema. Otherwise, schemas must be specified with this top-level field.
- encryptionConfiguration Property Map - Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user. Structure is documented below.
- etag String
- A hash of the resource.
- expiration
Time Number - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
- externalDataConfiguration Property Map - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
- friendly
Name String - A descriptive name for the table.
- labels Map<String>
A mapping of labels to assign to the resource.
Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- lastModifiedTime Number - The time when this table was last modified, in milliseconds since the epoch.
- location String - The geographic location where the table resides. This value is inherited from the dataset.
- materializedView Property Map - If specified, configures this table as a materialized view. Structure is documented below.
- maxStaleness String - The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of SQL IntervalValue type.
- numBytes Number - The size of this table in bytes, excluding any data in the streaming buffer.
- numLongTermBytes Number - The number of bytes in the table that are considered "long-term storage".
- numRows Number - The number of rows of data in this table, excluding any data in the streaming buffer.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- pulumiLabels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- rangePartitioning Property Map - If specified, configures range-based partitioning for this table. Structure is documented below.
- requirePartitionFilter Boolean - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- resourceTags Map<String> - The tags attached to this table. Tag keys are globally unique. Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".
- schema String
- A JSON schema for the table.
- self
Link String - The URI of the created resource.
- table
Constraints Property Map - Defines the primary key and foreign keys. Structure is documented below.
- table
Id String - A unique ID for the resource. Changing this forces a new resource to be created.
- tableReplicationInfo Property Map - Replication info of a table created using "AS REPLICA" DDL like: CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv. Structure is documented below.
- timePartitioning Property Map - If specified, configures time-based partitioning for this table. Structure is documented below.
- type String
- Describes the table type.
- view Property Map
- If specified, configures this table as a view. Structure is documented below.
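To tie several of the arguments above together, here is a hedged TypeScript sketch of a time-partitioned table that also sets deletionProtection and requirePartitionFilter. The dataset/table IDs and the event_ts column are illustrative, and the field sub-property of timePartitioning comes from the nested type documented below:
import * as gcp from "@pulumi/gcp";
// Illustrative names only.
const dataset = new gcp.bigquery.Dataset("events-dataset", {datasetId: "events"});
const events = new gcp.bigquery.Table("events-table", {
    datasetId: dataset.datasetId,
    tableId: "events_daily",
    // Allow `pulumi destroy` to delete the table.
    deletionProtection: false,
    // Day-based time partitioning on a timestamp column (field is assumed here).
    timePartitioning: {
        type: "DAY",
        field: "event_ts",
    },
    // Queries must supply a partition filter.
    requirePartitionFilter: true,
    schema: JSON.stringify([
        {name: "event_ts", type: "TIMESTAMP", mode: "REQUIRED"},
        {name: "payload", type: "STRING", mode: "NULLABLE"},
    ]),
});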
Supporting Types
TableBiglakeConfiguration, TableBiglakeConfigurationArgs
- Connection
Id string - The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or projects/<project_id>/locations/<location_id>/connections/<connection_id>".
- File
Format string - The file format the table data is stored in.
- Storage
Uri string - The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
- Table
Format string - The table format the metadata only snapshots are stored in.
- Connection
Id string - The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or projects/<project_id>/locations/<location_id>/connections/<connection_id>".
- File
Format string - The file format the table data is stored in.
- Storage
Uri string - The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
- Table
Format string - The table format the metadata only snapshots are stored in.
- connection
Id String - The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or projects/<project_id>/locations/<location_id>/connections/<connection_id>".
- file
Format String - The file format the table data is stored in.
- storage
Uri String - The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
- table
Format String - The table format the metadata only snapshots are stored in.
- connection
Id string - The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or projects/<project_id>/locations/<location_id>/connections/<connection_id>".
- file
Format string - The file format the table data is stored in.
- storage
Uri string - The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
- table
Format string - The table format the metadata only snapshots are stored in.
- connection_
id str - The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or projects/<project_id>/locations/<location_id>/connections/<connection_id>".
- file_
format str - The file format the table data is stored in.
- storage_
uri str - The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
- table_
format str - The table format the metadata only snapshots are stored in.
- connection
Id String - The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or projects/<project_id>/locations/<location_id>/connections/<connection_id>".
- file
Format String - The file format the table data is stored in.
- storage
Uri String - The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"
- table
Format String - The table format the metadata only snapshots are stored in.
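A hedged TypeScript sketch of the biglakeConfiguration block above. The project, connection, and bucket names are placeholders, and the "PARQUET"/"ICEBERG" values are shown as typical choices for BigLake managed tables rather than an exhaustive list:
import * as gcp from "@pulumi/gcp";
// The connection and the Cloud Storage bucket are assumed to already exist.
const managed = new gcp.bigquery.Table("biglake-table", {
    datasetId: "example_dataset",
    tableId: "biglake_table",
    deletionProtection: false,
    biglakeConfiguration: {
        // The "<project_id>.<location_id>.<connection_id>" form is also accepted.
        connectionId: "projects/my-project/locations/us/connections/my-connection",
        storageUri: "gs://my-bucket/biglake_table/",
        fileFormat: "PARQUET",
        tableFormat: "ICEBERG",
    },
});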
TableEncryptionConfiguration, TableEncryptionConfigurationArgs
- KmsKeyName string - The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- KmsKeyVersion string - The self link or full name of the kms key version used to encrypt this table.
- KmsKeyName string - The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- KmsKeyVersion string - The self link or full name of the kms key version used to encrypt this table.
- kmsKeyName String - The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- kmsKeyVersion String - The self link or full name of the kms key version used to encrypt this table.
- kmsKeyName string - The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- kmsKeyVersion string - The self link or full name of the kms key version used to encrypt this table.
- kms_key_name str - The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- kms_key_version str - The self link or full name of the kms key version used to encrypt this table.
- kmsKeyName String - The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource.
- kmsKeyVersion String - The self link or full name of the kms key version used to encrypt this table.
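A hedged TypeScript sketch of encryptionConfiguration, using the gcp.bigquery.getDefaultServiceAccount datasource and the gcp.kms.CryptoKeyIAMBinding resource mentioned above. The project, key ring, key, and dataset names are placeholders:
import * as gcp from "@pulumi/gcp";
// Grant the default BigQuery service account access to an existing key (placeholder name).
const kmsKey = "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key";
const bqServiceAccount = gcp.bigquery.getDefaultServiceAccount();
const keyAccess = new gcp.kms.CryptoKeyIAMBinding("bq-key-access", {
    cryptoKeyId: kmsKey,
    role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
    members: [bqServiceAccount.then(sa => `serviceAccount:${sa.email}`)],
});
const encrypted = new gcp.bigquery.Table("encrypted-table", {
    datasetId: "example_dataset",
    tableId: "encrypted_table",
    deletionProtection: false,
    encryptionConfiguration: {
        kmsKeyName: kmsKey,
    },
}, {dependsOn: [keyAccess]});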
TableExternalDataConfiguration, TableExternalDataConfigurationArgs
- Autodetect bool
- Let BigQuery try to autodetect the schema and format of the table.
- Source
Uris List<string> - A list of the fully-qualified URIs that point to your data in Google Cloud.
- AvroOptions TableExternalDataConfigurationAvroOptions - Additional options if source_format is set to "AVRO". Structure is documented below.
- BigtableOptions TableExternalDataConfigurationBigtableOptions - Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
- Compression string
- The compression type of the data source. Valid values are "NONE" or "GZIP".
- ConnectionId string - The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.
~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- CsvOptions TableExternalDataConfigurationCsvOptions - Additional properties to set if source_format is set to "CSV". Structure is documented below.
- FileSetSpecType string - Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- GoogleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions - Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
- HivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions - When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- IgnoreUnknownValues bool - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- JsonExtension string - Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
- JsonOptions TableExternalDataConfigurationJsonOptions - Additional properties to set if source_format is set to "JSON". Structure is documented below.
- MaxBadRecords int - The maximum number of bad records that BigQuery can ignore when reading data.
- MetadataCacheMode string - Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
- ObjectMetadata string - Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
- ParquetOptions TableExternalDataConfigurationParquetOptions - Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
- ReferenceFileSchemaUri string - When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- Schema string
A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table, therefore any changes on the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external datasource; after creation the computed schema will be stored in google_bigquery_table.schema.
~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- SourceFormat string - The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS" the scopes must include "https://www.googleapis.com/auth/drive.readonly".
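A hedged TypeScript sketch of an externalDataConfiguration backed by CSV files in Cloud Storage. The bucket path is a placeholder, and the csvOptions sub-fields shown (quote, skipLeadingRows) are assumptions drawn from the CSV options type documented below:
import * as gcp from "@pulumi/gcp";
// Placeholder dataset/bucket names; autodetect infers the schema from the CSV files.
const csvTable = new gcp.bigquery.Table("csv-external-table", {
    datasetId: "example_dataset",
    tableId: "csv_external",
    deletionProtection: false,
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "CSV",
        sourceUris: ["gs://my-bucket/data/*.csv"],
        csvOptions: {
            quote: "\"",        // assumed required by the CSV options type
            skipLeadingRows: 1, // skip the header row
        },
    },
});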
- Autodetect bool
- Let BigQuery try to autodetect the schema and format of the table.
- Source
Uris []string - A list of the fully-qualified URIs that point to your data in Google Cloud.
- AvroOptions TableExternalDataConfigurationAvroOptions - Additional options if source_format is set to "AVRO". Structure is documented below.
- BigtableOptions TableExternalDataConfigurationBigtableOptions - Additional properties to set if source_format is set to "BIGTABLE". Structure is documented below.
- Compression string
- The compression type of the data source. Valid values are "NONE" or "GZIP".
- ConnectionId string - The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connection_id can have the form {{project}}.{{location}}.{{connection_id}} or projects/{{project}}/locations/{{location}}/connections/{{connection_id}}.
~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- CsvOptions TableExternalDataConfigurationCsvOptions - Additional properties to set if source_format is set to "CSV". Structure is documented below.
- FileSetSpecType string - Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- GoogleSheetsOptions TableExternalDataConfigurationGoogleSheetsOptions - Additional options if source_format is set to "GOOGLE_SHEETS". Structure is documented below.
- HivePartitioningOptions TableExternalDataConfigurationHivePartitioningOptions - When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- IgnoreUnknownValues bool - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- JsonExtension string - Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the JSON source format. Valid values are: GEOJSON.
- JsonOptions TableExternalDataConfigurationJsonOptions - Additional properties to set if source_format is set to "JSON". Structure is documented below.
- MaxBadRecords int - The maximum number of bad records that BigQuery can ignore when reading data.
- MetadataCacheMode string - Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are AUTOMATIC and MANUAL.
- ObjectMetadata string - Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If object_metadata is set, source_format should be omitted.
- ParquetOptions TableExternalDataConfigurationParquetOptions - Additional properties to set if source_format is set to "PARQUET". Structure is documented below.
- ReferenceFileSchemaUri string - When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- Schema string - A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema. ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- SourceFormat string - The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect Boolean
- Let BigQuery try to autodetect the schema and format of the table.
- source
Uris List<String> - A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro
Options TableExternal Data Configuration Avro Options - Additional options if
source_format
is set to "AVRO". Structure is documented below. - bigtable
Options TableExternal Data Configuration Bigtable Options - Additional properties to set if
source_format
is set to "BIGTABLE". Structure is documented below. - compression String
- The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection
Id String The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv
Options TableExternal Data Configuration Csv Options - Additional properties to set if
source_format
is set to "CSV". Structure is documented below. - file
Set StringSpec Type - Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google
Sheets TableOptions External Data Configuration Google Sheets Options - Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below. - hive
Partitioning TableOptions External Data Configuration Hive Partitioning Options - When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore
Unknown BooleanValues - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json
Extension String - Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the
JSON
source format. Valid values are:GEOJSON
. - json
Options TableExternal Data Configuration Json Options - Additional properties to set if
source_format
is set to "JSON". Structure is documented below. - max
Bad IntegerRecords - The maximum number of bad records that BigQuery can ignore when reading data.
- metadata
Cache StringMode - Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
. - object
Metadata String - Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted. - parquet
Options TableExternal Data Configuration Parquet Options - Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below. - reference
File StringSchema Uri - When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema String - A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema. ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- sourceFormat String - The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect boolean
- Let BigQuery try to autodetect the schema and format of the table.
- source
Uris string[] - A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro
Options TableExternal Data Configuration Avro Options - Additional options if
source_format
is set to "AVRO". Structure is documented below. - bigtable
Options TableExternal Data Configuration Bigtable Options - Additional properties to set if
source_format
is set to "BIGTABLE". Structure is documented below. - compression string
- The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection
Id string The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv
Options TableExternal Data Configuration Csv Options - Additional properties to set if
source_format
is set to "CSV". Structure is documented below. - file
Set stringSpec Type - Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google
Sheets TableOptions External Data Configuration Google Sheets Options - Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below. - hive
Partitioning TableOptions External Data Configuration Hive Partitioning Options - When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore
Unknown booleanValues - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json
Extension string - Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the
JSON
source format. Valid values are:GEOJSON
. - json
Options TableExternal Data Configuration Json Options - Additional properties to set if
source_format
is set to "JSON". Structure is documented below. - max
Bad numberRecords - The maximum number of bad records that BigQuery can ignore when reading data.
- metadata
Cache stringMode - Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
. - object
Metadata string - Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted. - parquet
Options TableExternal Data Configuration Parquet Options - Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below. - reference
File stringSchema Uri - When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema string - A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema. ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- sourceFormat string - The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect bool
- Let BigQuery try to autodetect the schema and format of the table.
- source_
uris Sequence[str] - A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro_
options TableExternal Data Configuration Avro Options - Additional options if
source_format
is set to "AVRO". Structure is documented below. - bigtable_
options TableExternal Data Configuration Bigtable Options - Additional properties to set if
source_format
is set to "BIGTABLE". Structure is documented below. - compression str
- The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection_
id str The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv_
options TableExternal Data Configuration Csv Options - Additional properties to set if
source_format
is set to "CSV". Structure is documented below. - file_
set_ strspec_ type - Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google_
sheets_ Tableoptions External Data Configuration Google Sheets Options - Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below. - hive_
partitioning_ Tableoptions External Data Configuration Hive Partitioning Options - When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore_
unknown_ boolvalues - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json_
extension str - Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the
JSON
source format. Valid values are:GEOJSON
. - json_
options TableExternal Data Configuration Json Options - Additional properties to set if
source_format
is set to "JSON". Structure is documented below. - max_
bad_ intrecords - The maximum number of bad records that BigQuery can ignore when reading data.
- metadata_
cache_ strmode - Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
. - object_
metadata str - Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted. - parquet_
options TableExternal Data Configuration Parquet Options - Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below. - reference_
file_ strschema_ uri - When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema str - A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema. ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- source_format str - The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly".
- autodetect Boolean
- Let BigQuery try to autodetect the schema and format of the table.
- source
Uris List<String> - A list of the fully-qualified URIs that point to your data in Google Cloud.
- avro
Options Property Map - Additional options if
source_format
is set to "AVRO". Structure is documented below. - bigtable
Options Property Map - Additional properties to set if
source_format
is set to "BIGTABLE". Structure is documented below. - compression String
- The compression type of the data source. Valid values are "NONE" or "GZIP".
- connection
Id String The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The
connection_id
can have the form{{project}}.{{location}}.{{connection_id}}
orprojects/{{project}}/locations/{{location}}/connections/{{connection_id}}
.~>NOTE: If you set
external_data_configuration.connection_id
, the table schema must be specified using the top-levelschema
field documented above.- csv
Options Property Map - Additional properties to set if
source_format
is set to "CSV". Structure is documented below. - file
Set StringSpec Type - Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. Docs
- google
Sheets Property MapOptions - Additional options if
source_format
is set to "GOOGLE_SHEETS". Structure is documented below. - hive
Partitioning Property MapOptions - When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification. Structure is documented below.
- ignore
Unknown BooleanValues - Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
- json
Extension String - Used to indicate that a JSON variant, rather than normal JSON, is being used as the sourceFormat. This should only be used in combination with the
JSON
source format. Valid values are:GEOJSON
. - json
Options Property Map - Additional properties to set if
source_format
is set to "JSON". Structure is documented below. - max
Bad NumberRecords - The maximum number of bad records that BigQuery can ignore when reading data.
- metadata
Cache StringMode - Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. Valid values are
AUTOMATIC
andMANUAL
. - object
Metadata String - Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If
object_metadata
is set,source_format
should be omitted. - parquet
Options Property Map - Additional properties to set if
source_format
is set to "PARQUET". Structure is documented below. - reference
File StringSchema Uri - When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.
- schema String - A JSON schema for the external table. Schema is required for CSV and JSON formats if autodetect is not on. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC and Parquet formats. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. Furthermore, drift for this field cannot be detected because BigQuery only uses this schema to compute the effective schema for the table; therefore any changes to the configured value will force the table to be recreated. This schema is effectively only applied when creating a table from an external data source; after creation the computed schema will be stored in google_bigquery_table.schema. ~>NOTE: If you set external_data_configuration.connection_id, the table schema must be specified using the top-level schema field documented above.
- sourceFormat String - The data format. Please see sourceFormat under ExternalDataConfiguration in BigQuery's public API documentation for supported formats. To use "GOOGLE_SHEETS", the scopes must include "https://www.googleapis.com/auth/drive.readonly". (A short sketch combining connection_id and hive partitioning follows this list.)
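To tie the connection_id, hive partitioning, and schema notes above together, here is a minimal sketch of an external Parquet table that reads hive-partitioned files through a BigQuery connection. The bucket layout gs://my-bucket/tables/events/ and the dataset example_dataset are assumptions; with connection_id set, any explicit schema would have to go in the top-level schema field rather than inside externalDataConfiguration.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Assumed: a Cloud Resource connection used to read the GCS objects.
const conn = new gcp.bigquery.Connection("gcs-conn", {
    location: "US",
    cloudResource: {},
});
const events = new gcp.bigquery.Table("events", {
    datasetId: "example_dataset", // assumed to exist already
    tableId: "events",
    deletionProtection: false,
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "PARQUET",
        // connectionId accepts "{{project}}.{{location}}.{{connection_id}}" or the
        // projects/.../locations/.../connections/... form; conn.name uses the latter.
        connectionId: conn.name,
        sourceUris: ["gs://my-bucket/tables/events/*"],
        hivePartitioningOptions: {
            mode: "AUTO",
            sourceUriPrefix: "gs://my-bucket/tables/events/",
        },
    },
});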
TableExternalDataConfigurationAvroOptions, TableExternalDataConfigurationAvroOptionsArgs
- UseAvroLogicalTypes bool - If set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- UseAvroLogicalTypes bool - If set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- useAvroLogicalTypes Boolean - If set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- useAvroLogicalTypes boolean - If set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- use_avro_logical_types bool - If set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
- useAvroLogicalTypes Boolean - If set to true, indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). (See the sketch after this list for an example.)
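For example, a minimal sketch of an Avro-backed external table that maps Avro logical types onto BigQuery types; the bucket path and dataset name are placeholders, not values from this page.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const avroEvents = new gcp.bigquery.Table("avro-events", {
    datasetId: "example_dataset", // assumed to exist already
    tableId: "avro_events",
    deletionProtection: false,
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "AVRO",
        sourceUris: ["gs://my-bucket/avro/*.avro"],
        avroOptions: {
            // Interpret e.g. timestamp-micros as TIMESTAMP instead of a raw INTEGER.
            useAvroLogicalTypes: true,
        },
    },
});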
TableExternalDataConfigurationBigtableOptions, TableExternalDataConfigurationBigtableOptionsArgs
- Column
Families List<TableExternal Data Configuration Bigtable Options Column Family> - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
- Ignore
Unspecified boolColumn Families - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
- Output
Column boolFamilies As Json - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
- Read
Rowkey boolAs String - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
- Column
Families []TableExternal Data Configuration Bigtable Options Column Family - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
- Ignore
Unspecified boolColumn Families - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
- Output
Column boolFamilies As Json - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
- Read
Rowkey boolAs String - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
- column
Families List<TableExternal Data Configuration Bigtable Options Column Family> - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
- ignore
Unspecified BooleanColumn Families - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
- output
Column BooleanFamilies As Json - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
- read
Rowkey BooleanAs String - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
- column
Families TableExternal Data Configuration Bigtable Options Column Family[] - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
- ignore
Unspecified booleanColumn Families - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
- output
Column booleanFamilies As Json - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
- read
Rowkey booleanAs String - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
- column_
families Sequence[TableExternal Data Configuration Bigtable Options Column Family] - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
- ignore_
unspecified_ boolcolumn_ families - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
- output_
column_ boolfamilies_ as_ json - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
- read_
rowkey_ boolas_ string - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
- column
Families List<Property Map> - A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable. Structure is documented below.
- ignore
Unspecified BooleanColumn Families - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
- output
Column BooleanFamilies As Json - If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.
- read
Rowkey BooleanAs String - If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.
TableExternalDataConfigurationBigtableOptionsColumnFamily, TableExternalDataConfigurationBigtableOptionsColumnFamilyArgs
- Columns
List<Table
External Data Configuration Bigtable Options Column Family Column> - A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below.
- Encoding string
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
- Family
Id string - Identifier of the column family.
- Only
Read boolLatest - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
- Type string
- The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
- Columns
[]Table
External Data Configuration Bigtable Options Column Family Column - A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below.
- Encoding string
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
- Family
Id string - Identifier of the column family.
- Only
Read boolLatest - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
- Type string
- The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
- columns
List<Table
External Data Configuration Bigtable Options Column Family Column> - A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below.
- encoding String
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
- family
Id String - Identifier of the column family.
- only
Read BooleanLatest - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
- type String
- The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
- columns
Table
External Data Configuration Bigtable Options Column Family Column[] - A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below.
- encoding string
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
- family
Id string - Identifier of the column family.
- only
Read booleanLatest - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
- type string
- The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
- columns
Sequence[Table
External Data Configuration Bigtable Options Column Family Column] - A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below.
- encoding str
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
- family_
id str - Identifier of the column family.
- only_
read_ boollatest - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
- type str
- The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
- columns List<Property Map>
- A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field. Structure is documented below.
- encoding String
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
- family
Id String - Identifier of the column family.
- only
Read BooleanLatest - If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
- type String
- The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
TableExternalDataConfigurationBigtableOptionsColumnFamilyColumn, TableExternalDataConfigurationBigtableOptionsColumnFamilyColumnArgs
- Encoding string
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
- Field
Name string - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
- Only
Read boolLatest - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
- Qualifier
Encoded string - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
- Qualifier
String string - Qualifier string.
- Type string
- The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
- Encoding string
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
- Field
Name string - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
- Only
Read boolLatest - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
- Qualifier
Encoded string - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
- Qualifier
String string - Qualifier string.
- Type string
- The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
- encoding String
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
- field
Name String - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
- only
Read BooleanLatest - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
- qualifier
Encoded String - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
- qualifier
String String - Qualifier string.
- type String
- The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
- encoding string
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
- field
Name string - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
- only
Read booleanLatest - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
- qualifier
Encoded string - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
- qualifier
String string - Qualifier string.
- type string
- The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
- encoding str
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
- field_
name str - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
- only_
read_ boollatest - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
- qualifier_
encoded str - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
- qualifier_
string str - Qualifier string.
- type str
- The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
- encoding String
- The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
- field
Name String - If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
- only
Read BooleanLatest - If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
- qualifier
Encoded String - Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.
- qualifier
String String - Qualifier string.
- type String
- The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
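Putting the three Bigtable-related option types above together, the following is a rough sketch of an external table over a Bigtable source that exposes one column family and one typed column. The Bigtable source URI, the dataset name, and the qualifier are assumptions for illustration only.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const bigtableBacked = new gcp.bigquery.Table("bigtable-backed", {
    datasetId: "example_dataset", // assumed to exist already
    tableId: "bigtable_backed",
    deletionProtection: false,
    externalDataConfiguration: {
        autodetect: false,
        sourceFormat: "BIGTABLE",
        // Assumed Bigtable source URI (a single table URI is expected for Bigtable).
        sourceUris: ["https://googleapis.com/bigtable/projects/my-project/instances/my-instance/tables/my-table"],
        bigtableOptions: {
            readRowkeyAsString: true,
            ignoreUnspecifiedColumnFamilies: true,
            columnFamilies: [{
                familyId: "stats",
                encoding: "TEXT",
                onlyReadLatest: true,
                columns: [{
                    qualifierString: "views", // hypothetical qualifier
                    fieldName: "views",
                    type: "INTEGER",
                }],
            }],
        },
    },
});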
TableExternalDataConfigurationCsvOptions, TableExternalDataConfigurationCsvOptionsArgs
- Quote string
- The value that is used to quote data sections in a
CSV file. If your data does not contain quoted sections, set the
property value to an empty string. If your data contains quoted newline
characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set. - Allow
Jagged boolRows - Indicates if BigQuery should accept rows that are missing trailing optional columns.
- Allow
Quoted boolNewlines - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- Encoding string
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- Field
Delimiter string - The separator for fields in a CSV file.
- Skip
Leading intRows - The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- Quote string
- The value that is used to quote data sections in a
CSV file. If your data does not contain quoted sections, set the
property value to an empty string. If your data contains quoted newline
characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set. - Allow
Jagged boolRows - Indicates if BigQuery should accept rows that are missing trailing optional columns.
- Allow
Quoted boolNewlines - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- Encoding string
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- Field
Delimiter string - The separator for fields in a CSV file.
- Skip
Leading intRows - The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote String
- The value that is used to quote data sections in a
CSV file. If your data does not contain quoted sections, set the
property value to an empty string. If your data contains quoted newline
characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set. - allow
Jagged BooleanRows - Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allow
Quoted BooleanNewlines - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding String
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- field
Delimiter String - The separator for fields in a CSV file.
- skip
Leading IntegerRows - The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote string
- The value that is used to quote data sections in a
CSV file. If your data does not contain quoted sections, set the
property value to an empty string. If your data contains quoted newline
characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set. - allow
Jagged booleanRows - Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allow
Quoted booleanNewlines - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding string
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- field
Delimiter string - The separator for fields in a CSV file.
- skip
Leading numberRows - The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote str
- The value that is used to quote data sections in a
CSV file. If your data does not contain quoted sections, set the
property value to an empty string. If your data contains quoted newline
characters, you must also set the
allow_quoted_newlines
property to true. The API-side default is"
, specified in the provider escaped as\"
. Due to limitations with default values, this value is required to be explicitly set. - allow_
jagged_ boolrows - Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allow_
quoted_ boolnewlines - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding str
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- field_
delimiter str - The separator for fields in a CSV file.
- skip_
leading_ introws - The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
- quote String
- The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in the provider escaped as \". Due to limitations with default values, this value is required to be explicitly set.
- allowJaggedRows Boolean
- Indicates if BigQuery should accept rows that are missing trailing optional columns.
- allowQuotedNewlines Boolean
- Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
- encoding String
- The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
- fieldDelimiter String
- The separator for fields in a CSV file.
- skipLeadingRows Number
- The number of rows at the top of a CSV file that BigQuery will skip when reading the data.
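For illustration only, a minimal TypeScript sketch that sets the csvOptions block above on an externally backed table; the dataset ID, table ID, and bucket URI are placeholders, not values from this page:
import * as gcp from "@pulumi/gcp";
// Hypothetical external CSV-backed table; adjust datasetId and sourceUris for your project.
const csvTable = new gcp.bigquery.Table("csv-table", {
    datasetId: "foo",
    tableId: "csv_external",
    deletionProtection: false,
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "CSV",
        sourceUris: ["gs://my-example-bucket/data/*.csv"],
        csvOptions: {
            quote: "\"",            // must be set explicitly; \" matches the API-side default
            skipLeadingRows: 1,
            fieldDelimiter: ",",
            allowJaggedRows: false,
            allowQuotedNewlines: false,
            encoding: "UTF-8",
        },
    },
});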
TableExternalDataConfigurationGoogleSheetsOptions, TableExternalDataConfigurationGoogleSheetsOptionsArgs
- Range string
- Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id". For example: "sheet1!A1:B20".
- SkipLeadingRows int
- The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
- Range string
- Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id". For example: "sheet1!A1:B20".
- SkipLeadingRows int
- The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
- range String
- Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id". For example: "sheet1!A1:B20".
- skipLeadingRows Integer
- The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
- range string
- Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id". For example: "sheet1!A1:B20".
- skipLeadingRows number
- The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
- range str
- Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id". For example: "sheet1!A1:B20".
- skip_leading_rows int
- The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
- range String
- Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id". For example: "sheet1!A1:B20".
- skipLeadingRows Number
- The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.
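For illustration, a TypeScript sketch that uses the range option instead of skipLeadingRows; the spreadsheet URL and IDs are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical sheet-backed table that queries a fixed cell range.
const sheetRange = new gcp.bigquery.Table("sheet-range", {
    datasetId: "foo",
    tableId: "sheet_range",
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "GOOGLE_SHEETS",
        sourceUris: ["https://docs.google.com/spreadsheets/d/123456789012345"],
        googleSheetsOptions: {
            range: "sheet1!A1:B20", // at least one of range or skipLeadingRows must be set
        },
    },
});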
TableExternalDataConfigurationHivePartitioningOptions, TableExternalDataConfigurationHivePartitioningOptionsArgs
- Mode string
- When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- RequirePartitionFilter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- SourceUriPrefix string
- When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout:
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- Mode string
- When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- RequirePartitionFilter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- SourceUriPrefix string
- When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout:
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- mode String
- When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- requirePartitionFilter Boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- sourceUriPrefix String
- When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout:
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- mode string
- When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- requirePartitionFilter boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- sourceUriPrefix string
- When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout:
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- mode str
- When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- require_partition_filter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- source_uri_prefix str
- When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout:
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- mode String
- When set, what mode of hive partitioning to use when reading data. The following modes are supported.
- AUTO: automatically infer partition key name(s) and type(s).
- STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
- CUSTOM: when set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
- requirePartitionFilter Boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
- sourceUriPrefix String
- When hive partition detection is requested, a common prefix for all source uris is required. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout:
gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/. Note that when mode is set to CUSTOM, you must encode the partition key schema within the source_uri_prefix by setting source_uri_prefix to gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}.
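For illustration, a TypeScript sketch of hive partition detection in AUTO mode; the bucket layout and IDs are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical hive-partitioned Parquet layout under gs://my-example-bucket/path_to_table/.
const hivePartitioned = new gcp.bigquery.Table("hive-partitioned", {
    datasetId: "foo",
    tableId: "hive_partitioned",
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "PARQUET",
        sourceUris: ["gs://my-example-bucket/path_to_table/*"],
        hivePartitioningOptions: {
            mode: "AUTO",
            sourceUriPrefix: "gs://my-example-bucket/path_to_table/",
            requirePartitionFilter: true,
        },
    },
});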
TableExternalDataConfigurationJsonOptions, TableExternalDataConfigurationJsonOptionsArgs
- Encoding string
- The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- Encoding string
- The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding String
- The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding string
- The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding str
- The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
- encoding String
- The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8.
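For illustration, a TypeScript sketch that sets jsonOptions.encoding on a newline-delimited JSON source; the URIs and IDs are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical JSON-backed external table with an explicit character encoding.
const jsonTable = new gcp.bigquery.Table("json-table", {
    datasetId: "foo",
    tableId: "json_external",
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "NEWLINE_DELIMITED_JSON",
        sourceUris: ["gs://my-example-bucket/data/*.json"],
        jsonOptions: {
            encoding: "UTF-8",
        },
    },
});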
TableExternalDataConfigurationParquetOptions, TableExternalDataConfigurationParquetOptionsArgs
- EnableListInference bool
- Indicates whether to use schema inference specifically for Parquet LIST logical type.
- EnumAsString bool
- Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- EnableListInference bool
- Indicates whether to use schema inference specifically for Parquet LIST logical type.
- EnumAsString bool
- Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enableListInference Boolean
- Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enumAsString Boolean
- Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enableListInference boolean
- Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enumAsString boolean
- Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enable_list_inference bool
- Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enum_as_string bool
- Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
- enableListInference Boolean
- Indicates whether to use schema inference specifically for Parquet LIST logical type.
- enumAsString Boolean
- Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
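For illustration, a TypeScript sketch enabling both Parquet options; the URIs and IDs are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical Parquet-backed external table with Parquet-specific read options.
const parquetTable = new gcp.bigquery.Table("parquet-table", {
    datasetId: "foo",
    tableId: "parquet_external",
    externalDataConfiguration: {
        autodetect: true,
        sourceFormat: "PARQUET",
        sourceUris: ["gs://my-example-bucket/data/*.parquet"],
        parquetOptions: {
            enableListInference: true,
            enumAsString: true,
        },
    },
});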
TableMaterializedView, TableMaterializedViewArgs
- Query string
- A query whose result is persisted.
- AllowNonIncrementalDefinition bool
- Allow non-incremental materialized view definition. The default value is false.
- EnableRefresh bool
- Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- RefreshIntervalMs int
- The maximum frequency at which this materialized view will be refreshed. The default value is 1800000.
- Query string
- A query whose result is persisted.
- AllowNonIncrementalDefinition bool
- Allow non-incremental materialized view definition. The default value is false.
- EnableRefresh bool
- Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- RefreshIntervalMs int
- The maximum frequency at which this materialized view will be refreshed. The default value is 1800000.
- query String
- A query whose result is persisted.
- allowNonIncrementalDefinition Boolean
- Allow non-incremental materialized view definition. The default value is false.
- enableRefresh Boolean
- Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refreshIntervalMs Integer
- The maximum frequency at which this materialized view will be refreshed. The default value is 1800000.
- query string
- A query whose result is persisted.
- allowNonIncrementalDefinition boolean
- Allow non-incremental materialized view definition. The default value is false.
- enableRefresh boolean
- Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refreshIntervalMs number
- The maximum frequency at which this materialized view will be refreshed. The default value is 1800000.
- query str
- A query whose result is persisted.
- allow_non_incremental_definition bool
- Allow non-incremental materialized view definition. The default value is false.
- enable_refresh bool
- Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refresh_interval_ms int
- The maximum frequency at which this materialized view will be refreshed. The default value is 1800000.
- query String
- A query whose result is persisted.
- allowNonIncrementalDefinition Boolean
- Allow non-incremental materialized view definition. The default value is false.
- enableRefresh Boolean
- Specifies whether to use BigQuery's automatic refresh for this materialized view when the base table is updated. The default value is true.
- refreshIntervalMs Number
- The maximum frequency at which this materialized view will be refreshed. The default value is 1800000.
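For illustration, a TypeScript sketch of a materialized view using these fields; the project, dataset, and query are placeholders and assume a base table already exists:
import * as gcp from "@pulumi/gcp";
// Hypothetical materialized view over a base table at my-project.foo.bar.
const mv = new gcp.bigquery.Table("mv", {
    datasetId: "foo",
    tableId: "bar_mv",
    materializedView: {
        query: "SELECT state, COUNT(*) AS n FROM `my-project.foo.bar` GROUP BY state",
        enableRefresh: true,
        refreshIntervalMs: 1800000,
    },
});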
TableRangePartitioning, TableRangePartitioningArgs
- Field string
- The field used to determine how to create a range-based partition.
- Range TableRangePartitioningRange
- Information required to partition based on ranges. Structure is documented below.
- Field string
- The field used to determine how to create a range-based partition.
- Range TableRangePartitioningRange
- Information required to partition based on ranges. Structure is documented below.
- field String
- The field used to determine how to create a range-based partition.
- range TableRangePartitioningRange
- Information required to partition based on ranges. Structure is documented below.
- field string
- The field used to determine how to create a range-based partition.
- range TableRangePartitioningRange
- Information required to partition based on ranges. Structure is documented below.
- field str
- The field used to determine how to create a range-based partition.
- range TableRangePartitioningRange
- Information required to partition based on ranges. Structure is documented below.
- field String
- The field used to determine how to create a range-based partition.
- range Property Map
- Information required to partition based on ranges. Structure is documented below.
TableRangePartitioningRange, TableRangePartitioningRangeArgs
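The field list for this range block is not reproduced above; as an illustrative, hedged sketch that assumes the provider's start, end, and interval integer fields, an integer-range partitioned table could look like this in TypeScript (IDs and the schema are placeholders):
import * as gcp from "@pulumi/gcp";
// Hypothetical integer-range partitioned table; the range block is assumed to take start, end, and interval.
const rangeTable = new gcp.bigquery.Table("range-partitioned", {
    datasetId: "foo",
    tableId: "range_partitioned",
    schema: JSON.stringify([
        { name: "customer_id", type: "INTEGER", mode: "REQUIRED" },
        { name: "state", type: "STRING", mode: "NULLABLE" },
    ]),
    rangePartitioning: {
        field: "customer_id",
        range: {
            start: 0,
            end: 100000,
            interval: 1000,
        },
    },
});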
TableTableConstraints, TableTableConstraintsArgs
- ForeignKeys List<TableTableConstraintsForeignKey>
- Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
- PrimaryKey TableTableConstraintsPrimaryKey
- Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
- ForeignKeys []TableTableConstraintsForeignKey
- Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
- PrimaryKey TableTableConstraintsPrimaryKey
- Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
- foreignKeys List<TableTableConstraintsForeignKey>
- Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
- primaryKey TableTableConstraintsPrimaryKey
- Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
- foreignKeys TableTableConstraintsForeignKey[]
- Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
- primaryKey TableTableConstraintsPrimaryKey
- Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
- foreign_keys Sequence[TableTableConstraintsForeignKey]
- Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
- primary_key TableTableConstraintsPrimaryKey
- Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
- foreignKeys List<Property Map>
- Present only if the table has a foreign key. The foreign key is not enforced. Structure is documented below.
- primaryKey Property Map
- Represents the primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced. Structure is documented below.
TableTableConstraintsForeignKey, TableTableConstraintsForeignKeyArgs
- ColumnReferences TableTableConstraintsForeignKeyColumnReferences
- The pair of the foreign key column and primary key column. Structure is documented below.
- ReferencedTable TableTableConstraintsForeignKeyReferencedTable
- The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
- Name string
- Set only if the foreign key constraint is named.
- ColumnReferences TableTableConstraintsForeignKeyColumnReferences
- The pair of the foreign key column and primary key column. Structure is documented below.
- ReferencedTable TableTableConstraintsForeignKeyReferencedTable
- The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
- Name string
- Set only if the foreign key constraint is named.
- columnReferences TableTableConstraintsForeignKeyColumnReferences
- The pair of the foreign key column and primary key column. Structure is documented below.
- referencedTable TableTableConstraintsForeignKeyReferencedTable
- The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
- name String
- Set only if the foreign key constraint is named.
- columnReferences TableTableConstraintsForeignKeyColumnReferences
- The pair of the foreign key column and primary key column. Structure is documented below.
- referencedTable TableTableConstraintsForeignKeyReferencedTable
- The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
- name string
- Set only if the foreign key constraint is named.
- column_references TableTableConstraintsForeignKeyColumnReferences
- The pair of the foreign key column and primary key column. Structure is documented below.
- referenced_table TableTableConstraintsForeignKeyReferencedTable
- The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
- name str
- Set only if the foreign key constraint is named.
- columnReferences Property Map
- The pair of the foreign key column and primary key column. Structure is documented below.
- referencedTable Property Map
- The table that holds the primary key and is referenced by this foreign key. Structure is documented below.
- name String
- Set only if the foreign key constraint is named.
TableTableConstraintsForeignKeyColumnReferences, TableTableConstraintsForeignKeyColumnReferencesArgs
- ReferencedColumn string
- The column in the primary key that is referenced by the referencingColumn.
- ReferencingColumn string
- The column that composes the foreign key.
- ReferencedColumn string
- The column in the primary key that is referenced by the referencingColumn.
- ReferencingColumn string
- The column that composes the foreign key.
- referencedColumn String
- The column in the primary key that is referenced by the referencingColumn.
- referencingColumn String
- The column that composes the foreign key.
- referencedColumn string
- The column in the primary key that is referenced by the referencingColumn.
- referencingColumn string
- The column that composes the foreign key.
- referenced_column str
- The column in the primary key that is referenced by the referencingColumn.
- referencing_column str
- The column that composes the foreign key.
- referencedColumn String
- The column in the primary key that is referenced by the referencingColumn.
- referencingColumn String
- The column that composes the foreign key.
TableTableConstraintsForeignKeyReferencedTable, TableTableConstraintsForeignKeyReferencedTableArgs
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- TableId string
- The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
- DatasetId string
- The ID of the dataset containing this table.
- ProjectId string
- The ID of the project containing this table.
- TableId string
- The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
- tableId String
- The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
- datasetId string
- The ID of the dataset containing this table.
- projectId string
- The ID of the project containing this table.
- tableId string
- The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
- dataset_id str
- The ID of the dataset containing this table.
- project_id str
- The ID of the project containing this table.
- table_id str
- The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
- datasetId String
- The ID of the dataset containing this table.
- projectId String
- The ID of the project containing this table.
- tableId String
- The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.
TableTableConstraintsPrimaryKey, TableTableConstraintsPrimaryKeyArgs
- Columns List<string>
- The columns that compose the primary key constraint.
- Columns []string
- The columns that compose the primary key constraint.
- columns List<String>
- The columns that compose the primary key constraint.
- columns string[]
- The columns that compose the primary key constraint.
- columns Sequence[str]
- The columns that compose the primary key constraint.
- columns List<String>
- The columns that compose the primary key constraint.
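For illustration, a TypeScript sketch combining the constraint blocks above into one table definition; the project, dataset, column names, and referenced table are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical table with an unenforced primary key and a foreign key to foo.customers.
const orders = new gcp.bigquery.Table("orders", {
    datasetId: "foo",
    tableId: "orders",
    schema: JSON.stringify([
        { name: "order_id", type: "INTEGER", mode: "REQUIRED" },
        { name: "customer_id", type: "INTEGER", mode: "NULLABLE" },
    ]),
    tableConstraints: {
        primaryKey: { columns: ["order_id"] },
        foreignKeys: [{
            name: "fk_customer",
            columnReferences: {
                referencingColumn: "customer_id",
                referencedColumn: "customer_id",
            },
            referencedTable: {
                projectId: "my-project",
                datasetId: "foo",
                tableId: "customers",
            },
        }],
    },
});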
TableTableReplicationInfo, TableTableReplicationInfoArgs
- SourceDatasetId string
- The ID of the source dataset.
- SourceProjectId string
- The ID of the source project.
- SourceTableId string
- The ID of the source materialized view.
- ReplicationIntervalMs int
- The interval at which the source materialized view is polled for updates. The default is 300000.
- SourceDatasetId string
- The ID of the source dataset.
- SourceProjectId string
- The ID of the source project.
- SourceTableId string
- The ID of the source materialized view.
- ReplicationIntervalMs int
- The interval at which the source materialized view is polled for updates. The default is 300000.
- sourceDatasetId String
- The ID of the source dataset.
- sourceProjectId String
- The ID of the source project.
- sourceTableId String
- The ID of the source materialized view.
- replicationIntervalMs Integer
- The interval at which the source materialized view is polled for updates. The default is 300000.
- sourceDatasetId string
- The ID of the source dataset.
- sourceProjectId string
- The ID of the source project.
- sourceTableId string
- The ID of the source materialized view.
- replicationIntervalMs number
- The interval at which the source materialized view is polled for updates. The default is 300000.
- source_dataset_id str
- The ID of the source dataset.
- source_project_id str
- The ID of the source project.
- source_table_id str
- The ID of the source materialized view.
- replication_interval_ms int
- The interval at which the source materialized view is polled for updates. The default is 300000.
- sourceDatasetId String
- The ID of the source dataset.
- sourceProjectId String
- The ID of the source project.
- sourceTableId String
- The ID of the source materialized view.
- replicationIntervalMs Number
- The interval at which the source materialized view is polled for updates. The default is 300000.
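For illustration, a TypeScript sketch of a replica configured with tableReplicationInfo; the source project, dataset, and materialized view IDs are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical replica of a materialized view that lives in another project/dataset.
const replica = new gcp.bigquery.Table("mv-replica", {
    datasetId: "foo",
    tableId: "bar_mv_replica",
    tableReplicationInfo: {
        sourceProjectId: "my-source-project",
        sourceDatasetId: "source_dataset",
        sourceTableId: "bar_mv",
        replicationIntervalMs: 300000,
    },
});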
TableTimePartitioning, TableTimePartitioningArgs
- Type string
- The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- ExpirationMs int
- Number of milliseconds for which to keep the storage for a partition.
- Field string
- The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- RequirePartitionFilter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.
- Type string
- The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- ExpirationMs int
- Number of milliseconds for which to keep the storage for a partition.
- Field string
- The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- RequirePartitionFilter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.
- type String
- The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expirationMs Integer
- Number of milliseconds for which to keep the storage for a partition.
- field String
- The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- requirePartitionFilter Boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.
- type string
- The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expirationMs number
- Number of milliseconds for which to keep the storage for a partition.
- field string
- The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- requirePartitionFilter boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.
- type str
- The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expiration_ms int
- Number of milliseconds for which to keep the storage for a partition.
- field str
- The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- require_partition_filter bool
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.
- type String
- The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
- expirationMs Number
- Number of milliseconds for which to keep the storage for a partition.
- field String
- The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.
- requirePartitionFilter Boolean
- If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. require_partition_filter is deprecated and will be removed in a future major release. Use the top level field with the same name instead.
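For illustration, a TypeScript sketch of column-based time partitioning that prefers the top-level requirePartitionFilter field over the deprecated nested one; IDs and the schema are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical table partitioned by a TIMESTAMP column with 90-day partition expiry.
const partitioned = new gcp.bigquery.Table("partitioned", {
    datasetId: "foo",
    tableId: "events",
    requirePartitionFilter: true, // top-level field, per the deprecation note above
    schema: JSON.stringify([
        { name: "event_time", type: "TIMESTAMP", mode: "REQUIRED" },
        { name: "payload", type: "STRING", mode: "NULLABLE" },
    ]),
    timePartitioning: {
        type: "DAY",
        field: "event_time",
        expirationMs: 90 * 24 * 60 * 60 * 1000,
    },
});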
TableView, TableViewArgs
- Query string
- A query that BigQuery executes when the view is referenced.
- UseLegacySql bool
- Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- Query string
- A query that BigQuery executes when the view is referenced.
- UseLegacySql bool
- Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query String
- A query that BigQuery executes when the view is referenced.
- useLegacySql Boolean
- Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query string
- A query that BigQuery executes when the view is referenced.
- useLegacySql boolean
- Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query str
- A query that BigQuery executes when the view is referenced.
- use_legacy_sql bool
- Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
- query String
- A query that BigQuery executes when the view is referenced.
- useLegacySql Boolean
- Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL.
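For illustration, a TypeScript sketch of a standard SQL view; the project, dataset, and query are placeholders:
import * as gcp from "@pulumi/gcp";
// Hypothetical logical view over my-project.foo.bar using standard SQL.
const view = new gcp.bigquery.Table("view", {
    datasetId: "foo",
    tableId: "bar_view",
    view: {
        query: "SELECT state FROM `my-project.foo.bar` WHERE state IS NOT NULL",
        useLegacySql: false,
    },
});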
Import
BigQuery tables can be imported using any of these accepted formats:
projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}
{{project}}/{{dataset_id}}/{{table_id}}
{{dataset_id}}/{{table_id}}
When using the pulumi import command, BigQuery tables can be imported using one of the formats above. For example:
$ pulumi import gcp:bigquery/table:Table default projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}
$ pulumi import gcp:bigquery/table:Table default {{project}}/{{dataset_id}}/{{table_id}}
$ pulumi import gcp:bigquery/table:Table default {{dataset_id}}/{{table_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.