alicloud.log.OssShipper
Log Service data delivery management. This resource delivers data from a Logstore to Alibaba Cloud OSS storage. Refer to details.
NOTE: Available since v1.121.0.
DEPRECATED: This resource has been deprecated since version 1.215.0. Please use the new resource alicloud_log_oss_export instead.
Example Usage
Basic Usage
import * as pulumi from "@pulumi/pulumi";
import * as alicloud from "@pulumi/alicloud";
import * as random from "@pulumi/random";

const _default = new random.RandomInteger("default", {
    max: 99999,
    min: 10000,
});
const example = new alicloud.log.Project("example", {
    name: pulumi.interpolate`terraform-example-${_default.result}`,
    description: "terraform-example",
    tags: {
        Created: "TF",
        For: "example",
    },
});
const exampleStore = new alicloud.log.Store("example", {
    project: example.name,
    name: "example-store",
    retentionPeriod: 3650,
    autoSplit: true,
    maxSplitShardCount: 60,
    appendMeta: true,
});
const exampleOssShipper = new alicloud.log.OssShipper("example", {
    projectName: example.name,
    logstoreName: exampleStore.name,
    shipperName: "terraform-example",
    ossBucket: "example_bucket",
    ossPrefix: "root",
    bufferInterval: 300,
    bufferSize: 250,
    compressType: "none",
    pathFormat: "%Y/%m/%d/%H/%M",
    format: "json",
    jsonEnableTag: true,
});
import pulumi
import pulumi_alicloud as alicloud
import pulumi_random as random

default = random.RandomInteger("default",
    max=99999,
    min=10000)
example = alicloud.log.Project("example",
    name=default.result.apply(lambda result: f"terraform-example-{result}"),
    description="terraform-example",
    tags={
        "Created": "TF",
        "For": "example",
    })
example_store = alicloud.log.Store("example",
    project=example.name,
    name="example-store",
    retention_period=3650,
    auto_split=True,
    max_split_shard_count=60,
    append_meta=True)
example_oss_shipper = alicloud.log.OssShipper("example",
    project_name=example.name,
    logstore_name=example_store.name,
    shipper_name="terraform-example",
    oss_bucket="example_bucket",
    oss_prefix="root",
    buffer_interval=300,
    buffer_size=250,
    compress_type="none",
    path_format="%Y/%m/%d/%H/%M",
    format="json",
    json_enable_tag=True)
package main

import (
	"github.com/pulumi/pulumi-alicloud/sdk/v3/go/alicloud/log"
	"github.com/pulumi/pulumi-random/sdk/v4/go/random"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_default, err := random.NewRandomInteger(ctx, "default", &random.RandomIntegerArgs{
			Max: pulumi.Int(99999),
			Min: pulumi.Int(10000),
		})
		if err != nil {
			return err
		}
		example, err := log.NewProject(ctx, "example", &log.ProjectArgs{
			Name:        pulumi.Sprintf("terraform-example-%v", _default.Result),
			Description: pulumi.String("terraform-example"),
			Tags: pulumi.StringMap{
				"Created": pulumi.String("TF"),
				"For":     pulumi.String("example"),
			},
		})
		if err != nil {
			return err
		}
		exampleStore, err := log.NewStore(ctx, "example", &log.StoreArgs{
			Project:            example.Name,
			Name:               pulumi.String("example-store"),
			RetentionPeriod:    pulumi.Int(3650),
			AutoSplit:          pulumi.Bool(true),
			MaxSplitShardCount: pulumi.Int(60),
			AppendMeta:         pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = log.NewOssShipper(ctx, "example", &log.OssShipperArgs{
			ProjectName:    example.Name,
			LogstoreName:   exampleStore.Name,
			ShipperName:    pulumi.String("terraform-example"),
			OssBucket:      pulumi.String("example_bucket"),
			OssPrefix:      pulumi.String("root"),
			BufferInterval: pulumi.Int(300),
			BufferSize:     pulumi.Int(250),
			CompressType:   pulumi.String("none"),
			PathFormat:     pulumi.String("%Y/%m/%d/%H/%M"),
			Format:         pulumi.String("json"),
			JsonEnableTag:  pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AliCloud = Pulumi.AliCloud;
using Random = Pulumi.Random;

return await Deployment.RunAsync(() =>
{
    var @default = new Random.RandomInteger("default", new()
    {
        Max = 99999,
        Min = 10000,
    });

    var example = new AliCloud.Log.Project("example", new()
    {
        Name = Output.Format($"terraform-example-{@default.Result}"),
        Description = "terraform-example",
        Tags =
        {
            { "Created", "TF" },
            { "For", "example" },
        },
    });

    var exampleStore = new AliCloud.Log.Store("example", new()
    {
        Project = example.Name,
        Name = "example-store",
        RetentionPeriod = 3650,
        AutoSplit = true,
        MaxSplitShardCount = 60,
        AppendMeta = true,
    });

    var exampleOssShipper = new AliCloud.Log.OssShipper("example", new()
    {
        ProjectName = example.Name,
        LogstoreName = exampleStore.Name,
        ShipperName = "terraform-example",
        OssBucket = "example_bucket",
        OssPrefix = "root",
        BufferInterval = 300,
        BufferSize = 250,
        CompressType = "none",
        PathFormat = "%Y/%m/%d/%H/%M",
        Format = "json",
        JsonEnableTag = true,
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.random.RandomInteger;
import com.pulumi.random.RandomIntegerArgs;
import com.pulumi.alicloud.log.Project;
import com.pulumi.alicloud.log.ProjectArgs;
import com.pulumi.alicloud.log.Store;
import com.pulumi.alicloud.log.StoreArgs;
import com.pulumi.alicloud.log.OssShipper;
import com.pulumi.alicloud.log.OssShipperArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var default_ = new RandomInteger("default", RandomIntegerArgs.builder()
            .max(99999)
            .min(10000)
            .build());

        var example = new Project("example", ProjectArgs.builder()
            .name(default_.result().applyValue(result -> String.format("terraform-example-%s", result)))
            .description("terraform-example")
            .tags(Map.ofEntries(
                Map.entry("Created", "TF"),
                Map.entry("For", "example")
            ))
            .build());

        var exampleStore = new Store("exampleStore", StoreArgs.builder()
            .project(example.name())
            .name("example-store")
            .retentionPeriod(3650)
            .autoSplit(true)
            .maxSplitShardCount(60)
            .appendMeta(true)
            .build());

        var exampleOssShipper = new OssShipper("exampleOssShipper", OssShipperArgs.builder()
            .projectName(example.name())
            .logstoreName(exampleStore.name())
            .shipperName("terraform-example")
            .ossBucket("example_bucket")
            .ossPrefix("root")
            .bufferInterval(300)
            .bufferSize(250)
            .compressType("none")
            .pathFormat("%Y/%m/%d/%H/%M")
            .format("json")
            .jsonEnableTag(true)
            .build());
    }
}
resources:
  default:
    type: random:RandomInteger
    properties:
      max: 99999
      min: 10000
  example:
    type: alicloud:log:Project
    properties:
      name: terraform-example-${default.result}
      description: terraform-example
      tags:
        Created: TF
        For: example
  exampleStore:
    type: alicloud:log:Store
    name: example
    properties:
      project: ${example.name}
      name: example-store
      retentionPeriod: 3650
      autoSplit: true
      maxSplitShardCount: 60
      appendMeta: true
  exampleOssShipper:
    type: alicloud:log:OssShipper
    name: example
    properties:
      projectName: ${example.name}
      logstoreName: ${exampleStore.name}
      shipperName: terraform-example
      ossBucket: example_bucket
      ossPrefix: root
      bufferInterval: 300
      bufferSize: 250
      compressType: none
      pathFormat: '%Y/%m/%d/%H/%M'
      format: json
      jsonEnableTag: true
Create OssShipper Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new OssShipper(name: string, args: OssShipperArgs, opts?: CustomResourceOptions);
@overload
def OssShipper(resource_name: str,
args: OssShipperArgs,
opts: Optional[ResourceOptions] = None)
@overload
def OssShipper(resource_name: str,
opts: Optional[ResourceOptions] = None,
format: Optional[str] = None,
buffer_size: Optional[int] = None,
shipper_name: Optional[str] = None,
project_name: Optional[str] = None,
buffer_interval: Optional[int] = None,
path_format: Optional[str] = None,
oss_bucket: Optional[str] = None,
logstore_name: Optional[str] = None,
csv_config_delimiter: Optional[str] = None,
csv_config_quote: Optional[str] = None,
json_enable_tag: Optional[bool] = None,
csv_config_nullidentifier: Optional[str] = None,
csv_config_linefeed: Optional[str] = None,
oss_prefix: Optional[str] = None,
parquet_configs: Optional[Sequence[OssShipperParquetConfigArgs]] = None,
csv_config_header: Optional[bool] = None,
csv_config_columns: Optional[Sequence[str]] = None,
role_arn: Optional[str] = None,
compress_type: Optional[str] = None)
func NewOssShipper(ctx *Context, name string, args OssShipperArgs, opts ...ResourceOption) (*OssShipper, error)
public OssShipper(string name, OssShipperArgs args, CustomResourceOptions? opts = null)
public OssShipper(String name, OssShipperArgs args)
public OssShipper(String name, OssShipperArgs args, CustomResourceOptions options)
type: alicloud:log:OssShipper
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args OssShipperArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args OssShipperArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args OssShipperArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args OssShipperArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args OssShipperArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var ossShipperResource = new AliCloud.Log.OssShipper("ossShipperResource", new()
{
Format = "string",
BufferSize = 0,
ShipperName = "string",
ProjectName = "string",
BufferInterval = 0,
PathFormat = "string",
OssBucket = "string",
LogstoreName = "string",
CsvConfigDelimiter = "string",
CsvConfigQuote = "string",
JsonEnableTag = false,
CsvConfigNullidentifier = "string",
CsvConfigLinefeed = "string",
OssPrefix = "string",
ParquetConfigs = new[]
{
new AliCloud.Log.Inputs.OssShipperParquetConfigArgs
{
Name = "string",
Type = "string",
},
},
CsvConfigHeader = false,
CsvConfigColumns = new[]
{
"string",
},
RoleArn = "string",
CompressType = "string",
});
example, err := log.NewOssShipper(ctx, "ossShipperResource", &log.OssShipperArgs{
Format: pulumi.String("string"),
BufferSize: pulumi.Int(0),
ShipperName: pulumi.String("string"),
ProjectName: pulumi.String("string"),
BufferInterval: pulumi.Int(0),
PathFormat: pulumi.String("string"),
OssBucket: pulumi.String("string"),
LogstoreName: pulumi.String("string"),
CsvConfigDelimiter: pulumi.String("string"),
CsvConfigQuote: pulumi.String("string"),
JsonEnableTag: pulumi.Bool(false),
CsvConfigNullidentifier: pulumi.String("string"),
CsvConfigLinefeed: pulumi.String("string"),
OssPrefix: pulumi.String("string"),
ParquetConfigs: log.OssShipperParquetConfigArray{
&log.OssShipperParquetConfigArgs{
Name: pulumi.String("string"),
Type: pulumi.String("string"),
},
},
CsvConfigHeader: pulumi.Bool(false),
CsvConfigColumns: pulumi.StringArray{
pulumi.String("string"),
},
RoleArn: pulumi.String("string"),
CompressType: pulumi.String("string"),
})
var ossShipperResource = new OssShipper("ossShipperResource", OssShipperArgs.builder()
.format("string")
.bufferSize(0)
.shipperName("string")
.projectName("string")
.bufferInterval(0)
.pathFormat("string")
.ossBucket("string")
.logstoreName("string")
.csvConfigDelimiter("string")
.csvConfigQuote("string")
.jsonEnableTag(false)
.csvConfigNullidentifier("string")
.csvConfigLinefeed("string")
.ossPrefix("string")
.parquetConfigs(OssShipperParquetConfigArgs.builder()
.name("string")
.type("string")
.build())
.csvConfigHeader(false)
.csvConfigColumns("string")
.roleArn("string")
.compressType("string")
.build());
oss_shipper_resource = alicloud.log.OssShipper("ossShipperResource",
format="string",
buffer_size=0,
shipper_name="string",
project_name="string",
buffer_interval=0,
path_format="string",
oss_bucket="string",
logstore_name="string",
csv_config_delimiter="string",
csv_config_quote="string",
json_enable_tag=False,
csv_config_nullidentifier="string",
csv_config_linefeed="string",
oss_prefix="string",
parquet_configs=[{
"name": "string",
"type": "string",
}],
csv_config_header=False,
csv_config_columns=["string"],
role_arn="string",
compress_type="string")
const ossShipperResource = new alicloud.log.OssShipper("ossShipperResource", {
format: "string",
bufferSize: 0,
shipperName: "string",
projectName: "string",
bufferInterval: 0,
pathFormat: "string",
ossBucket: "string",
logstoreName: "string",
csvConfigDelimiter: "string",
csvConfigQuote: "string",
jsonEnableTag: false,
csvConfigNullidentifier: "string",
csvConfigLinefeed: "string",
ossPrefix: "string",
parquetConfigs: [{
name: "string",
type: "string",
}],
csvConfigHeader: false,
csvConfigColumns: ["string"],
roleArn: "string",
compressType: "string",
});
type: alicloud:log:OssShipper
properties:
    bufferInterval: 0
    bufferSize: 0
    compressType: string
    csvConfigColumns:
        - string
    csvConfigDelimiter: string
    csvConfigHeader: false
    csvConfigLinefeed: string
    csvConfigNullidentifier: string
    csvConfigQuote: string
    format: string
    jsonEnableTag: false
    logstoreName: string
    ossBucket: string
    ossPrefix: string
    parquetConfigs:
        - name: string
          type: string
    pathFormat: string
    projectName: string
    roleArn: string
    shipperName: string
OssShipper Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The OssShipper resource accepts the following input properties:
- BufferInterval int - How often data is delivered (the delivery interval).
- BufferSize int - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- Format string - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- LogstoreName string - The name of the logstore.
- OssBucket string - The name of the OSS bucket.
- PathFormat string - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- ProjectName string - The name of the log project. It is unique within an Alibaba Cloud account.
- ShipperName string - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- CompressType string - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- CsvConfigColumns List<string>
- CsvConfigDelimiter string
- CsvConfigHeader bool
- CsvConfigLinefeed string
- CsvConfigNullidentifier string
- CsvConfigQuote string
- JsonEnableTag bool
- OssPrefix string - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- ParquetConfigs List<Pulumi.AliCloud.Log.Inputs.OssShipperParquetConfig>
- RoleArn string - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- BufferInterval int - How often data is delivered (the delivery interval).
- BufferSize int - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- Format string - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- LogstoreName string - The name of the logstore.
- OssBucket string - The name of the OSS bucket.
- PathFormat string - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- ProjectName string - The name of the log project. It is unique within an Alibaba Cloud account.
- ShipperName string - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- CompressType string - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- CsvConfigColumns []string
- CsvConfigDelimiter string
- CsvConfigHeader bool
- CsvConfigLinefeed string
- CsvConfigNullidentifier string
- CsvConfigQuote string
- JsonEnableTag bool
- OssPrefix string - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- ParquetConfigs []OssShipperParquetConfigArgs
- RoleArn string - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- bufferInterval Integer - How often data is delivered (the delivery interval).
- bufferSize Integer - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- format String - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- logstoreName String - The name of the logstore.
- ossBucket String - The name of the OSS bucket.
- pathFormat String - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- projectName String - The name of the log project. It is unique within an Alibaba Cloud account.
- shipperName String - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- compressType String - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csvConfigColumns List<String>
- csvConfigDelimiter String
- csvConfigHeader Boolean
- csvConfigLinefeed String
- csvConfigNullidentifier String
- csvConfigQuote String
- jsonEnableTag Boolean
- ossPrefix String - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquetConfigs List<OssShipperParquetConfig>
- roleArn String - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- bufferInterval number - How often data is delivered (the delivery interval).
- bufferSize number - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- format string - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- logstoreName string - The name of the logstore.
- ossBucket string - The name of the OSS bucket.
- pathFormat string - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- projectName string - The name of the log project. It is unique within an Alibaba Cloud account.
- shipperName string - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- compressType string - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csvConfigColumns string[]
- csvConfigDelimiter string
- csvConfigHeader boolean
- csvConfigLinefeed string
- csvConfigNullidentifier string
- csvConfigQuote string
- jsonEnableTag boolean
- ossPrefix string - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquetConfigs OssShipperParquetConfig[]
- roleArn string - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- buffer_interval int - How often data is delivered (the delivery interval).
- buffer_size int - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- format str - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- logstore_name str - The name of the logstore.
- oss_bucket str - The name of the OSS bucket.
- path_format str - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- project_name str - The name of the log project. It is unique within an Alibaba Cloud account.
- shipper_name str - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- compress_type str - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csv_config_columns Sequence[str]
- csv_config_delimiter str
- csv_config_header bool
- csv_config_linefeed str
- csv_config_nullidentifier str
- csv_config_quote str
- json_enable_tag bool
- oss_prefix str - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquet_configs Sequence[OssShipperParquetConfigArgs]
- role_arn str - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- bufferInterval Number - How often data is delivered (the delivery interval).
- bufferSize Number - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- format String - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- logstoreName String - The name of the logstore.
- ossBucket String - The name of the OSS bucket.
- pathFormat String - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- projectName String - The name of the log project. It is unique within an Alibaba Cloud account.
- shipperName String - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- compressType String - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csvConfigColumns List<String>
- csvConfigDelimiter String
- csvConfigHeader Boolean
- csvConfigLinefeed String
- csvConfigNullidentifier String
- csvConfigQuote String
- jsonEnableTag Boolean
- ossPrefix String - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquetConfigs List<Property Map>
- roleArn String - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
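For example, a shipper that uses the csv format sets the csv_config_* arguments instead of json_enable_tag. The following TypeScript sketch is illustrative only; it assumes the example project and logstore from the usage example above, and the column names are placeholders.
const csvShipper = new alicloud.log.OssShipper("csv-example", {
    projectName: example.name,
    logstoreName: exampleStore.name,
    shipperName: "terraform-example-csv",
    ossBucket: "example_bucket",
    ossPrefix: "root",
    bufferInterval: 300,
    bufferSize: 250,
    compressType: "snappy",
    pathFormat: "%Y/%m/%d/%H/%M",
    format: "csv",
    // csv-specific settings; only meaningful when format = "csv"
    csvConfigColumns: ["__time__", "ip", "method", "status"],
    csvConfigDelimiter: ",",
    csvConfigQuote: "\"",
    csvConfigHeader: true,
    csvConfigNullidentifier: "",
    csvConfigLinefeed: "\n",
});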
Outputs
All input properties are implicitly available as output properties. Additionally, the OssShipper resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing OssShipper Resource
Get an existing OssShipper resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: OssShipperState, opts?: CustomResourceOptions): OssShipper
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
buffer_interval: Optional[int] = None,
buffer_size: Optional[int] = None,
compress_type: Optional[str] = None,
csv_config_columns: Optional[Sequence[str]] = None,
csv_config_delimiter: Optional[str] = None,
csv_config_header: Optional[bool] = None,
csv_config_linefeed: Optional[str] = None,
csv_config_nullidentifier: Optional[str] = None,
csv_config_quote: Optional[str] = None,
format: Optional[str] = None,
json_enable_tag: Optional[bool] = None,
logstore_name: Optional[str] = None,
oss_bucket: Optional[str] = None,
oss_prefix: Optional[str] = None,
parquet_configs: Optional[Sequence[OssShipperParquetConfigArgs]] = None,
path_format: Optional[str] = None,
project_name: Optional[str] = None,
role_arn: Optional[str] = None,
shipper_name: Optional[str] = None) -> OssShipper
func GetOssShipper(ctx *Context, name string, id IDInput, state *OssShipperState, opts ...ResourceOption) (*OssShipper, error)
public static OssShipper Get(string name, Input<string> id, OssShipperState? state, CustomResourceOptions? opts = null)
public static OssShipper get(String name, Output<String> id, OssShipperState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
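For example, in TypeScript an existing shipper can be looked up by its ID. This is a minimal sketch; the ID below is a placeholder in the <project>:<logstore>:<shipper_name> form shown in the Import section.
const existing = alicloud.log.OssShipper.get(
    "existing",
    "tf-log-project:tf-log-logstore:tf-log-shipper",
);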
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- BufferInterval int - How often data is delivered (the delivery interval).
- BufferSize int - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- CompressType string - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- CsvConfigColumns List<string>
- CsvConfigDelimiter string
- CsvConfigHeader bool
- CsvConfigLinefeed string
- CsvConfigNullidentifier string
- CsvConfigQuote string
- Format string - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- JsonEnableTag bool
- LogstoreName string - The name of the logstore.
- OssBucket string - The name of the OSS bucket.
- OssPrefix string - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- ParquetConfigs List<Pulumi.AliCloud.Log.Inputs.OssShipperParquetConfig>
- PathFormat string - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- ProjectName string - The name of the log project. It is unique within an Alibaba Cloud account.
- RoleArn string - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- ShipperName string - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- BufferInterval int - How often data is delivered (the delivery interval).
- BufferSize int - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- CompressType string - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- CsvConfigColumns []string
- CsvConfigDelimiter string
- CsvConfigHeader bool
- CsvConfigLinefeed string
- CsvConfigNullidentifier string
- CsvConfigQuote string
- Format string - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- JsonEnableTag bool
- LogstoreName string - The name of the logstore.
- OssBucket string - The name of the OSS bucket.
- OssPrefix string - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- ParquetConfigs []OssShipperParquetConfigArgs
- PathFormat string - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- ProjectName string - The name of the log project. It is unique within an Alibaba Cloud account.
- RoleArn string - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- ShipperName string - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- bufferInterval Integer - How often data is delivered (the delivery interval).
- bufferSize Integer - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- compressType String - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csvConfigColumns List<String>
- csvConfigDelimiter String
- csvConfigHeader Boolean
- csvConfigLinefeed String
- csvConfigNullidentifier String
- csvConfigQuote String
- format String - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- jsonEnableTag Boolean
- logstoreName String - The name of the logstore.
- ossBucket String - The name of the OSS bucket.
- ossPrefix String - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquetConfigs List<OssShipperParquetConfig>
- pathFormat String - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- projectName String - The name of the log project. It is unique within an Alibaba Cloud account.
- roleArn String - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- shipperName String - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- bufferInterval number - How often data is delivered (the delivery interval).
- bufferSize number - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- compressType string - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csvConfigColumns string[]
- csvConfigDelimiter string
- csvConfigHeader boolean
- csvConfigLinefeed string
- csvConfigNullidentifier string
- csvConfigQuote string
- format string - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- jsonEnableTag boolean
- logstoreName string - The name of the logstore.
- ossBucket string - The name of the OSS bucket.
- ossPrefix string - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquetConfigs OssShipperParquetConfig[]
- pathFormat string - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- projectName string - The name of the log project. It is unique within an Alibaba Cloud account.
- roleArn string - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- shipperName string - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- buffer_interval int - How often data is delivered (the delivery interval).
- buffer_size int - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- compress_type str - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csv_config_columns Sequence[str]
- csv_config_delimiter str
- csv_config_header bool
- csv_config_linefeed str
- csv_config_nullidentifier str
- csv_config_quote str
- format str - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- json_enable_tag bool
- logstore_name str - The name of the logstore.
- oss_bucket str - The name of the OSS bucket.
- oss_prefix str - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquet_configs Sequence[OssShipperParquetConfigArgs]
- path_format str - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- project_name str - The name of the log project. It is unique within an Alibaba Cloud account.
- role_arn str - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- shipper_name str - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
- bufferInterval Number - How often data is delivered (the delivery interval).
- bufferSize Number - The upper limit of an OSS object's size (measured before compression), in MB. Together with the delivery interval, this controls when delivery tasks are created.
- compressType String - The compression method for data stored in OSS. Supported values: none and snappy. none means the raw data is not compressed; snappy compresses the data with the snappy algorithm, which reduces storage usage in the OSS bucket.
- csvConfigColumns List<String>
- csvConfigDelimiter String
- csvConfigHeader Boolean
- csvConfigLinefeed String
- csvConfigNullidentifier String
- csvConfigQuote String
- format String - Storage format. Only three types are supported: json, parquet, csv. Depending on the format, set the corresponding parameters:
  - format = json: json_enable_tag - (Optional) Whether to deliver the tags.
  - format = csv: csv_config_delimiter - (Optional) Separator used in the csv configuration. csv_config_columns - (Optional) Field configuration in the csv configuration. csv_config_nullidentifier - (Optional) Content written for invalid fields. csv_config_quote - (Optional) Escape character in the csv configuration. csv_config_header - (Optional) Whether to write the field names to the CSV file; the default value is false. csv_config_linefeed - (Optional) Line feed used in the csv configuration.
  - format = parquet: parquet_config - (Optional) Configuration for the parquet storage format. name - (Required) The name of the key. type - (Required) The type of the key.
- jsonEnableTag Boolean
- logstoreName String - The name of the logstore.
- ossBucket String - The name of the OSS bucket.
- ossPrefix String - Data shipped from Log Service to OSS is stored in this directory of the bucket.
- parquetConfigs List<Property Map>
- pathFormat String - The OSS bucket directory, dynamically generated from the creation time of the shipper task. It cannot start with a forward slash (/); the default value is %Y/%m/%d/%H/%M.
- projectName String - The name of the log project. It is unique within an Alibaba Cloud account.
- roleArn String - Used for access control. The OSS bucket owner creates the role, for example acs:ram::13234:role/logrole.
- shipperName String - The delivery configuration name. It can only contain lowercase letters, numbers, dashes (-) and underscores (_); it must start and end with a lowercase letter or number, and must be 2 to 128 characters long.
Supporting Types
OssShipperParquetConfig, OssShipperParquetConfigArgs
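Each parquet config entry pairs a key name with its type, as shown in the constructor example above. The following minimal TypeScript sketch configures a parquet-format shipper; it assumes the example project and logstore from the usage example, and the field names and type values are hypothetical.
const parquetShipper = new alicloud.log.OssShipper("parquet-example", {
    projectName: example.name,
    logstoreName: exampleStore.name,
    shipperName: "terraform-example-parquet",
    ossBucket: "example_bucket",
    ossPrefix: "root",
    bufferInterval: 300,
    bufferSize: 250,
    compressType: "snappy",
    pathFormat: "%Y/%m/%d/%H/%M",
    format: "parquet",
    // each entry maps a key name to its type
    parquetConfigs: [
        { name: "request_time", type: "string" },
        { name: "status", type: "int32" },
    ],
});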
Import
A Log OSS shipper can be imported using the id or name, e.g.
$ pulumi import alicloud:log/ossShipper:OssShipper example tf-log-project:tf-log-logstore:tf-log-shipper
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository: Alibaba Cloud pulumi/pulumi-alicloud
- License: Apache-2.0
- Notes: This Pulumi package is based on the alicloud Terraform Provider.