-
Notifications
You must be signed in to change notification settings - Fork 3.4k
Expand file tree
/
Copy pathconfig.go
More file actions
153 lines (130 loc) · 6.53 KB
/
config.go
File metadata and controls
153 lines (130 loc) · 6.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awss3exporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awss3exporter"
import (
	"errors"
	"slices"
	"time"

	s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configcompression"
	"go.opentelemetry.io/collector/config/configoptional"
	"go.opentelemetry.io/collector/exporter/exporterhelper"
	"go.uber.org/multierr"
)
// Default retry settings for the S3 client. These mirror the AWS SDK v2
// defaults ("standard" retry mode, 3 attempts, 20s max backoff).
const (
	DefaultRetryMode        = "standard"
	DefaultRetryMaxAttempts = 3
	DefaultRetryMaxBackoff  = 20 * time.Second
)
// S3UploaderConfig contains aws s3 uploader related config to control things
// like bucket, prefix, batching, connections, retries, etc.
type S3UploaderConfig struct {
	// Region is the AWS region of the bucket to upload to.
	Region string `mapstructure:"region"`
	// S3Bucket is the bucket name to be uploaded to.
	S3Bucket string `mapstructure:"s3_bucket"`
	// S3BasePrefix is the root key (directory) prefix used to write the file.
	S3BasePrefix string `mapstructure:"s3_base_prefix"`
	// S3Prefix is the key (directory) prefix to write to inside the bucket. Appended to S3BasePrefix if provided.
	S3Prefix string `mapstructure:"s3_prefix"`
	// S3PartitionFormat is used to provide the rollup on how data is written. Uses [strftime](https://www.man7.org/linux/man-pages/man3/strftime.3.html) formatting.
	S3PartitionFormat string `mapstructure:"s3_partition_format"`
	// S3PartitionTimezone is used to provide timezone for partition time. Defaults to Local timezone.
	S3PartitionTimezone string `mapstructure:"s3_partition_timezone"`
	// FilePrefix is the filename prefix used for the file to avoid any potential collisions.
	FilePrefix string `mapstructure:"file_prefix"`
	// Endpoint is the URL used for communicating with S3.
	Endpoint string `mapstructure:"endpoint"`
	// RoleArn is the role policy to use when interacting with S3
	RoleArn string `mapstructure:"role_arn"`
	// S3ForcePathStyle sets the value for force path style.
	S3ForcePathStyle bool `mapstructure:"s3_force_path_style"`
	// DisableSSL forces communication to happen via HTTP instead of HTTPS.
	DisableSSL bool `mapstructure:"disable_ssl"`
	// ACL is the canned ACL to use when uploading objects.
	ACL string `mapstructure:"acl"`
	// StorageClass is the S3 storage class for uploaded objects; validated
	// against the classes the AWS SDK enumerates (see Config.Validate).
	StorageClass string `mapstructure:"storage_class"`
	// Compression sets the algorithm used to process the payload
	// before uploading to S3.
	// Valid values are: `gzip`, `zstd`, or no value set.
	Compression configcompression.Type `mapstructure:"compression"`
	// RetryMode specifies the retry mode for S3 client, default is "standard".
	// Valid values are: "standard", "adaptive", or "nop".
	// "nop" will disable retry by setting the retryer to aws.NopRetryer.
	RetryMode string `mapstructure:"retry_mode"`
	// RetryMaxAttempts specifies the maximum number of attempts for S3 client.
	// Default is 3 (SDK default).
	RetryMaxAttempts int `mapstructure:"retry_max_attempts"`
	// RetryMaxBackoff specifies the maximum backoff delay for S3 client.
	// Default is 20 seconds (SDK default).
	RetryMaxBackoff time.Duration `mapstructure:"retry_max_backoff"`
	// UniqueKeyFuncName specifies a function to use for generating a unique string as part of the S3 key.
	// If unspecified, a default function will be used that generates a random string.
	// Valid values are: "uuidv7"
	UniqueKeyFuncName string `mapstructure:"unique_key_func_name"`
}
// MarshalerType names the encoding used to serialize telemetry before upload.
type MarshalerType string

// Supported marshaler names (see the `marshaler` config option).
const (
	OtlpProtobuf MarshalerType = "otlp_proto"
	OtlpJSON     MarshalerType = "otlp_json"
	SumoIC       MarshalerType = "sumo_ic"
	Body         MarshalerType = "body"
)
// ResourceAttrsToS3 defines the mapping of S3 uploading configuration values to resource attribute values.
type ResourceAttrsToS3 struct {
	// S3Bucket indicates the mapping of the bucket name used for uploading to a specific resource attribute value.
	S3Bucket string `mapstructure:"s3_bucket"`
	// S3Prefix indicates the mapping of the key (directory) prefix used for writing into the bucket to a specific resource attribute value.
	S3Prefix string `mapstructure:"s3_prefix"`
	// prevent unkeyed literal initialization
	_ struct{}
}
// Config contains the main configuration options for the s3 exporter
type Config struct {
	// QueueSettings configures the exporter's sending queue, if enabled.
	QueueSettings configoptional.Optional[exporterhelper.QueueBatchConfig] `mapstructure:"sending_queue"`
	// TimeoutSettings configures the export timeout.
	TimeoutSettings exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
	// S3Uploader holds the S3 destination and client settings.
	S3Uploader S3UploaderConfig `mapstructure:"s3uploader"`
	// MarshalerName selects the built-in marshaler (see MarshalerType values).
	MarshalerName MarshalerType `mapstructure:"marshaler"`
	// Encoding to apply. If present, overrides the marshaler configuration option.
	Encoding *component.ID `mapstructure:"encoding"`
	// EncodingFileExtension is the file extension used when Encoding is set.
	EncodingFileExtension string `mapstructure:"encoding_file_extension"`
	// ResourceAttrsToS3 maps resource attributes onto S3 upload settings.
	ResourceAttrsToS3 ResourceAttrsToS3 `mapstructure:"resource_attrs_to_s3"`
}
// Validate checks the configuration for invalid or inconsistent values and
// returns every problem found, combined via multierr (nil when valid).
func (c *Config) Validate() error {
	var errs error

	// A region and a destination (bucket or custom endpoint) are mandatory.
	if c.S3Uploader.Region == "" {
		errs = multierr.Append(errs, errors.New("region is required"))
	}
	if c.S3Uploader.S3Bucket == "" && c.S3Uploader.Endpoint == "" {
		errs = multierr.Append(errs, errors.New("bucket or endpoint is required"))
	}

	// The SDK enumerates the valid storage classes and canned ACLs; check
	// membership directly on those slices rather than building lookup maps
	// on every call. An empty ACL is allowed (S3 default), an empty storage
	// class is not (the default config populates it).
	if !slices.Contains(s3types.StorageClassStandard.Values(), s3types.StorageClass(c.S3Uploader.StorageClass)) {
		errs = multierr.Append(errs, errors.New("invalid StorageClass"))
	}
	if c.S3Uploader.ACL != "" && !slices.Contains(s3types.ObjectCannedACLPrivate.Values(), s3types.ObjectCannedACL(c.S3Uploader.ACL)) {
		errs = multierr.Append(errs, errors.New("invalid ACL"))
	}

	// Only gzip and zstd (or no compression at all) are supported.
	if compression := c.S3Uploader.Compression; compression.IsCompressed() &&
		compression != configcompression.TypeGzip && compression != configcompression.TypeZstd {
		errs = multierr.Append(errs, errors.New("unknown compression type"))
	}

	switch c.S3Uploader.RetryMode {
	case "standard", "adaptive", "nop":
		// valid
	default:
		errs = multierr.Append(errs, errors.New("invalid retry mode, must be either 'standard', 'adaptive' or 'nop'"))
	}

	// Empty means "use the default random-string generator"; otherwise only
	// the supported generator names are accepted.
	if c.S3Uploader.UniqueKeyFuncName != "" && c.S3Uploader.UniqueKeyFuncName != "uuidv7" {
		errs = multierr.Append(errs, errors.New("invalid UniqueKeyFuncName"))
	}
	return errs
}