[receiver/awsfirehose] Add support for encoding extensions #37262

Open

wants to merge 11 commits into main
27 changes: 27 additions & 0 deletions .chloggen/firehose-encoding-extension.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: awsfirehosereceiver

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Add support for encoding extensions

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [37113]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext: Adds the `encoding` config setting and deprecates the `record_type` setting.

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
19 changes: 11 additions & 8 deletions receiver/awsfirehosereceiver/README.md
@@ -45,25 +45,28 @@ See [documentation](https://github.com/open-telemetry/opentelemetry-collector/bl

A `cert_file` and `key_file` are required.

### record_type:
The type of record being received from the delivery stream. Each unmarshaler handles a specific type, so the field allows the receiver to use the correct one.
### encoding:

The ID of an [encoding extension](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/encoding) for decoding logs or metrics.
This configuration also supports the built-in encodings listed in the [Encodings](#encodings) section.
If no encoding is specified, the receiver defaults to a signal-specific encoding: `cwmetrics` for metrics and `cwlogs` for logs.
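
For example, a configuration that decodes Firehose records with an encoding extension might look like the following sketch (the `text_encoding` extension and the `debug` exporter are illustrative; any encoding extension registered under `service.extensions` can be referenced):

```yaml
extensions:
  text_encoding:

receivers:
  awsfirehose:
    endpoint: 0.0.0.0:4433
    tls:
      cert_file: server.crt
      key_file: server.key
    # Reference the extension by its component ID.
    encoding: text_encoding

exporters:
  debug: {}

service:
  extensions: [text_encoding]
  pipelines:
    logs:
      receivers: [awsfirehose]
      exporters: [debug]
```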

default: `cwmetrics`
### record_type:

See the [Record Types](#record-types) section for all available options.
Deprecated: use `encoding` instead. `record_type` is an alias for `encoding` and will be removed in a future release.

### access_key (Optional):
The access key to be checked on each request received. This can be set when creating or updating the delivery stream.
See [documentation](https://docs.aws.amazon.com/firehose/latest/dev/create-destination.html#create-destination-http) for details.

## Record Types
## Encodings

### cwmetrics
The record type for the CloudWatch metric stream. Expects the format for the records to be JSON.
The encoding for the CloudWatch metric stream. Expects records to be JSON.
See [documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html) for details.
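
Each Firehose record in this encoding carries one or more JSON objects, one per line, of roughly the following shape (values are illustrative and mirror the benchmark fixture added in this PR):

```json
{
  "metric_stream_name": "stream_0",
  "account_id": "1234567890",
  "region": "us-east-1",
  "namespace": "AWS/NATGateway",
  "metric_name": "metric_0",
  "dimensions": {"NatGatewayId": "nat-01a4160dfb995b990"},
  "timestamp": 1643916720000,
  "value": {"max": 0.0, "min": 0.0, "sum": 0.0, "count": 2.0},
  "unit": "Count"
}
```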

### cwlogs
The record type for the CloudWatch log stream. Expects the format for the records to be JSON.
The encoding for the CloudWatch log stream. Expects records to be JSON.
For example:

```json
@@ -84,5 +87,5 @@ For example:
```

### otlp_v1
The OTLP v1 format as produced by CloudWatch metric streams.
The OTLP v1 encoding as produced by CloudWatch metric streams.
See [documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats-opentelemetry-100.html) for details.
116 changes: 116 additions & 0 deletions receiver/awsfirehosereceiver/benchmark_test.go
@@ -0,0 +1,116 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package awsfirehosereceiver

import (
"bytes"
"compress/gzip"
"context"
"fmt"
"math/rand/v2"
"net/http"
"testing"

"go.opentelemetry.io/collector/consumer/consumertest"
"go.uber.org/zap"

"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler/cwlog"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream"
)

func BenchmarkLogsConsumer_cwlogs(b *testing.B) {
// numLogGroups is the maximum number of unique log groups
// to use across the generated logs, using a random generator.
const numLogGroups = 10

// numRecords is the number of records in the Firehose envelope.
for _, numRecords := range []int{10, 100} {
// numLogs is the number of CloudWatch log records within a Firehose record.
for _, numLogs := range []int{1, 10} {
b.Run(fmt.Sprintf("%dresources_%drecords_%dlogs", numLogGroups, numRecords, numLogs), func(b *testing.B) {
lc := &logsConsumer{
unmarshaler: cwlog.NewUnmarshaler(zap.NewNop()),
consumer: consumertest.NewNop(),
}
records := make([][]byte, numRecords)
for i := range records {
records[i] = makeCloudWatchLogRecord(numLogs, numLogGroups)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
code, err := lc.Consume(context.Background(), records, nil)
if err != nil {
b.Fatal(err)
}
if code != http.StatusOK {
b.Fatalf("expected status code 200, got %d", code)
}
}
})
}
}
}

func BenchmarkMetricsConsumer_cwmetrics(b *testing.B) {
// numStreams is the maximum number of unique metric streams
// to use across the generated metrics, using a random generator.
const numStreams = 10

// numRecords is the number of records in the Firehose envelope.
for _, numRecords := range []int{10, 100} {
// numMetrics is the number of CloudWatch metrics within a Firehose record.
for _, numMetrics := range []int{1, 10} {
b.Run(fmt.Sprintf("%dresources_%drecords_%dmetrics", numStreams, numRecords, numMetrics), func(b *testing.B) {
mc := &metricsConsumer{
unmarshaler: cwmetricstream.NewUnmarshaler(zap.NewNop()),
consumer: consumertest.NewNop(),
}
records := make([][]byte, numRecords)
for i := range records {
records[i] = makeCloudWatchMetricRecord(numMetrics, numStreams)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
code, err := mc.Consume(context.Background(), records, nil)
if err != nil {
b.Fatal(err)
}
if code != http.StatusOK {
b.Fatalf("expected status code 200, got %d", code)
}
}
})
}
}
}

func makeCloudWatchLogRecord(numLogs, numLogGroups int) []byte {
var buf bytes.Buffer
w := gzip.NewWriter(&buf)
for i := 0; i < numLogs; i++ {
group := rand.IntN(numLogGroups)
fmt.Fprintf(w,
`{"messageType":"DATA_MESSAGE","owner":"123","logGroup":"group_%d","logStream":"stream","logEvents":[{"id":"the_id","timestamp":1725594035523,"message":"message %d"}]}`,
group, i,
)
fmt.Fprintln(w)
}
if err := w.Close(); err != nil {
panic(err)
}
return buf.Bytes()
}

func makeCloudWatchMetricRecord(numMetrics, numStreams int) []byte {
var buf bytes.Buffer
for i := 0; i < numMetrics; i++ {
stream := rand.IntN(numStreams)
fmt.Fprintf(&buf,
`{"metric_stream_name":"stream_%d","account_id":"1234567890","region":"us-east-1","namespace":"AWS/NATGateway","metric_name":"metric_%d","dimensions":{"NatGatewayId":"nat-01a4160dfb995b990"},"timestamp":1643916720000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}`,
stream, i,
)
fmt.Fprintln(&buf)
}
return buf.Bytes()
}
25 changes: 19 additions & 6 deletions receiver/awsfirehosereceiver/config.go
@@ -8,15 +8,24 @@ import (

"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/config/configopaque"
"go.uber.org/zap"
)

var errRecordTypeEncodingSet = errors.New("record_type must not be set when encoding is set")

type Config struct {
// ServerConfig is used to set up the Firehose delivery
// endpoint. The Firehose delivery stream expects an HTTPS
// endpoint, so TLSSettings must be used to enable that.
confighttp.ServerConfig `mapstructure:",squash"`
// RecordType is the key used to determine which unmarshaler to use
// when receiving the requests.
// Encoding identifies the encoding of records received from
// Firehose. Defaults to signal-specific encodings: "cwlogs"
// for logs, and "cwmetrics" for metrics.
Encoding string `mapstructure:"encoding"`
// RecordType is an alias for Encoding for backwards compatibility.
// It is an error to specify both encoding and record_type.
//
// Deprecated: use Encoding instead.
RecordType string `mapstructure:"record_type"`
// AccessKey is checked against the one received with each request.
// This can be set when creating or updating the Firehose delivery
@@ -30,10 +39,14 @@ func (c *Config) Validate() error {
if c.Endpoint == "" {
return errors.New("must specify endpoint")
}
// If a record type is specified, it must be valid.
// An empty string is acceptable, however, because it will use a telemetry-type-specific default.
if c.RecordType != "" {
return validateRecordType(c.RecordType)
Member

I don't think validation of the record type or encoding can be deferred. This has to fail fast to alert the user to their configuration error rather than allowing the collector to start and then failing to process received data.

Contributor Author (@axw, Jan 17, 2025)

The collector will still fail fast. e.g.

$ cat local/config.yaml 
receivers:
  awsfirehose:
    record_type: invalid

exporters:
  debug: {}

service:
  pipelines:
    logs:
      receivers: [awsfirehose]
      processors: []
      exporters: [debug]

$ ./bin/otelcontribcol_linux_amd64 --config local/config.yaml
2025-01-17T10:51:28.527+0800    info    [email protected]/service.go:164   Setting up own telemetry...
2025-01-17T10:51:28.527+0800    info    telemetry/metrics.go:70 Serving metrics {"address": "localhost:8888", "metrics level": "Normal"}
2025-01-17T10:51:28.527+0800    info    builders/builders.go:26 Development component. May change in the future.        {"kind": "exporter", "data_type": "logs", "name": "debug"}
2025-01-17T10:51:28.527+0800    warn    [email protected]/config.go:48       record_type is deprecated, and will be removed in a future version. Use encoding instead.       {"kind": "receiver", "name": "awsfirehose", "data_type": "logs"}
2025-01-17T10:51:28.530+0800    info    [email protected]/service.go:230   Starting otelcontribcol...      {"Version": "0.117.0-dev", "NumCPU": 16}
2025-01-17T10:51:28.530+0800    info    extensions/extensions.go:39     Starting extensions...
2025-01-17T10:51:28.530+0800    error   graph/graph.go:426      Failed to start component       {"error": "unknown encoding extension \"invalid\"", "type": "Receiver", "id": "awsfirehose"}
2025-01-17T10:51:28.530+0800    info    [email protected]/service.go:295   Starting shutdown...
2025-01-17T10:51:28.530+0800    info    extensions/extensions.go:66     Stopping extensions...
2025-01-17T10:51:28.530+0800    info    [email protected]/service.go:309   Shutdown complete.
Error: cannot start pipelines: unknown encoding extension "invalid"
2025/01/17 10:51:28 collector server run finished with error: cannot start pipelines: unknown encoding extension "invalid"

It's doing a bit more work than before it gets to the error, but AFAIK it's not possible to access extensions earlier than the Start method.
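
For illustration, a minimal sketch of the kind of lookup that can only happen once `Start` has access to the host's extensions (not the PR's actual code; the helper name and error text are illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/pdata/plog"
)

// findLogsUnmarshaler resolves an encoding extension by component ID. It can
// only run inside Start, because host.GetExtensions() is not available to the
// receiver any earlier than that.
func findLogsUnmarshaler(host component.Host, encoding string) (plog.Unmarshaler, error) {
	for id, ext := range host.GetExtensions() {
		if id.String() != encoding {
			continue
		}
		unmarshaler, ok := ext.(plog.Unmarshaler)
		if !ok {
			return nil, fmt.Errorf("extension %q is not a logs unmarshaler", encoding)
		}
		return unmarshaler, nil
	}
	return nil, fmt.Errorf("unknown encoding extension %q", encoding)
}
```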

if c.RecordType != "" && c.Encoding != "" {
return errors.New("record_type must not be set when encoding is set")
}
return nil
}

func handleDeprecatedConfig(cfg *Config, logger *zap.Logger) {
if cfg.RecordType != "" {
logger.Warn("record_type is deprecated, and will be removed in a future version. Use encoding instead.")
}
}
45 changes: 28 additions & 17 deletions receiver/awsfirehosereceiver/config_test.go
@@ -19,7 +19,7 @@ import (

func TestLoadConfig(t *testing.T) {
for _, configType := range []string{
"cwmetrics", "cwlogs", "otlp_v1", "invalid",
Member

Testing for an invalid record type or encoding is different from testing that both an encoding and record type have been provided. Both tests should remain.

Contributor Author

Because we're now supporting extensions, the record type is only known to be valid/invalid at the time we call Start. There's a test case in there for invalid encoding/record type. See the WithUnknownEncoding test cases for TestLogsReceiver_Start and TestMetricsReceiver_Start.
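
As a rough sketch, one of those Start-time tests could look like the following (constructor and helper names follow the patterns visible in this PR plus the standard `receivertest`, `consumertest`, and `componenttest` helpers; the actual tests may differ):

```go
// Sketch only: an unknown encoding should be rejected at Start rather than at
// config validation time. The nop host has no extensions registered, so
// resolving "invalid" fails with the error shown in the log output above.
func TestLogsReceiver_Start_UnknownEncoding(t *testing.T) {
	cfg := createDefaultConfig().(*Config)
	cfg.Encoding = "invalid"
	r, err := newLogsReceiver(cfg, receivertest.NewNopSettings(), consumertest.NewNop())
	require.NoError(t, err)
	err = r.Start(context.Background(), componenttest.NewNopHost())
	require.ErrorContains(t, err, `unknown encoding extension "invalid"`)
}
```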

"cwmetrics", "cwlogs", "otlp_v1",
} {
t.Run(configType, func(t *testing.T) {
fileName := configType + "_config.yaml"
@@ -34,24 +34,35 @@ func TestLoadConfig(t *testing.T) {
require.NoError(t, sub.Unmarshal(cfg))

err = component.ValidateConfig(cfg)
if configType == "invalid" {
assert.Error(t, err)
} else {
assert.NoError(t, err)
require.Equal(t, &Config{
RecordType: configType,
AccessKey: "some_access_key",
ServerConfig: confighttp.ServerConfig{
Endpoint: "0.0.0.0:4433",
TLSSetting: &configtls.ServerConfig{
Config: configtls.Config{
CertFile: "server.crt",
KeyFile: "server.key",
},
assert.NoError(t, err)
require.Equal(t, &Config{
RecordType: configType,
AccessKey: "some_access_key",
ServerConfig: confighttp.ServerConfig{
Endpoint: "0.0.0.0:4433",
TLSSetting: &configtls.ServerConfig{
Config: configtls.Config{
CertFile: "server.crt",
KeyFile: "server.key",
},
},
}, cfg)
}
},
}, cfg)
})
}
}

func TestLoadConfigInvalid(t *testing.T) {
cm, err := confmaptest.LoadConf(filepath.Join("testdata", "invalid_config.yaml"))
require.NoError(t, err)

factory := NewFactory()
cfg := factory.CreateDefaultConfig()

sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "").String())
require.NoError(t, err)
require.NoError(t, sub.Unmarshal(cfg))

err = component.ValidateConfig(cfg)
assert.Equal(t, errRecordTypeEncodingSet, err)
}
51 changes: 6 additions & 45 deletions receiver/awsfirehosereceiver/factory.go
@@ -5,34 +5,19 @@ package awsfirehosereceiver // import "github.com/open-telemetry/opentelemetry-c

import (
"context"
"errors"

"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"

"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/metadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler/cwlog"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler/otlpmetricstream"
)

const (
defaultEndpoint = "localhost:4433"
)

var (
errUnrecognizedRecordType = errors.New("unrecognized record type")
availableRecordTypes = map[string]bool{
cwmetricstream.TypeStr: true,
cwlog.TypeStr: true,
otlpmetricstream.TypeStr: true,
}
)

// NewFactory creates a receiver factory for awsfirehose. Currently, only
// available in metrics pipelines.
func NewFactory() receiver.Factory {
@@ -43,34 +28,6 @@ func NewFactory() receiver.Factory {
receiver.WithLogs(createLogsReceiver, metadata.LogsStability))
}

// validateRecordType checks the available record types for the
// passed in one and returns an error if not found.
func validateRecordType(recordType string) error {
if _, ok := availableRecordTypes[recordType]; !ok {
return errUnrecognizedRecordType
}
return nil
}

// defaultMetricsUnmarshalers creates a map of the available metrics
// unmarshalers.
func defaultMetricsUnmarshalers(logger *zap.Logger) map[string]unmarshaler.MetricsUnmarshaler {
cwmsu := cwmetricstream.NewUnmarshaler(logger)
otlpv1msu := otlpmetricstream.NewUnmarshaler(logger)
return map[string]unmarshaler.MetricsUnmarshaler{
cwmsu.Type(): cwmsu,
otlpv1msu.Type(): otlpv1msu,
}
}

// defaultLogsUnmarshalers creates a map of the available logs unmarshalers.
func defaultLogsUnmarshalers(logger *zap.Logger) map[string]unmarshaler.LogsUnmarshaler {
u := cwlog.NewUnmarshaler(logger)
return map[string]unmarshaler.LogsUnmarshaler{
u.Type(): u,
}
}

// createDefaultConfig creates a default config with the endpoint set
// to port 8443 and the record type set to the CloudWatch metric stream.
func createDefaultConfig() component.Config {
@@ -88,7 +45,9 @@ func createMetricsReceiver(
cfg component.Config,
nextConsumer consumer.Metrics,
) (receiver.Metrics, error) {
return newMetricsReceiver(cfg.(*Config), set, defaultMetricsUnmarshalers(set.Logger), nextConsumer)
c := cfg.(*Config)
handleDeprecatedConfig(c, set.Logger)
return newMetricsReceiver(c, set, nextConsumer)
}

// createMetricsReceiver implements the CreateMetricsReceiver function type.
@@ -98,5 +57,7 @@ func createLogsReceiver(
cfg component.Config,
nextConsumer consumer.Logs,
) (receiver.Logs, error) {
return newLogsReceiver(cfg.(*Config), set, defaultLogsUnmarshalers(set.Logger), nextConsumer)
c := cfg.(*Config)
handleDeprecatedConfig(c, set.Logger)
return newLogsReceiver(c, set, nextConsumer)
}