Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions .github/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,35 @@

The following guide describes how to set up the OpenTelemetry demo with Elastic Observability using [Docker compose](#docker-compose) or [Kubernetes](#kubernetes).

## Devoteam Elastic Cloud setup TL;DR

This demo writes OpenTelemetry data to multiple outputs:
* Jaeger
* Prometheus
* Elasticsearch APM
* OpenSearch Observability

The configuration has already been set up to write to the Devoteam Elastic Cloud deployment. See `src/otelcollector/otelcol-config.yml`.
The docker-compose file has been updated to include a 2-node OpenSearch cluster.

How to run locally using docker-compose:
```
### OpenSearch/Elasticsearch require a change to a kernel memory setting

sudo sysctl -w vm.max_map_count=262144
docker compose pull
docker compose up -d
```

Once everything is running, the following endpoints should be available:
* Webstore: http://localhost:8080/
* Grafana: http://localhost:8080/grafana/
* Feature Flags UI: http://localhost:8080/feature/
* Load Generator UI: http://localhost:8080/loadgen/
* Jaeger UI: http://localhost:8080/jaeger/ui/
* OpenSearch Dashboards: http://localhost:5601 (username/password: admin/admin)
* Elasticsearch Kibana: https://devoteam-observability.kb.westeurope.azure.elastic-cloud.com:9243

## Docker compose

1. Start a free trial on [Elastic Cloud](https://cloud.elastic.co/) and copy the `endpoint` and `secretToken` from the Elastic APM setup instructions in your Kibana.
Expand Down
74 changes: 74 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -711,3 +711,77 @@ services:
- recommendationservice
- shippingservice
- quoteservice

#
# Opensearch stuff
#

# First OpenSearch data node. The service name doubles as the hostname
# inside the Docker network (i.e. https://opensearch-node1:9200).
opensearch-node1:
  # NOTE(review): "latest" is unpinned — pin a version so the two nodes and
  # the dashboards image cannot drift apart on re-pull.
  image: opensearchproject/opensearch:latest
  container_name: opensearch-node1
  environment:
    - cluster.name=opensearch-cluster  # both nodes must share this name
    - node.name=opensearch-node1
    - discovery.seed_hosts=opensearch-node1,opensearch-node2  # peers to contact at startup
    - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2  # manager-eligible nodes
    - bootstrap.memory_lock=true  # lock the JVM heap; prevents swapping
    - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"  # fixed min/max JVM heap
  ulimits:
    memlock:
      soft: -1  # unlimited memlock — required for bootstrap.memory_lock=true
      hard: -1
    nofile:
      soft: 65536  # OpenSearch requires at least 65536 open file descriptors
      hard: 65536
  volumes:
    - opensearch-data1:/usr/share/opensearch/data  # persistent index storage
  ports:
    - "9200:9200"  # REST API
    - "9600:9600"  # performance analyzer

# Second OpenSearch data node; mirrors opensearch-node1 (same image, same
# cluster settings) but is not published on the host — it is reached only
# via the Docker network.
opensearch-node2:
  image: opensearchproject/opensearch:latest  # keep identical to node1 to avoid mixed-version clusters
  container_name: opensearch-node2
  environment:
    - cluster.name=opensearch-cluster
    - node.name=opensearch-node2
    - discovery.seed_hosts=opensearch-node1,opensearch-node2
    - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2
    - bootstrap.memory_lock=true
    - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
  ulimits:
    memlock:
      soft: -1  # unlimited memlock — required for bootstrap.memory_lock=true
      hard: -1
    nofile:
      soft: 65536
      hard: 65536
  volumes:
    - opensearch-data2:/usr/share/opensearch/data  # separate volume per node

# Web UI for the OpenSearch cluster (http://localhost:5601).
opensearch-dashboards:
  # The dashboards version must match the opensearch version on the data nodes.
  image: opensearchproject/opensearch-dashboards:latest
  container_name: opensearch-dashboards
  ports:
    # Quoted to match every other port mapping in this file and to avoid
    # YAML's colon-separated-integer pitfalls with unquoted "N:N" scalars.
    - "5601:5601"
  expose:
    - "5601"
  environment:
    # Cluster nodes that Dashboards queries (JSON array passed as a string).
    OPENSEARCH_HOSTS: '["https://opensearch-node1:9200","https://opensearch-node2:9200"]'

# Data Prepper ingests OTLP traces from the collector (port 21890) and
# writes trace-analytics indices into the OpenSearch cluster.
data-prepper:
  restart: unless-stopped
  container_name: data-prepper
  image: opensearchproject/data-prepper:2
  volumes:
    - ./src/opensearch/entry-pipeline.yml:/usr/share/data-prepper/pipelines/pipelines.yaml
    - ./src/opensearch/data_prepper_config.yml:/usr/share/data-prepper/config/data-prepper-config.yaml
  ports:
    - "21890:21890"  # OTLP/gRPC trace source (see entry-pipeline.yml)
  # NOTE(review): depends_on only orders container start-up; it does not wait
  # for OpenSearch to be healthy. Data Prepper's restart policy covers the gap.
  depends_on:
    - opensearch-node1
    - opensearch-node2

# Named volumes backing the two OpenSearch data nodes (default local driver).
volumes:
  opensearch-data1:
  opensearch-data2:
1 change: 1 addition & 0 deletions src/opensearch/data_prepper_config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# NOTE(review): disables TLS on Data Prepper's own server endpoints — fine for
# a local demo; enable and provide certificates for anything production-facing.
ssl: false
39 changes: 39 additions & 0 deletions src/opensearch/entry-pipeline.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Data Prepper pipeline definitions: the entry pipeline receives OTLP traces
# from the OpenTelemetry Collector and fans them out into two downstream
# pipelines that index into OpenSearch trace-analytics indices.
entry-pipeline:
  delay: "100"
  source:
    otel_trace_source:
      ssl: false  # plaintext gRPC from the collector (demo only)
  sink:
    - pipeline:
        name: "raw-pipeline"
    - pipeline:
        name: "service-map-pipeline"

# Raw spans → trace-analytics-raw index.
raw-pipeline:
  source:
    pipeline:
      name: "entry-pipeline"
  processor:
    - otel_trace_raw:
  sink:
    - opensearch:
        hosts: ["https://opensearch-node1:9200"]
        # cert: "/usr/share/data-prepper/root-ca.pem"
        insecure: true  # skip TLS verification of the self-signed node cert — demo only
        # SECURITY(review): default OpenSearch credentials; rotate outside demos.
        username: "admin"
        password: "admin"
        index_type: trace-analytics-raw

# Service dependency map → trace-analytics-service-map index.
service-map-pipeline:
  delay: "100"
  source:
    pipeline:
      name: "entry-pipeline"
  processor:
    - service_map_stateful:
  sink:
    - opensearch:
        hosts: ["https://opensearch-node1:9200"]
        # cert: "/usr/share/data-prepper/root-ca.pem"
        insecure: true  # demo only — see note above in this same sink pattern
        username: "admin"
        password: "admin"
        index_type: trace-analytics-service-map
46 changes: 23 additions & 23 deletions src/otelcollector/otelcol-config-extras.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,28 +2,28 @@
# SPDX-License-Identifier: Apache-2.0
# extra settings to be merged into OpenTelemetry Collector configuration
# do not delete this file
exporters:
otlp/elastic:
# !!! Elastic APM https endpoint WITHOUT the "https://" prefix
endpoint: "YOUR_APM_ENDPOINT_WITHOUT_HTTPS_PREFIX"
headers:
Authorization: "Bearer YOUR_APM_SECRET_TOKEN"
# exporters:
# otlp/elastic:
# # !!! Elastic APM https endpoint WITHOUT the "https://" prefix
# endpoint: "devoteam-observability.apm.westeurope.azure.elastic-cloud.com:443"
# headers:
# Authorization: ""

processors:
spanmetrics/elastic:
metrics_exporter: otlp/elastic
# processors:
# spanmetrics/elastic:
# metrics_exporter: otlp/elastic

service:
pipelines:
traces:
receivers: [otlp]
processors: [spanmetrics/elastic, batch]
exporters: [otlp/elastic]
metrics:
receivers: [otlp]
processors: [filter, transform, batch]
exporters: [otlp/elastic]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp/elastic]
# service:
# pipelines:
# traces:
# receivers: [otlp]
# processors: [spanmetrics/elastic, batch]
# exporters: [otlp/elastic]
# metrics:
# receivers: [otlp]
# processors: [filter, transform, batch]
# exporters: [otlp/elastic]
# logs:
# receivers: [otlp]
# processors: [batch]
# exporters: [otlp/elastic]
23 changes: 20 additions & 3 deletions src/otelcollector/otelcol-config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,16 +13,30 @@ receivers:
- "https://*"

exporters:
# Jaeger output
otlp:
endpoint: "jaeger:4317"
tls:
insecure: true
# Opensearch dataprepper output
otlp/2:
endpoint: data-prepper:21890
tls:
insecure: true
insecure_skip_verify: true
logging:
# Prometheus for grafana
prometheus:
endpoint: "otelcol:9464"
resource_to_telemetry_conversion:
enabled: true
enable_open_metrics: true
# Elastic output
otlp/elastic:
# !!! Elastic APM https endpoint WITHOUT the "https://" prefix
endpoint: "devoteam-observability.apm.westeurope.azure.elastic-cloud.com:443"
headers:
Authorization: ""

processors:
batch:
Expand All @@ -37,6 +51,8 @@ processors:
match_type: strict
metric_names:
- queueSize
spanmetrics/elastic:
metrics_exporter: otlp/elastic

connectors:
spanmetrics:
Expand All @@ -46,12 +62,13 @@ service:
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp, logging, spanmetrics]
exporters: [otlp, logging, spanmetrics,otlp/elastic,otlp/2]
metrics:
receivers: [otlp, spanmetrics]
processors: [filter, transform, batch]
exporters: [prometheus, logging]
exporters: [prometheus, logging,otlp/elastic]
logs:
receivers: [otlp]
processors: [batch]
exporters: [logging]
exporters: [logging,otlp/elastic]