15 changes: 15 additions & 0 deletions examples/elasticsearch-to-clickhouse-starter/README.md
@@ -0,0 +1,15 @@
Quesma "elasticsearch-to-clickhouse" starter
============================================

This is a minimal Quesma setup to get you started.
Quesma exposes an Elasticsearch-compatible HTTP REST API at http://localhost:8080.
You can view your data through Kibana at http://localhost:5601.

The docker-compose file located in this folder creates four containers: Quesma, Elasticsearch, ClickHouse, and Kibana.

All data is stored in ClickHouse. No sample data sets are loaded.

To run this example, simply execute:
```shell
docker-compose up -d
```
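Once the containers are up, a quick way to verify the setup is to ingest a document through Quesma and read it back using the standard Elasticsearch APIs. This is only a smoke-test sketch: the index name `logs` is arbitrary, and it assumes `curl` is installed and authentication is disabled (as in the starter config):
```shell
# Index a sample document through Quesma's Elasticsearch-compatible endpoint
curl -X POST "http://localhost:8080/logs/_doc" \
  -H "Content-Type: application/json" \
  -d '{"message": "hello from quesma", "severity": "info"}'

# Search it back the same way Kibana would
curl -X POST "http://localhost:8080/logs/_search" \
  -H "Content-Type: application/json" \
  -d '{"query": {"match_all": {}}}'
```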
48 changes: 48 additions & 0 deletions examples/elasticsearch-to-clickhouse-starter/config/starter-config.yml
@@ -0,0 +1,48 @@
frontendConnectors:
  - name: elastic-ingest
    type: elasticsearch-fe-ingest
    config:
      listenPort: 8080
      disableAuth: true
  - name: elastic-query
    type: elasticsearch-fe-query
    config:
      listenPort: 8080
backendConnectors:
  - name: my-minimal-elasticsearch
    type: elasticsearch
    config:
      url: "http://elasticsearch:9200"
      adminUrl: "http://localhost:5601"
  - name: my-clickhouse-data-source
    type: clickhouse-os
    config:
      url: "clickhouse://clickhouse:9000"
      adminUrl: "http://localhost:8123/play"
ingestStatistics: true
processors:
  - name: my-query-processor
    type: quesma-v1-processor-query
    config:
      indexes:
        "*":
          useCommonTable: true
          target:
            - my-clickhouse-data-source
  - name: my-ingest-processor
    type: quesma-v1-processor-ingest
    config:
      indexes:
        "*":
          useCommonTable: true
          target:
            - my-clickhouse-data-source
pipelines:
  - name: my-pipeline-elasticsearch-query-clickhouse
    frontendConnectors: [ elastic-query ]
    processors: [ my-query-processor ]
    backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
  - name: my-pipeline-elasticsearch-ingest-to-clickhouse
    frontendConnectors: [ elastic-ingest ]
    processors: [ my-ingest-processor ]
    backendConnectors: [ my-minimal-elasticsearch, my-clickhouse-data-source ]
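Both processors route every index (`"*"`) to the ClickHouse backend with `useCommonTable` enabled, so ingested documents end up in ClickHouse rather than Elasticsearch. One way to confirm this after ingesting some data, assuming the compose file's default ClickHouse setup (user `default`, no password, `default` database):
```shell
# List the tables Quesma has created in ClickHouse
docker compose exec clickhouse clickhouse-client --query "SHOW TABLES"
```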
70 changes: 70 additions & 0 deletions examples/elasticsearch-to-clickhouse-starter/docker-compose.yml
@@ -0,0 +1,70 @@
services:
  quesma:
    image: quesma/quesma:latest
    environment:
      - QUESMA_elasticsearch_url=http://elasticsearch:9200
      - QUESMA_port=8080
      - QUESMA_CONFIG_FILE=/config/starter-config.yml
    depends_on:
      elasticsearch:
        condition: service_healthy
    ports:
      - "9999:9999"
      - "8080:8080"
    volumes:
      - ./config:/config
    deploy:
      resources:
        limits:
          memory: 512M
    restart: unless-stopped
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.11.1
    container_name: elasticsearch
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
      - "ES_JAVA_OPTS=-Xmx2G"
    ports:
      - "9201:9200"
      - "9300:9300"
    healthcheck:
      test: curl -s http://elasticsearch:9200 >/dev/null || exit 1
      start_period: 1m
      interval: 1s
      timeout: 1s
    deploy:
      resources:
        limits:
          memory: 4G
  kibana:
    image: docker.elastic.co/kibana/kibana:8.11.1
    environment:
      ELASTICSEARCH_HOSTS: '["http://quesma:8080"]'
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: 'QUESMAQUESMAQUESMAQUESMAQUESMAQUESMAQUESMAQUESMA' # Just to get rid of annoying ERROR in logs
    depends_on:
      quesma:
        condition: service_healthy
      elasticsearch:
        condition: service_healthy
    ports:
      - "5601:5601"
    restart: unless-stopped
    healthcheck:
      test: "curl -s http://localhost:5601/api/status >/dev/null || exit 1"
      start_period: 2m
      interval: 1s
      timeout: 1s
#    volumes:
#      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
  clickhouse:
    # user: 'default', no password
    image: clickhouse/clickhouse-server:24.5.3.5-alpine
    ports:
      - "8123:8123"
      - "9000:9000"
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://clickhouse:8123/ping || exit 1
      interval: 1s
      timeout: 1s
      start_period: 1m
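The health checks give Elasticsearch up to a minute and Kibana up to two minutes to start, so the stack may take a short while to settle after `docker-compose up -d`. Plain Docker Compose commands are enough to watch it come up; port 9999, which the compose file also publishes for Quesma, is assumed here to serve its debugging/administration UI:
```shell
# Check container health and follow Quesma's logs while the stack starts
docker compose ps
docker compose logs -f quesma
```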