diff --git a/templates/compose/posthog.yaml b/templates/compose/posthog.yaml
index d480451030..4f31dca510 100644
--- a/templates/compose/posthog.yaml
+++ b/templates/compose/posthog.yaml
@@ -1,694 +1,94 @@
-# ignore: true
# documentation: https://posthog.com
# slogan: The single platform to analyze, test, observe, and deploy new features
# category: analytics
# tags: analytics, product, open-source, self-hosted, ab-testing, event-tracking
# logo: svgs/posthog.svg
# minversion: 4.0.0-beta.222
-
services:
db:
- image: postgres:12-alpine
- volumes:
- - posthog-postgres-data:/var/lib/postgresql/data
+ image: postgres:15.12-alpine
environment:
- POSTGRES_USER=posthog
- POSTGRES_DB=posthog
- - POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
healthcheck:
- test: ["CMD-SHELL", "pg_isready -U posthog"]
- interval: 2s
- timeout: 10s
- retries: 15
+ test: ['CMD-SHELL', 'pg_isready -U posthog']
+ interval: 5s
+ timeout: 30s
+ retries: 30
+ start_period: 10s
+ volumes:
+ - posthog-postgres-data:/var/lib/postgresql/data
redis:
- image: redis:6.2.7-alpine
- command: redis-server --maxmemory-policy allkeys-lru --maxmemory 200mb
-
- clickhouse:
- image: clickhouse/clickhouse-server:23.11.2.11-alpine
+ image: redis:7.2-alpine
+ command: redis-server --maxmemory-policy allkeys-lru --maxmemory 500mb
+ healthcheck:
+ test: ['CMD', 'redis-cli', 'ping']
+ interval: 3s
+ timeout: 10s
+ retries: 10
volumes:
- - type: bind
- source: ./idl/events_dead_letter_queue.json
- target: /idl/events_dead_letter_queue.json
- content: |
- {
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "$id": "file://posthog/idl/events_dead_letter_queue.json",
- "title": "events_dead_letter_queue",
- "description": "Events that failed to be validated or processed and are sent to the DLQ",
- "type": "object",
- "properties": {
- "id": {
- "description": "uuid for the submission",
- "type": "string"
- },
- "event_uuid": {
- "description": "uuid for the event",
- "type": "string"
- },
- "event": {
- "description": "event type",
- "type": "string"
- },
- "properties": {
- "description": "String representation of the properties json object",
- "type": "string"
- },
- "distinct_id": {
- "description": "PostHog distinct_id",
- "type": "string"
- },
- "team_id": {
- "description": "team_id (maps to the project under the organization)",
- "type": "number"
- },
- "elements_chain": {
- "description": "Used for autocapture. DOM element hierarchy",
- "type": "string"
- },
- "created_at": {
- "description": "Used for autocapture. DOM element hierarchy",
- "type": "number"
- },
- "ip": {
- "description": "IP Address of the associated with the event",
- "type": "string"
- },
- "site_url": {
- "description": "Site URL associated with the event the event",
- "type": "string"
- },
- "now": {
- "description": "Timestamp of the DLQ event",
- "type": "number"
- },
- "raw_payload": {
- "description": "Raw payload of the event that failed to be consumed",
- "type": "string"
- },
- "error_timestamp": {
- "description": "Timestamp that the error of ingestion occurred",
- "type": "number"
- },
- "error_location": {
- "description": "Source of error if known",
- "type": "string"
- },
- "error": {
- "description": "Error if known",
- "type": "string"
- },
- "tags": {
- "description": "Tags associated with the error or event",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- },
- "required": ["raw_payload"]
- }
- - type: bind
- source: ./idl/events_json.json
- target: /idl/events_json.json
- content: |
- {
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "$id": "file://posthog/idl/events_json.json",
- "title": "events_json",
- "description": "Event schema that is destined for ClickHouse",
- "type": "object",
- "properties": {
- "uuid": {
- "description": "uuid for the event",
- "type": "string"
- },
- "event": {
- "description": "event type",
- "type": "string"
- },
- "properties": {
- "description": "String representation of the properties json object",
- "type": "string"
- },
- "timestamp": {
- "description": "Timestamp that the event occurred",
- "type": "number"
- },
- "team_id": {
- "description": "team_id (maps to the project under the organization)",
- "type": "number"
- },
- "distinct_id": {
- "description": "PostHog distinct_id",
- "type": "string"
- },
- "elements_chain": {
- "description": "Used for autocapture. DOM element hierarchy",
- "type": "string"
- },
- "created_at": {
- "description": "Timestamp when event was created",
- "type": "number"
- },
- "person_id": {
- "description": "UUID for the associated person if available",
- "type": "string"
- },
- "person_created_at": {
- "description": "Timestamp for when the associated person was created",
- "type": "number"
- },
- "person_properties": {
- "description": "String representation of the person JSON object",
- "type": "string"
- },
- "group0_properties": {
- "description": "String representation of a group's properties",
- "type": "string"
- },
- "group1_properties": {
- "description": "String representation of a group's properties",
- "type": "string"
- },
- "group2_properties": {
- "description": "String representation of a group's properties",
- "type": "string"
- },
- "group3_properties": {
- "description": "String representation of a group's properties",
- "type": "string"
- },
- "group4_properties": {
- "description": "String representation of a group's properties",
- "type": "string"
- },
- "group0_created_at": {
- "description": "Group's creation timestamp",
- "type": "number"
- },
- "group1_created_at": {
- "description": "Group's creation timestamp",
- "type": "number"
- },
- "group2_created_at": {
- "description": "Group's creation timestamp",
- "type": "number"
- },
- "group3_created_at": {
- "description": "Group's creation timestamp",
- "type": "number"
- },
- "group4_created_at": {
- "description": "Group's creation timestamp",
- "type": "number"
- }
- },
- "required": ["uuid", "event", "properties", "timestamp", "team_id"]
- }
- - type: bind
- source: ./idl/groups.json
- target: /idl/groups.json
- content: |
- {
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "$id": "file://posthog/idl/groups.json",
- "title": "groups",
- "description": "Groups schema that is destined for ClickHouse",
- "type": "object",
- "properties": {
- "group_type_index": {
- "description": "Group type index",
- "type": "number"
- },
- "group_key": {
- "description": "Group Key",
- "type": "string"
- },
- "created_at": {
- "description": "Group creation timestamp",
- "type": "number"
- },
- "team_id": {
- "description": "Team ID associated with group",
- "type": "number"
- },
- "group_properties": {
- "description": "String representation of group JSON properties object",
- "type": "string"
- }
- },
- "required": ["group_type_index", "group_key", "created_at", "team_id", "group_properties"]
- }
- - type: bind
- source: ./idl/idl.md
- target: /idl/idl.md
- content: |
- # IDL - Interface Definition Language
+ - posthog-redis-data:/data
- This directory is responsible for defining the schemas of the data between services.
- Primarily this will be between services and ClickHouse, but can be really any thing at the boundry of services.
-
- The reason why we do this is because it makes generating code, validating data, and understanding the system a whole lot easier. We've had a few customers request this of us for engineering a deeper integration with us.
- - type: bind
- source: ./idl/person.json
- target: /idl/person.json
- content: |
- {
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "$id": "file://posthog/idl/person.json",
- "title": "person",
- "description": "Person schema that is destined for ClickHouse",
- "type": "object",
- "properties": {
- "id": {
- "description": "UUID for the person",
- "type": "string"
- },
- "created_at": {
- "description": "Person creation timestamp",
- "type": "number"
- },
- "team_id": {
- "description": "Team ID associated with person",
- "type": "number"
- },
- "properties": {
- "description": "String representation of person JSON properties object",
- "type": "string"
- },
- "is_identified": {
- "description": "Boolean is the person identified?",
- "type": "boolean"
- },
- "is_deleted": {
- "description": "Boolean is the person deleted?",
- "type": "boolean"
- },
- "version": {
- "description": "Version field for collapsing later (psuedo-tombstone)",
- "type": "number"
- }
- },
- "required": ["id", "created_at", "team_id", "properties", "is_identified", "is_deleted", "version"]
- }
- - type: bind
- source: ./idl/person_distinct_id.json
- target: /idl/person_distinct_id.json
- content: |
- {
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "$id": "file://posthog/idl/person_distinct_id.json",
- "title": "person_distinct_id",
- "description": "Person distinct id schema that is destined for ClickHouse",
- "type": "object",
- "properties": {
- "distinct_id": {
- "description": "User provided ID for the distinct user",
- "type": "string"
- },
- "person_id": {
- "description": "UUID of the person",
- "type": "string"
- },
- "team_id": {
- "description": "Team ID associated with person_distinct_id",
- "type": "number"
- },
- "_sign": {
- "description": "Used for collapsing later different versions of a distinct id (psuedo-tombstone)",
- "type": "number"
- },
- "is_deleted": {
- "description": "Boolean is the person distinct_id deleted?",
- "type": "boolean"
- }
- },
- "required": ["distinct_id", "person_id", "team_id", "_sign", "is_deleted"]
- }
- - type: bind
- source: ./idl/person_distinct_id2.json
- target: /idl/person_distinct_id2.json
- content: |
- {
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "$id": "file://posthog/idl/person_distinct_id2.json",
- "title": "person_distinct_id2",
- "description": "Person distinct id2 schema that is destined for ClickHouse",
- "type": "object",
- "properties": {
- "distinct_id": {
- "description": "User provided ID for the distinct user",
- "type": "string"
- },
- "person_id": {
- "description": "UUID of the person",
- "type": "string"
- },
- "team_id": {
- "description": "Team ID associated with person_distinct_id",
- "type": "number"
- },
- "version": {
- "description": "Used for collapsing later different versions of a distinct id (psuedo-tombstone)",
- "type": "number"
- },
- "is_deleted": {
- "description": "Boolean is the person distinct_id deleted?",
- "type": "boolean"
- }
- },
- "required": ["distinct_id", "person_id", "team_id", "version", "is_deleted"]
- }
- - type: bind
- source: ./idl/plugin_log_entries.json
- target: /idl/plugin_log_entries.json
- content: |
- {
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "$id": "file://posthog/idl/plugin_log_entries.json",
- "title": "plugin_log_entries",
- "description": "Plugin log entries that are destined for ClickHouse",
- "type": "object",
- "properties": {
- "id": {
- "description": "UUID for the log entry",
- "type": "string"
- },
- "team_id": {
- "description": "Team ID associated with person_distinct_id",
- "type": "number"
- },
- "plugin_id": {
- "description": "Plugin ID associated with the log entry",
- "type": "number"
- },
- "plugin_config_id": {
- "description": "Plugin Config ID associated with the log entry",
- "type": "number"
- },
- "timestamp": {
- "description": "Timestamp for when the log entry was created",
- "type": "number"
- },
- "source": {
- "description": "Source of the log entry",
- "type": "string"
- },
- "type": {
- "description": "Log entry type",
- "type": "string"
- },
- "message": {
- "description": "Log entry body",
- "type": "string"
- },
- "instance_id": {
- "description": "UUID of the instance that generated the log entry",
- "type": "string"
- }
- },
- "required": [
- "id",
- "team_id",
- "plugin_id",
- "plugin_config_id",
- "timestamp",
- "source",
- "type",
- "message",
- "instance_id"
- ]
- }
- - type: bind
- source: ./docker/clickhouse/docker-entrypoint-initdb.d/init-db.sh
- target: /docker-entrypoint-initdb.d/init-db.sh
- content: |
- #!/bin/bash
- set -e
+ zookeeper:
+ image: zookeeper:3.7.0
+ volumes:
+ - posthog-zookeeper-data:/data
+ - posthog-zookeeper-datalog:/datalog
+ - posthog-zookeeper-logs:/logs
- cp -r /idl/* /var/lib/clickhouse/format_schemas/
+ clickhouse:
+ image: clickhouse/clickhouse-server:25.12.5.44
+ depends_on:
+ kafka:
+ condition: service_started
+ zookeeper:
+ condition: service_started
+ environment:
+ - CLICKHOUSE_SKIP_USER_SETUP=1
+ - KAFKA_HOSTS=kafka:9092
+ healthcheck:
+ test: ['CMD-SHELL', 'wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1']
+ interval: 5s
+ timeout: 10s
+ retries: 30
+ start_period: 30s
+ volumes:
- type: bind
- source: ./docker/clickhouse/config.xml
+ source: ./clickhouse/config.xml
target: /etc/clickhouse-server/config.xml
+ read_only: true
content: |
-
+
-
- trace
+ warning
/var/log/clickhouse-server/clickhouse-server.log
/var/log/clickhouse-server/clickhouse-server.err.log
-
1000M
10
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
8123
-
-
- 9000
-
-
9004
-
-
9005
-
-
8443
-
-
9440
-
-
-
-
-
9009
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
4096
-
-
3
-
-
-
- false
-
-
- /path/to/ssl_cert_file
- /path/to/ssl_key_file
-
-
- false
-
-
- /path/to/ssl_ca_cert_file
-
-
- none
-
-
- 0
-
-
- -1
- -1
-
-
- false
-
-
-
-
-
+
/etc/clickhouse-server/server.crt
/etc/clickhouse-server/server.key
-
/etc/clickhouse-server/dhparam.pem
none
true
@@ -697,42 +97,31 @@ services:
true
-
+
true
true
sslv2,sslv3
true
-
-
RejectCertificateHandler
-
-
-
- 100
+ 200
@@ -745,10 +134,14 @@ services:
of the time, in which case a higher number of threads might be required.
-->
+ 10
10000
+
+ 4
+
+ If the pool is full, connection will be drained synchronously. -->
4194304
0
+ correct maximum value. -->
8589934592
5368709120
-
-
1000
@@ -829,124 +201,9 @@ services:
/var/lib/clickhouse/tmp/
-
-
-
/var/lib/clickhouse/user_files/
-
-
-
-
-
-
-
-
@@ -957,237 +214,23 @@ services:
/var/lib/clickhouse/access/
-
-
default
-
-
-
-
-
-
-
-
default
-
-
-
-
-
-
-
true
false
- ' | sed -e 's|.*>\(.*\)<.*|\1|')
- wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
- apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
- clickhouse-jdbc-bridge &
-
- * [CentOS/RHEL]
- export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
- export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '' | sed -e 's|.*>\(.*\)<.*|\1|')
- wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
- yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
- clickhouse-jdbc-bridge &
-
- Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
- ]]>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- localhost
- 9000
-
-
-
-
-
-
-
-
-
-
-
.*
-
-
-
-
zookeeper
@@ -1195,176 +238,81 @@ services:
-
-
-
- 01
- ch1
-
-
-
3600
-
3600
60
-
-
-
-
-
-
-
-
system
-
toYYYYMM(event_date)
-
-
-
- 7500
+ 500
+
+ 8388608
+
+ 8192
+
+ 524288
+
+ true
+
+
+
+
+ event_date + INTERVAL 12 DAY
+ 1
-
-
- system
-
-
- toYYYYMM(event_date)
- 7500
-
-
+ Used only for queries with setting log_query_threads = 1. -->
system
toYYYYMM(event_date)
7500
+ 8388608
+ 8192
+ 524288
+ false
+
+ event_date + INTERVAL 5 DAY
+ 1
+ Used only for queries with setting log_query_views = 1. -->
system
toYYYYMM(event_date)
7500
+
+ event_date + INTERVAL 5 DAY
+ 1
-
-
- system
-
- toYYYYMM(event_date)
- 7500
-
-
@@ -1373,6 +321,9 @@ services:
7500
1000
+
+ event_date + INTERVAL 5 DAY
+ 1
7000
+
+ event_date + INTERVAL 5 DAY
+ 1
+ This table is normally empty. -->
system
1000
+ 1024
+ 1024
+ 512
+ true
@@ -1431,26 +389,26 @@ services:
toYYYYMM(event_date)
7500
-
-
-
-
-
+ event_date + INTERVAL 5 DAY
+ 1
+
-
-
+
+ system
+
+ toYYYYMM(event_date)
+ 7500
+ event_date + INTERVAL 5 DAY
+ 1
+
-
*_dictionary.xml
*_function.xml
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
/clickhouse/task_queue/ddl
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
@@ -1594,7 +461,7 @@ services:
/var/lib/clickhouse/format_schemas/
@@ -1616,96 +483,16 @@ services:
-
-
-
-
-
-
- false
-
- false
-
-
- https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277
-
-
-
-
-
-
-
-
+
+ 1073741824
+ 2
+ 2
+
+
- type: bind
- source: ./docker/clickhouse/users.xml
+ source: ./clickhouse/users.xml
target: /etc/clickhouse-server/users.xml
+ read_only: true
content: |
@@ -1718,19 +505,31 @@ services:
10000000000
+
random
1
+ 0
+ 25.6
+ global
+ 0
+ 1
+ 1
+ 1
+ 1
+ false
+ 1
+
@@ -1738,6 +537,14 @@ services:
1
+
+ default
+
+
+
+ default
+
+
@@ -1746,70 +553,70 @@ services:
::/0
@@ -1822,8 +629,35 @@ services:
default
-
+ 1
+ 1
+ 1
+ 1
+
+
+ apipass
+
+
+ ::/0
+
+
+ api
+
+ default
+
+
+
+ apppass
+
+
+ ::/0
+
+
+ app
+
+ default
+
@@ -1845,287 +679,996 @@ services:
- - clickhouse-data:/var/lib/clickhouse
- depends_on:
- - kafka
- - zookeeper
+ - type: bind
+ source: ./clickhouse/config.d/default.xml
+ target: /etc/clickhouse-server/config.d/default.xml
+ read_only: true
+ content: |
+
+ 9000
- zookeeper:
- image: zookeeper:3.7.0
- volumes:
- - zookeeper-datalog:/datalog
- - zookeeper-data:/data
- - zookeeper-logs:/logs
+
+
+
+
+ clickhouse
+ 9000
+
+
+
+
+
+
+ clickhouse
+ 9000
+
+
+
+
+
+
+ clickhouse
+ 9000
+
+
+
+
+
+
+ clickhouse
+ 9000
+
+
+
+
+
+
+ clickhouse
+ 9000
+
+
+
+
- kafka:
- image: ghcr.io/posthog/kafka-container:v2.8.2
+
+
+
+
+
+
+
+
+
+
+ 01
+ ch1
+ online
+ data
+
+
+ - type: bind
+ source: ./clickhouse/user_defined_function.xml
+ target: /etc/clickhouse-server/user_defined_function.xml
+ read_only: true
+ content: |
+
+
+ executable_pool
+ aggregate_funnel
+ Array(Tuple(Int8, Nullable(String), Array(Float64), Array(Array(UUID)), UInt32))
+ result
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(Nullable(String))
+ prop_vals
+
+
+ Array(Int8)
+ optional_steps
+
+
+ Array(Tuple(Nullable(Float64), UUID, Nullable(String), Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel steps
+ 600
+
+
+
+ executable_pool
+ aggregate_funnel_cohort
+ Array(Tuple(Int8, UInt64, Array(Float64), Array(Array(UUID)), UInt32))
+ result
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(UInt64)
+ prop_vals
+
+
+ Array(Int8)
+ optional_steps
+
+
+ Array(Tuple(Nullable(Float64), UUID, UInt64, Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel steps
+ 600
+
+
+
+ executable_pool
+ aggregate_funnel_array
+ Array(Tuple(Int8, Array(String), Array(Float64), Array(Array(UUID)), UInt32))
+ result
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(Array(String))
+ prop_vals
+
+
+ Array(Int8)
+ optional_steps
+
+
+ Array(Tuple(Nullable(Float64), UUID, Array(String), Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel steps
+ 600
+
+
+
+ executable_pool
+ aggregate_funnel_test
+ String
+ result
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(Array(String))
+ prop_vals
+
+
+ Array(Int8)
+ optional_steps
+
+
+ Array(Tuple(Nullable(Float64), UUID, Nullable(String), Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel_test.py
+ 600
+
+
+
+ executable_pool
+ aggregate_funnel_trends
+ Array(Tuple(UInt64, Int8, Nullable(String), UUID))
+ result
+
+ UInt8
+ from_step
+
+
+ UInt8
+ to_step
+
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(Nullable(String))
+ prop_vals
+
+
+ Array(Tuple(Nullable(Float64), UInt64, UUID, Nullable(String), Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel trends
+ 600
+
+
+
+ executable_pool
+ aggregate_funnel_array_trends
+
+ Array(Tuple(UInt64, Int8, Array(String), UUID))
+ result
+
+ UInt8
+ from_step
+
+
+ UInt8
+ to_step
+
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(Array(String))
+ prop_vals
+
+
+ Array(Tuple(Nullable(Float64), UInt64, UUID, Array(String), Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel trends
+ 600
+
+
+
+ executable_pool
+ aggregate_funnel_cohort_trends
+
+ Array(Tuple(UInt64, Int8, UInt64, UUID))
+ result
+
+ UInt8
+ from_step
+
+
+ UInt8
+ to_step
+
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(UInt64)
+ prop_vals
+
+
+ Array(Tuple(Nullable(Float64), UInt64, UUID, UInt64, Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel trends
+ 600
+
+
+
+ executable_pool
+ aggregate_funnel_array_trends_test
+ String
+ result
+
+ UInt8
+ from_step
+
+
+ UInt8
+ to_step
+
+
+ UInt8
+ num_steps
+
+
+ UInt64
+ conversion_window_limit
+
+
+ String
+ breakdown_attribution_type
+
+
+ String
+ funnel_order_type
+
+
+ Array(Array(String))
+ prop_vals
+
+
+ Array(Tuple(Nullable(Float64), UInt64, UUID, Array(String), Array(Int8)))
+ value
+
+ JSONEachRow
+ aggregate_funnel_array_trends_test.py
+ 600
+
+
+ - posthog-clickhouse-data:/var/lib/clickhouse
+
+ # Workaround for ClickHouse crash_log table not existing on fresh installs.
+ # See: https://github.com/PostHog/posthog/issues/40300
+ clickhouse-init:
+ image: alpine:3.19
+ entrypoint: /bin/sh
+ command:
+ - '-c'
+ - |
+ apk add --no-cache curl
+ until curl -sf http://clickhouse:8123/ping >/dev/null 2>&1; do sleep 2; done
+ curl -sf 'http://clickhouse:8123/' --data "CREATE TABLE IF NOT EXISTS system.crash_log (hostname LowCardinality(String) DEFAULT hostName(), event_date Date, event_time DateTime, timestamp_ns UInt64, signal Int32, thread_id UInt64, query_id String, trace Array(UInt64), trace_full Array(String), version String, revision UInt32, build_id String) ENGINE = MergeTree ORDER BY (event_date, event_time) TTL event_date + INTERVAL 30 DAY"
depends_on:
- - zookeeper
+ clickhouse:
+ condition: service_healthy
+ restart: 'no'
+
+ kafka:
+ image: docker.redpanda.com/redpandadata/redpanda:v25.1.9
+ command:
+ - redpanda
+ - start
+ - '--kafka-addr internal://0.0.0.0:9092'
+ - '--advertise-kafka-addr internal://kafka:9092'
+ - '--pandaproxy-addr internal://0.0.0.0:8082'
+ - '--advertise-pandaproxy-addr internal://kafka:8082'
+ - '--schema-registry-addr internal://0.0.0.0:8081'
+ - '--rpc-addr kafka:33145'
+ - '--advertise-rpc-addr kafka:33145'
+ - '--mode dev-container'
+ - '--smp 1'
+ - '--memory 1500M'
+ - '--reserve-memory 200M'
+ - '--overprovisioned'
+ - '--set redpanda.empty_seed_starts_cluster=false'
+ - '--seeds kafka:33145'
+ - '--set redpanda.auto_create_topics_enabled=true'
environment:
- - KAFKA_BROKER_ID=1001
- - KAFKA_CFG_RESERVED_BROKER_MAX_ID=1001
- - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
- - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
- - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- - ALLOW_PLAINTEXT_LISTENER=yes
-
- object_storage:
- image: ghcr.io/coollabsio/minio:RELEASE.2025-10-15T17-29-55Z # Released on 15 October 2025
+ - ALLOW_PLAINTEXT_LISTENER=true
+ healthcheck:
+ test: curl -f http://localhost:9644/v1/status/ready || exit 1
+ interval: 5s
+ timeout: 10s
+ retries: 30
+ start_period: 30s
+ volumes:
+ - posthog-kafka-data:/var/lib/redpanda/data
+
+ kafka-init:
+ image: docker.redpanda.com/redpandadata/redpanda:v25.1.9
+ entrypoint: /bin/sh
+ command:
+ - '-c'
+ - |
+ TIMEOUT=120
+ ELAPSED=0
+ until rpk topic list --brokers kafka:9092 2>/dev/null; do
+ sleep 3
+ ELAPSED=$$((ELAPSED + 3))
+ if [ $$ELAPSED -ge $$TIMEOUT ]; then exit 1; fi
+ done
+ for topic in exceptions_ingestion clickhouse_events_json events_plugin_ingestion; do
+ rpk topic create "$$topic" --brokers kafka:9092 -p 1 -r 1 2>&1 || true
+ done
+ depends_on:
+ kafka:
+ condition: service_healthy
+ restart: 'no'
+
+ objectstorage:
+ image: minio/minio:RELEASE.2025-04-22T22-12-26Z
environment:
- - MINIO_ROOT_USER=$SERVICE_USER_MINIO
- - MINIO_ROOT_PASSWORD=$SERVICE_PASSWORD_MINIO
+ - MINIO_ROOT_USER=${SERVICE_USER_MINIO}
+ - MINIO_ROOT_PASSWORD=${SERVICE_PASSWORD_MINIO}
entrypoint: sh
- command: -c 'mkdir -p /data/posthog && minio server --address ":9000" --console-address ":9001" /data'
+ command: -c 'mkdir -p /data/posthog && minio server --address ":19000" --console-address ":19001" /data'
+ volumes:
+ - posthog-objectstorage:/data
+
+ seaweedfs:
+ image: chrislusf/seaweedfs:4.03
+ entrypoint:
+ - /bin/sh
+ - '-c'
+ - |
+ /usr/bin/weed "$$@" &
+ WEED_PID=$$!
+ while true; do
+ sleep 5
+ if echo "s3.bucket.list" | /usr/bin/weed shell -master=localhost:9333 2>&1 | grep -q "posthog"; then break; fi
+ echo "s3.bucket.create -name posthog" | /usr/bin/weed shell -master=localhost:9333 2>&1 || true
+ done
+ wait $$WEED_PID
+ - '--'
+ command:
+ - server
+ - '-s3'
+ - '-s3.port=8333'
+ - '-dir=/data'
volumes:
- - object_storage:/data
+ - posthog-seaweedfs:/data
healthcheck:
- test: ["CMD", "mc", "ready", "local"]
+ test: ['CMD', 'sh', '-c', "echo 's3.bucket.list' | /usr/bin/weed shell -master=localhost:9333 2>&1 | grep -q posthog"]
interval: 5s
- timeout: 20s
- retries: 10
+ timeout: 10s
+ retries: 30
+ start_period: 15s
- maildev:
- image: maildev/maildev:2.0.5
+ temporal:
+ image: temporalio/auto-setup:1.20.0
+ environment:
+ - DB=postgresql
+ - DB_PORT=5432
+ - POSTGRES_USER=posthog
+ - POSTGRES_PWD=${SERVICE_PASSWORD_POSTGRES}
+ - POSTGRES_SEEDS=db
+ - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml
+ - ENABLE_ES=false
+ healthcheck:
+ test: ['CMD-SHELL', 'nc -z $$(hostname) 7233']
+ interval: 5s
+ timeout: 5s
+ retries: 30
+ start_period: 300s
+ depends_on:
+ db:
+ condition: service_healthy
+ volumes:
+ - type: bind
+ source: ./temporal/dynamicconfig/development-sql.yaml
+ target: /etc/temporal/config/dynamicconfig/development-sql.yaml
+ read_only: true
+ content: |
+ limit.maxIDLength:
+ - value: 255
+ constraints: {}
+ system.forceSearchAttributesCacheRefreshOnRead:
+ - value: true
+ constraints: {}
- flower:
- image: mher/flower:2.0.0
+ temporal-admin-tools:
+ image: temporalio/admin-tools:1.20.0
environment:
- FLOWER_PORT: 5555
- CELERY_BROKER_URL: redis://redis:6379
+ - TEMPORAL_CLI_ADDRESS=temporal:7233
+ depends_on:
+ - temporal
+
+ temporal-ui:
+ image: temporalio/ui:2.31.2
+ environment:
+ - TEMPORAL_ADDRESS=temporal:7233
+ - TEMPORAL_CORS_ORIGINS=http://localhost:3000
+ - TEMPORAL_CSRF_COOKIE_INSECURE=true
+ depends_on:
+ temporal:
+ condition: service_started
+ db:
+ condition: service_healthy
web:
image: posthog/posthog:latest
- command: /compose/start
- volumes:
- - type: bind
- source: ./compose/start
- target: /compose/start
- content: |
- #!/bin/bash
- /compose/wait
- ./bin/migrate
- ./bin/docker-server
- - type: bind
- source: ./compose/wait
- target: /compose/wait
- content: |
- #!/usr/bin/env python3
-
- import socket
- import time
-
- def loop():
- print("Waiting for ClickHouse and Postgres to be ready")
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
- s.connect(('clickhouse', 9000))
- print("Clickhouse is ready")
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
- s.connect(('db', 5432))
- print("Postgres is ready")
- except ConnectionRefusedError as e:
- time.sleep(5)
- loop()
-
- loop()
+ command:
+ - /bin/bash
+ - '-c'
+ - './bin/migrate && ./bin/docker-server'
environment:
- - SERVICE_URL_WEB_8000
- - OPT_OUT_CAPTURING=true
+ - SERVICE_URL_POSTHOG_8000=/
+ - OTEL_SDK_DISABLED=true
- DISABLE_SECURE_SSL_REDIRECT=true
- IS_BEHIND_PROXY=true
- - TRUST_ALL_PROXIES=true
- - DATABASE_URL=postgres://posthog:$SERVICE_PASSWORD_POSTGRES@db:5432/posthog
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
- CLICKHOUSE_HOST=clickhouse
- CLICKHOUSE_DATABASE=posthog
- CLICKHOUSE_SECURE=false
- CLICKHOUSE_VERIFY=false
+ - CLICKHOUSE_API_USER=api
+ - CLICKHOUSE_API_PASSWORD=apipass
+ - CLICKHOUSE_APP_USER=app
+ - CLICKHOUSE_APP_PASSWORD=apppass
- KAFKA_HOSTS=kafka
- REDIS_URL=redis://redis:6379/
- PGHOST=db
- PGUSER=posthog
- - PGPASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - PGPASSWORD=${SERVICE_PASSWORD_POSTGRES}
- DEPLOYMENT=hobby
- - SITE_URL=$SERVICE_URL_WEB
- - SECRET_KEY=$SERVICE_BASE64_64_SECRETKEY
- - 'ENCRYPTION_SALT_KEYS=${SERVICE_ENCRYPTION_SALT_KEYS:-00beef0000beef0000beef0000beef00}'
+ - CDP_API_URL=http://plugins:6738
+ - FLAGS_REDIS_ENABLED=false
+ - SITE_URL=${SERVICE_URL_POSTHOG}
+ - SECRET_KEY=${SERVICE_BASE64_64_SECRETKEY}
+ - ENCRYPTION_SALT_KEYS=${SERVICE_PASSWORD_ENCRYPTION}
+ - OBJECT_STORAGE_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
+ - OBJECT_STORAGE_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
+ - OBJECT_STORAGE_ENDPOINT=http://objectstorage:19000
+ - OBJECT_STORAGE_PUBLIC_ENDPOINT=${SERVICE_URL_POSTHOG}
+ - SESSION_RECORDING_V2_S3_ENDPOINT=http://seaweedfs:8333
+ - SESSION_RECORDING_V2_S3_ACCESS_KEY_ID=any
+ - SESSION_RECORDING_V2_S3_SECRET_ACCESS_KEY=any
+ - OBJECT_STORAGE_ENABLED=true
+ - RECORDING_API_URL=http://plugins:6738
+ - LIVESTREAM_HOST=${SERVICE_URL_POSTHOG}/livestream
+ - USE_GRANIAN=true
+ - GRANIAN_WORKERS=2
depends_on:
- - db
- - redis
- - clickhouse
- - kafka
- - object_storage
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ clickhouse:
+ condition: service_healthy
+ clickhouse-init:
+ condition: service_completed_successfully
+ kafka:
+ condition: service_healthy
+ kafka-init:
+ condition: service_completed_successfully
+ objectstorage:
+ condition: service_started
+ seaweedfs:
+ condition: service_healthy
+
worker:
image: posthog/posthog:latest
command: ./bin/docker-worker-celery --with-scheduler
environment:
- - OPT_OUT_CAPTURING=true
+ - OTEL_SDK_DISABLED=true
- DISABLE_SECURE_SSL_REDIRECT=true
- IS_BEHIND_PROXY=true
- - TRUST_ALL_PROXIES=true
- - DATABASE_URL=postgres://posthog:$SERVICE_PASSWORD_POSTGRES@db:5432/posthog
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
- CLICKHOUSE_HOST=clickhouse
- CLICKHOUSE_DATABASE=posthog
- CLICKHOUSE_SECURE=false
- CLICKHOUSE_VERIFY=false
+ - CLICKHOUSE_API_USER=api
+ - CLICKHOUSE_API_PASSWORD=apipass
+ - CLICKHOUSE_APP_USER=app
+ - CLICKHOUSE_APP_PASSWORD=apppass
- KAFKA_HOSTS=kafka
- REDIS_URL=redis://redis:6379/
- PGHOST=db
- PGUSER=posthog
- - PGPASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - PGPASSWORD=${SERVICE_PASSWORD_POSTGRES}
- DEPLOYMENT=hobby
- - SITE_URL=$SERVICE_URL_WEB
- - SECRET_KEY=$SERVICE_BASE64_64_SECRETKEY
- - 'ENCRYPTION_SALT_KEYS=${SERVICE_ENCRYPTION_SALT_KEYS:-00beef0000beef0000beef0000beef00}'
+ - CDP_API_URL=http://plugins:6738
+ - FLAGS_REDIS_ENABLED=false
+ - SITE_URL=${SERVICE_URL_POSTHOG}
+ - SECRET_KEY=${SERVICE_BASE64_64_SECRETKEY}
+ - ENCRYPTION_SALT_KEYS=${SERVICE_PASSWORD_ENCRYPTION}
+ - OBJECT_STORAGE_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
+ - OBJECT_STORAGE_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
+ - OBJECT_STORAGE_ENDPOINT=http://objectstorage:19000
+ - OBJECT_STORAGE_PUBLIC_ENDPOINT=${SERVICE_URL_POSTHOG}
+ - SESSION_RECORDING_V2_S3_ENDPOINT=http://seaweedfs:8333
+ - SESSION_RECORDING_V2_S3_ACCESS_KEY_ID=any
+ - SESSION_RECORDING_V2_S3_SECRET_ACCESS_KEY=any
+ - OBJECT_STORAGE_ENABLED=true
+ - RECORDING_API_URL=http://plugins:6738
+ - POSTHOG_SKIP_MIGRATION_CHECKS=1
depends_on:
- - db
- - redis
- - clickhouse
- - kafka
- - object_storage
-
- # capture:
- # image: ghcr.io/posthog/capture:main
- # environment:
- # ADDRESS: "0.0.0.0:3000"
- # KAFKA_TOPIC: "events_plugin_ingestion"
- # KAFKA_HOSTS: "kafka:9092"
- # REDIS_URL: "redis://redis:6379/"
- # depends_on:
- # - redis
- # - kafka
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ clickhouse:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ web:
+ condition: service_started
plugins:
- image: posthog/posthog:latest
- command: ./bin/plugin-server --no-restart-loop
+ image: posthog/posthog-node:latest
+ command: node nodejs/dist/index.js
environment:
- - DATABASE_URL=postgres://posthog:$SERVICE_PASSWORD_POSTGRES@db:5432/posthog
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - PERSONS_DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - BEHAVIORAL_COHORTS_DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - CYCLOTRON_DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
- KAFKA_HOSTS=kafka:9092
- REDIS_URL=redis://redis:6379/
+ - POSTHOG_REDIS_HOST=redis
+ - POSTHOG_REDIS_PORT=6379
+ - INGESTION_REDIS_HOST=redis
+ - INGESTION_REDIS_PORT=6379
+ - LOGS_REDIS_HOST=redis
+ - LOGS_REDIS_PORT=6379
+ - LOGS_REDIS_TLS=false
+ - CDP_REDIS_HOST=redis
+ - CDP_REDIS_PORT=6379
+ - SESSION_RECORDING_API_REDIS_HOST=redis
+ - SESSION_RECORDING_API_REDIS_PORT=6379
+ - COOKIELESS_REDIS_HOST=redis
+ - COOKIELESS_REDIS_PORT=6379
- CLICKHOUSE_HOST=clickhouse
- CLICKHOUSE_DATABASE=posthog
- CLICKHOUSE_SECURE=false
- CLICKHOUSE_VERIFY=false
- - SITE_URL=$SERVICE_URL_WEB
- - SECRET_KEY=$SERVICE_BASE64_64_SECRETKEY
- - 'ENCRYPTION_SALT_KEYS=${SERVICE_ENCRYPTION_SALT_KEYS:-00beef0000beef0000beef0000beef00}'
- depends_on:
- - db
- - redis
- - clickhouse
- - kafka
- - object_storage
-
- # migrate:
- # image: posthog/posthog:latest
- # restart: "no"
- # command: sh -c "python manage.py migrate && python manage.py migrate_clickhouse && python manage.py run_async_migrations"
- # environment:
- # - DISABLE_SECURE_SSL_REDIRECT=true
- # - IS_BEHIND_PROXY=true
- # - TRUST_ALL_PROXIES=true
- # - DATABASE_URL=postgres://posthog:$SERVICE_PASSWORD_POSTGRES@db:5432/posthog
- # - CLICKHOUSE_HOST=clickhouse
- # - CLICKHOUSE_DATABASE=posthog
- # - CLICKHOUSE_SECURE=false
- # - CLICKHOUSE_VERIFY=false
- # - KAFKA_HOSTS=kafka
- # - REDIS_URL=redis://redis:6379/
- # - PGHOST=db
- # - PGUSER=posthog
- # - PGPASSWORD=$SERVICE_PASSWORD_POSTGRES
- # - DEPLOYMENT=hobby
- # - SITE_URL=$SERVICE_URL_WEB
- # - SECRET_KEY=$SERVICE_BASE64_64_SECRETKEY
- # - 'ENCRYPTION_SALT_KEYS=${SERVICE_ENCRYPTION_SALT_KEYS:-00beef0000beef0000beef0000beef00}'
- # depends_on:
- # - db
- # - redis
- # - clickhouse
- # - kafka
- # - object_storage
-
- # Temporal containers
- elasticsearch:
- image: elasticsearch:7.16.2
- environment:
- - cluster.routing.allocation.disk.threshold_enabled=true
- - cluster.routing.allocation.disk.watermark.low=512mb
- - cluster.routing.allocation.disk.watermark.high=256mb
- - cluster.routing.allocation.disk.watermark.flood_stage=128mb
- - discovery.type=single-node
- - ES_JAVA_OPTS=-Xms256m -Xmx256m
- - xpack.security.enabled=false
- volumes:
- - elasticsearch-data:/var/lib/elasticsearch/data
- temporal:
- image: temporalio/auto-setup:1.20.0
- environment:
- - DB=postgresql
- - DB_PORT=5432
- - POSTGRES_USER=posthog
- - POSTGRES_PWD=$SERVICE_PASSWORD_POSTGRES
- - POSTGRES_SEEDS=db
- - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml
- - ENABLE_ES=true
- - ES_SEEDS=elasticsearch
- - ES_VERSION=v7
- - ENABLE_ES=false
+ - SITE_URL=${SERVICE_URL_POSTHOG}
+ - SECRET_KEY=${SERVICE_BASE64_64_SECRETKEY}
+ - ENCRYPTION_SALT_KEYS=${SERVICE_PASSWORD_ENCRYPTION}
+ - OBJECT_STORAGE_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
+ - OBJECT_STORAGE_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
+ - SESSION_RECORDING_V2_S3_ACCESS_KEY_ID=any
+ - SESSION_RECORDING_V2_S3_SECRET_ACCESS_KEY=any
+ - SESSION_RECORDING_V2_S3_TIMEOUT_MS=120000
+ - OBJECT_STORAGE_ENDPOINT=http://objectstorage:19000
+ - OBJECT_STORAGE_PUBLIC_ENDPOINT=${SERVICE_URL_POSTHOG}
+ - SESSION_RECORDING_V2_S3_ENDPOINT=http://seaweedfs:8333
+ - OBJECT_STORAGE_ENABLED=true
depends_on:
db:
condition: service_healthy
- volumes:
- - type: bind
- source: ./docker/temporal/dynamicconfig/development-sql.yaml
- target: /etc/temporal/config/dynamicconfig/development-sql.yaml
- content: |
- limit.maxIDLength:
- - value: 255
- constraints: {}
- system.forceSearchAttributesCacheRefreshOnRead:
- - value: false
- constraints: {}
- temporal-admin-tools:
- image: temporalio/admin-tools:1.20.0
- depends_on:
- - temporal
- environment:
- - TEMPORAL_CLI_ADDRESS=temporal:7233
- stdin_open: true
- tty: true
- temporal-ui:
- image: temporalio/ui:2.10.3
- depends_on:
- - temporal
- environment:
- - TEMPORAL_ADDRESS=temporal:7233
- - TEMPORAL_CORS_ORIGINS=http://localhost:3000
+ redis:
+ condition: service_healthy
+ clickhouse:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ objectstorage:
+ condition: service_started
+ seaweedfs:
+ condition: service_healthy
+ web:
+ condition: service_started
temporal-django-worker:
image: posthog/posthog:latest
command: ./bin/temporal-django-worker
environment:
+ - OTEL_SDK_DISABLED=true
- DISABLE_SECURE_SSL_REDIRECT=true
- IS_BEHIND_PROXY=true
- - TRUST_ALL_PROXIES=true
- - DATABASE_URL=postgres://posthog:$SERVICE_PASSWORD_POSTGRES@db:5432/posthog
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
- CLICKHOUSE_HOST=clickhouse
- CLICKHOUSE_DATABASE=posthog
- CLICKHOUSE_SECURE=false
- CLICKHOUSE_VERIFY=false
+ - CLICKHOUSE_API_USER=api
+ - CLICKHOUSE_API_PASSWORD=apipass
+ - CLICKHOUSE_APP_USER=app
+ - CLICKHOUSE_APP_PASSWORD=apppass
- KAFKA_HOSTS=kafka
- REDIS_URL=redis://redis:6379/
- PGHOST=db
- PGUSER=posthog
- - PGPASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - PGPASSWORD=${SERVICE_PASSWORD_POSTGRES}
- DEPLOYMENT=hobby
- - SITE_URL=$SERVICE_URL_WEB
- - SECRET_KEY=$SERVICE_BASE64_64_SECRETKEY
- - 'ENCRYPTION_SALT_KEYS=${SERVICE_ENCRYPTION_SALT_KEYS:-00beef0000beef0000beef0000beef00}'
+ - CDP_API_URL=http://plugins:6738
+ - FLAGS_REDIS_ENABLED=false
+ - SITE_URL=${SERVICE_URL_POSTHOG}
+ - SECRET_KEY=${SERVICE_BASE64_64_SECRETKEY}
+ - ENCRYPTION_SALT_KEYS=${SERVICE_PASSWORD_ENCRYPTION}
+ - OBJECT_STORAGE_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
+ - OBJECT_STORAGE_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
+ - OBJECT_STORAGE_ENDPOINT=http://objectstorage:19000
+ - OBJECT_STORAGE_PUBLIC_ENDPOINT=${SERVICE_URL_POSTHOG}
+ - SESSION_RECORDING_V2_S3_ENDPOINT=http://seaweedfs:8333
+ - SESSION_RECORDING_V2_S3_ACCESS_KEY_ID=any
+ - SESSION_RECORDING_V2_S3_SECRET_ACCESS_KEY=any
+ - OBJECT_STORAGE_ENABLED=true
+ - RECORDING_API_URL=http://plugins:6738
- TEMPORAL_HOST=temporal
depends_on:
- - db
- - redis
- - clickhouse
- - kafka
- - object_storage
- - temporal
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ clickhouse:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ objectstorage:
+ condition: service_started
+ seaweedfs:
+ condition: service_healthy
+ temporal:
+ condition: service_healthy
+
+ asyncmigrationscheck:
+ image: posthog/posthog:latest
+ command: python manage.py run_async_migrations --check
+ restart: 'no'
+ environment:
+ - OTEL_SDK_DISABLED=true
+ - DISABLE_SECURE_SSL_REDIRECT=true
+ - IS_BEHIND_PROXY=true
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - CLICKHOUSE_HOST=clickhouse
+ - CLICKHOUSE_DATABASE=posthog
+ - CLICKHOUSE_SECURE=false
+ - CLICKHOUSE_VERIFY=false
+ - CLICKHOUSE_API_USER=api
+ - CLICKHOUSE_API_PASSWORD=apipass
+ - CLICKHOUSE_APP_USER=app
+ - CLICKHOUSE_APP_PASSWORD=apppass
+ - KAFKA_HOSTS=kafka
+ - REDIS_URL=redis://redis:6379/
+ - PGHOST=db
+ - PGUSER=posthog
+ - PGPASSWORD=${SERVICE_PASSWORD_POSTGRES}
+ - DEPLOYMENT=hobby
+ - CDP_API_URL=http://plugins:6738
+ - FLAGS_REDIS_ENABLED=false
+ - SITE_URL=${SERVICE_URL_POSTHOG}
+ - SECRET_KEY=${SERVICE_BASE64_64_SECRETKEY}
+ - ENCRYPTION_SALT_KEYS=${SERVICE_PASSWORD_ENCRYPTION}
+ - OBJECT_STORAGE_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
+ - OBJECT_STORAGE_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
+ - OBJECT_STORAGE_ENDPOINT=http://objectstorage:19000
+ - OBJECT_STORAGE_PUBLIC_ENDPOINT=${SERVICE_URL_POSTHOG}
+ - SESSION_RECORDING_V2_S3_ENDPOINT=http://seaweedfs:8333
+ - SESSION_RECORDING_V2_S3_ACCESS_KEY_ID=any
+ - SESSION_RECORDING_V2_S3_SECRET_ACCESS_KEY=any
+ - OBJECT_STORAGE_ENABLED=true
+ - RECORDING_API_URL=http://plugins:6738
+ - SKIP_ASYNC_MIGRATIONS_SETUP=0
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ clickhouse:
+ condition: service_healthy
+ kafka:
+ condition: service_healthy
+ web:
+ condition: service_started
+
+ capture:
+ image: ghcr.io/posthog/posthog/capture:master
+ environment:
+ - SERVICE_URL_POSTHOG_3000=/e
+ - ADDRESS=0.0.0.0:3000
+ - KAFKA_TOPIC=events_plugin_ingestion
+ - KAFKA_HOSTS=kafka:9092
+ - REDIS_URL=redis://redis:6379/
+ - CAPTURE_MODE=events
+ - RUST_LOG=info,rdkafka=warn
+ depends_on:
+ kafka:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+
+ replay-capture:
+ image: ghcr.io/posthog/posthog/capture:master
+ environment:
+ - SERVICE_URL_POSTHOG_3000=/s
+ - ADDRESS=0.0.0.0:3000
+ - KAFKA_TOPIC=session_recording_snapshot_item_events
+ - KAFKA_HOSTS=kafka:9092
+ - REDIS_URL=redis://redis:6379/
+ - CAPTURE_MODE=recordings
+ depends_on:
+ kafka:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+
+ livestream:
+ image: ghcr.io/posthog/posthog/livestream:master
+ environment:
+ - SERVICE_URL_POSTHOG_8080=/livestream
+ - LIVESTREAM_JWT_SECRET=${SERVICE_BASE64_64_SECRETKEY}
+ - LIVESTREAM_MMDB_PATH=/share/GeoLite2-City.mmdb
+ - LIVESTREAM_KAFKA_BROKERS=kafka:9092
+ - LIVESTREAM_KAFKA_TOPIC=events_plugin_ingestion
+ - LIVESTREAM_KAFKA_GROUP_ID=livestream
+ - LIVESTREAM_REDIS_ADDRESS=redis:6379
+ depends_on:
+ geoip-init:
+ condition: service_completed_successfully
+ kafka:
+        condition: service_healthy
+ volumes:
+ - posthog-share:/share
+ - type: bind
+ source: ./livestream/configs.yml
+ target: /code/configs/configs.yml
+ read_only: true
+ content: |
+ debug: false
+ kafka:
+ brokers: 'kafka:9092'
+ topic: 'events_plugin_ingestion'
+ group_id: 'livestream'
+ security_protocol: 'PLAINTEXT'
+ session_recording_enabled: true
+ session_recording_security_protocol: 'PLAINTEXT'
+ redis:
+ address: 'redis:6379'
+ mmdb:
+ path: '/share/GeoLite2-City.mmdb'
+
+ feature-flags:
+ image: ghcr.io/posthog/posthog/feature-flags:master
+ healthcheck:
+ test: ['CMD', 'curl', '-f', 'http://localhost:3001/_readiness']
+ interval: 5s
+ timeout: 5s
+ retries: 12
+ start_period: 10s
+ environment:
+ - SERVICE_URL_POSTHOG_3001=/flags
+ - WRITE_DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - READ_DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - PERSONS_WRITE_DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - PERSONS_READ_DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - MAXMIND_DB_PATH=/share/GeoLite2-City.mmdb
+ - REDIS_URL=redis://redis:6379/
+ - ADDRESS=0.0.0.0:3001
+ - RUST_LOG=info
+ - COOKIELESS_REDIS_HOST=redis
+ - COOKIELESS_REDIS_PORT=6379
+ volumes:
+ - posthog-share:/share
+ depends_on:
+ geoip-init:
+ condition: service_completed_successfully
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+
+ property-defs-rs:
+ image: ghcr.io/posthog/posthog/property-defs-rs:master
+ environment:
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - KAFKA_HOSTS=kafka:9092
+ - SKIP_WRITES=false
+ - SKIP_READS=false
+ - FILTER_MODE=opt-out
+ depends_on:
+ kafka-init:
+ condition: service_completed_successfully
+ db:
+ condition: service_healthy
+
+ cyclotron-janitor:
+ image: ghcr.io/posthog/posthog/cyclotron-janitor:master
+ environment:
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - KAFKA_HOSTS=kafka:9092
+ - KAFKA_TOPIC=clickhouse_app_metrics2
+ depends_on:
+ db:
+ condition: service_healthy
+ kafka:
+        condition: service_healthy
+
+ geoip-init:
+ image: alpine:3.19
+ entrypoint: /bin/sh
+ command:
+ - '-c'
+ - |
+ if [ -f /share/GeoLite2-City.mmdb ]; then echo "GeoIP DB already exists"; exit 0; fi
+ apk add --no-cache curl brotli
+        curl -fL 'https://mmdbcdn.posthog.net/' --http1.1 -o /tmp/geoip.br && brotli --decompress < /tmp/geoip.br > /share/GeoLite2-City.mmdb.tmp && mv /share/GeoLite2-City.mmdb.tmp /share/GeoLite2-City.mmdb
+ chmod 644 /share/GeoLite2-City.mmdb
+ restart: 'no'
+ volumes:
+ - posthog-share:/share
+
+ cymbal:
+ image: ghcr.io/posthog/posthog/cymbal:master
+ environment:
+ - KAFKA_HOSTS=kafka:9092
+ - KAFKA_CONSUMER_GROUP=cymbal
+ - KAFKA_CONSUMER_TOPIC=exceptions_ingestion
+ - OBJECT_STORAGE_BUCKET=posthog
+ - OBJECT_STORAGE_ACCESS_KEY_ID=any
+ - OBJECT_STORAGE_SECRET_ACCESS_KEY=any
+ - OBJECT_STORAGE_ENDPOINT=http://seaweedfs:8333
+ - OBJECT_STORAGE_FORCE_PATH_STYLE=true
+ - BIND_HOST=0.0.0.0
+ - BIND_PORT=3302
+ - DATABASE_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - PERSONS_URL=postgres://posthog:${SERVICE_PASSWORD_POSTGRES}@db:5432/posthog
+ - MAXMIND_DB_PATH=/share/GeoLite2-City.mmdb
+ - REDIS_URL=redis://redis:6379/
+ - ISSUE_BUCKETS_REDIS_URL=redis://redis:6379/
+ - RUST_LOG=info
+ volumes:
+ - posthog-share:/share
+ depends_on:
+ geoip-init:
+ condition: service_completed_successfully
+ kafka-init:
+ condition: service_completed_successfully
+ seaweedfs:
+ condition: service_healthy
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+