# application.conf
akka.http.server.request-timeout = 600 s
akka {
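# Optional: pick up the Akka license key from the AKKA_LICENSE_KEY environment variable when it is set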
license-key = ${?AKKA_LICENSE_KEY}
loglevel = DEBUG
actor {
provider = cluster
serialization-bindings {
"akka.projection.testing.CborSerializable" = jackson-cbor
}
}
remote {
artery {
canonical.port = 2552
}
}
cluster {
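# Resolve network partitions with the Split Brain Resolver instead of leaving unreachable nodes undecided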
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
roles = ["engine", "write-model", "read-model"]
sharding {
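# Cap how many shards move in a single rebalance round: at most 10 shards and at most 10% of all shards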
least-shard-allocation-strategy {
rebalance-absolute-limit = 10
rebalance-relative-limit = 0.1
}
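# Automatic passivation: keep at most 50000 active entities per shard region, passivating least recently used entities first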
passivation {
strategy = default-strategy
default-strategy.active-entity-limit = 50000
}
}
}
persistence {
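# Fallback circuit breaker for journal plugins: open after 10 failed or slow (>30s) calls, try again after 30s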
journal-plugin-fallback {
circuit-breaker {
max-failures = 10
call-timeout = 30s
reset-timeout = 30s
}
}
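# Limit how many persistent actors replay their events concurrently during recovery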
max-concurrent-recoveries = 50
}
projection {
# The strategy to use to recover from unhandled exceptions without causing the projection to fail
# recovery-strategy {
# strategy = retry-and-fail
# retries = 3
# retry-delay = 250ms
# }
# The configuration to use to restart the projection after an underlying streams failure
# The Akka streams restart source is used to facilitate this behaviour
# See the streams documentation for more details
restart-backoff {
min-backoff = 200ms
max-backoff = 2s
random-factor = 0.2
# -1 will not cap the number of restarts
# 0 will disable restarts
max-restarts = -1
}
grouped {
group-after-envelopes = 20
group-after-duration = 1000 ms
}
}
management {
cluster.bootstrap {
contact-point-discovery {
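# Discover other nodes through the Kubernetes API; a single contact point is enough to form the cluster
# (kept low for test deployments, can be raised via REQUIRED_CONTACT_POINT_NR below)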
discovery-method = kubernetes-api
required-contact-point-nr = 1
required-contact-point-nr = ${?REQUIRED_CONTACT_POINT_NR}
}
}
}
}
event-processor {
# Type of projection: at-least-once, exactly-once, grouped, logging-only
projection-type = at-least-once
# Number of tags or slice ranges per projection
# Increase for realistic testing
parallelism = 8
# How many projections to run. Each does the same thing and the end result is validated for all of them.
# Each projection uses its own set of tags to increase the load on the tag_* tables, since for provoking
# issues it is better that those reads/writes fail than writes to the messages table.
# Increasing this isn't very realistic but it does put a large load on the events-by-tag infrastructure.
nr-projections = 1
# Set to on for performance testing without validation
read-only = off
# fail one in every N messages, causing a restart of the projection
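# e.g. projection-failure-every = 1000 to fail roughly one in every 1000 messages (illustrative value; assumes an integer N)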
projection-failure-every = "off"
}
test {
# Overridden based on the remoting port
http.port = 8080
#throttle-actors-per-second = 500
throttle-actors-per-second = off
# Define this to make gRPC projections go via a load balancer/ingress instead of connecting directly to other cluster nodes
service-dns-name = ""
service-dns-name = ${?SERVICE_DNS_NAME}
}
# cinnamon
cinnamon.prometheus {
exporters += http-server
summary {
# for scrape frequency of 10s
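# two 10s age buckets give a rolling 20s window, so each scrape observes fresh data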
max-age = 20s
age-buckets = 2
significant-value-digits = 3
}
}
cinnamon.akka {
projection.metrics {
envelope-sources = on
}
persistence.entities {
// sharded:? will expand to /system/sharding/?/*
"sharded:?" {
report-by = group
}
}
cluster {
shard-region-info = on
node-metrics = on
}
}
//cinnamon.akka.actors {
// "configurable" {
// report-by = group
// includes = ["/system/sharding/configurable/*"]
// excludes = ["akka.cluster.sharding.Shard"]
// }
//}