Skip to content

Commit 8f62057

Browse files
committed
Add http4k benchmark (see renaissance-benchmarks#451)
Introduce a Kotlin-based HTTP benchmark using the http4k framework with an Undertow server backend. The benchmark creates a product management REST API and exercises it with concurrent client requests using Kotlin coroutines. Based on the original work in renaissance-benchmarks#451 by kristian-petras, with the following changes addressing reviewer feedback:

- Add BoundedUndertow server config wrapper to control the Undertow worker thread count via a new server_threads parameter (default $cpu.count), replacing the default of 32 * cpu, which prevented CPU saturation on large machines
- Set kotlincJvmTarget to match the project-wide Java 11 release target
- Use port 0 for auto-allocation and 127.0.0.1 to avoid IPv6 CI issues
- Categorize as @group("web"), consistent with finagle-http
- Update to http4k 5.47.0.0 (last v5 release, Java 11 compatible), Kotlin 2.1.20, sbt-kotlin-plugin 3.1.5
- Disable KotlinPlugin on incompatible projects (twitter-finagle, JMH)
1 parent 10dc6b3 commit 8f62057

11 files changed

Lines changed: 431 additions & 0 deletions

File tree

README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -232,6 +232,10 @@ The following is the complete list of benchmarks, separated into groups.
232232
\
233233
Default repetitions: 12; APACHE2 license, MIT distribution; Supported JVM: 11 and later
234234

235+
- `http4k` - Sends concurrent HTTP requests to an http4k server with an Undertow backend.
236+
\
237+
Default repetitions: 20; APACHE2 license, MIT distribution; Supported JVM: 11 and later
238+
235239

236240

237241
The suite also contains a group of benchmarks intended solely for testing
Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
package org.renaissance.http4k
2+
3+
import kotlinx.coroutines.runBlocking
4+
import org.http4k.client.OkHttp
5+
import org.renaissance.Benchmark
6+
import org.renaissance.Benchmark.Configuration
7+
import org.renaissance.Benchmark.Group
8+
import org.renaissance.Benchmark.Licenses
9+
import org.renaissance.Benchmark.Name
10+
import org.renaissance.Benchmark.Parameter
11+
import org.renaissance.Benchmark.Repetitions
12+
import org.renaissance.Benchmark.Summary
13+
import org.renaissance.BenchmarkContext
14+
import org.renaissance.BenchmarkResult
15+
import org.renaissance.BenchmarkResult.Validators
16+
import org.renaissance.License
17+
import org.renaissance.http4k.workload.WorkloadClient
18+
import org.renaissance.http4k.workload.WorkloadConfiguration
19+
import org.renaissance.http4k.workload.WorkloadServer
20+
21+
@Name("http4k")
@Group("web")
@Group("http4k")
@Summary("Sends concurrent HTTP requests to an http4k server with an Undertow backend.")
@Licenses(License.APACHE2)
@Repetitions(20)
@Parameter(name = "host", defaultValue = "127.0.0.1", summary = "Host of the server.")
@Parameter(name = "port", defaultValue = "0", summary = "Port of the server (0 for auto-allocation).")
@Parameter(name = "server_threads", defaultValue = "\$cpu.count", summary = "Number of Undertow worker threads.")
@Parameter(name = "max_threads", defaultValue = "\$cpu.count", summary = "Maximum number of client coroutine threads.")
@Parameter(name = "workload_count", defaultValue = "450", summary = "Number of workloads to generate.")
@Parameter(name = "read_workload_repeat_count", defaultValue = "5", summary = "Number of read requests per workload.")
@Parameter(name = "write_workload_repeat_count", defaultValue = "5", summary = "Number of write requests per workload.")
@Parameter(name = "ddos_workload_repeat_count", defaultValue = "5", summary = "Number of ddos requests per workload.")
@Parameter(name = "mixed_workload_repeat_count", defaultValue = "5", summary = "Number of mixed requests per workload.")
@Parameter(name = "workload_selection_seed", defaultValue = "42", summary = "Seed used to generate random workloads.")
@Configuration(name = "test", settings = ["server_threads = 2", "max_threads = 2", "workload_count = 100"])
@Configuration(name = "jmh")
class Http4kBenchmark : Benchmark {
    // Re-created for every repetition in setUpBeforeEach, torn down in tearDownAfterEach.
    private lateinit var server: WorkloadServer
    private lateinit var client: WorkloadClient
    private lateinit var configuration: WorkloadConfiguration

    /**
     * Runs the configured workload against the server and validates that the
     * number of executed workloads matches the configured workload count.
     */
    override fun run(context: BenchmarkContext): BenchmarkResult {
        val summary = runBlocking { client.workload() }
        return Validators.simple("Workload count", configuration.workloadCount.toLong(), summary.workloadCount)
    }

    /**
     * Starts a fresh server and client before each repetition.
     */
    override fun setUpBeforeEach(context: BenchmarkContext) {
        configuration = context.toWorkloadConfiguration()
        server = WorkloadServer(configuration.port, configuration.serverThreads).also { it.start() }

        // If port value is 0, the server allocates a free port which has to be
        // saved in the configuration so the client targets the actual port.
        configuration = configuration.copy(port = server.port())
        client = WorkloadClient(OkHttp(), configuration)
    }

    override fun tearDownAfterEach(context: BenchmarkContext) {
        server.stop()
    }

    // Convenience accessor for integer-valued benchmark parameters.
    private fun BenchmarkContext.intParameter(name: String): Int = parameter(name).value().toInt()

    /**
     * Collects all benchmark parameters into an immutable [WorkloadConfiguration].
     */
    private fun BenchmarkContext.toWorkloadConfiguration(): WorkloadConfiguration = WorkloadConfiguration(
        host = parameter("host").value(),
        port = intParameter("port"),
        serverThreads = intParameter("server_threads"),
        readWorkloadRepeatCount = intParameter("read_workload_repeat_count"),
        writeWorkloadRepeatCount = intParameter("write_workload_repeat_count"),
        ddosWorkloadRepeatCount = intParameter("ddos_workload_repeat_count"),
        mixedWorkloadRepeatCount = intParameter("mixed_workload_repeat_count"),
        workloadCount = intParameter("workload_count"),
        maxThreads = intParameter("max_threads"),
        workloadSelectionSeed = parameter("workload_selection_seed").value().toLong()
    )
}
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
package org.renaissance.http4k.model
2+
3+
import org.http4k.core.Body
4+
import org.http4k.format.Moshi.auto
5+
6+
/**
 * Value object served by the workload server's product REST API.
 *
 * The companion holds the http4k/Moshi body lenses used for (de)serializing
 * single products and product arrays in requests and responses.
 */
internal data class Product(val id: String, val name: String) {
    internal companion object {
        // Lens for a JSON array of products (GET /product responses).
        internal val productsLens = Body.auto<Array<Product>>().toLens()

        // Lens for a single JSON product (POST bodies, GET /product/{id} responses).
        internal val productLens = Body.auto<Product>().toLens()
    }
}
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
package org.renaissance.http4k.workload
2+
3+
import io.undertow.Undertow
4+
import io.undertow.server.handlers.BlockingHandler
5+
import org.http4k.core.HttpHandler
6+
import org.http4k.server.Http4kServer
7+
import org.http4k.server.Http4kUndertowHttpHandler
8+
import org.http4k.server.ServerConfig
9+
import org.http4k.server.ServerConfig.StopMode
10+
import org.http4k.server.ServerConfig.StopMode.Immediate
11+
import java.net.InetSocketAddress
12+
13+
/**
 * Undertow server configuration with an explicitly bounded worker pool.
 *
 * http4k's stock [org.http4k.server.Undertow] config leaves the worker pool at
 * Undertow's default of 32 * availableProcessors() threads, which keeps the
 * server from becoming CPU-bound on large machines. Fixing the worker count
 * here makes benchmark behavior predictable across machine sizes.
 *
 * @param port Server port (0 for auto-allocation).
 * @param workerThreads Number of Undertow worker threads.
 */
internal class BoundedUndertow(
    private val port: Int = 0,
    private val workerThreads: Int = Runtime.getRuntime().availableProcessors()
) : ServerConfig {
    override val stopMode: StopMode = Immediate

    override fun toServer(http: HttpHandler): Http4kServer {
        // Wrap in a BlockingHandler so handlers run on worker threads, not I/O threads.
        val handler = BlockingHandler(Http4kUndertowHttpHandler(http))

        return object : Http4kServer {
            val server: Undertow = Undertow.builder()
                .addHttpListener(port, "0.0.0.0")
                .setWorkerThreads(workerThreads)
                .setHandler(handler)
                .build()

            override fun start() = apply { server.start() }

            override fun stop() = apply { server.stop() }

            // With port 0 the actual port is only known after start(); read it
            // back from the listener's bound socket address.
            override fun port(): Int =
                if (port > 0) port
                else (server.listenerInfo[0].address as InetSocketAddress).port
        }
    }
}
Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,137 @@
1+
package org.renaissance.http4k.workload
2+
3+
import kotlinx.coroutines.Dispatchers
4+
import kotlinx.coroutines.async
5+
import kotlinx.coroutines.awaitAll
6+
import kotlinx.coroutines.coroutineScope
7+
import kotlinx.coroutines.withContext
8+
import org.http4k.core.HttpHandler
9+
import org.http4k.core.Method
10+
import org.http4k.core.Request
11+
import org.renaissance.http4k.model.Product
12+
import java.util.UUID
13+
import java.util.concurrent.atomic.AtomicLong
14+
import kotlin.random.Random
15+
16+
/**
 * Client used to generate workloads for the http4k server.
 * The client sends requests to the server based on the workload type and
 * tracks per-request-type and per-workload-type counts for validation.
 * @param client HttpHandler used to send requests to the server.
 * @param configuration WorkloadConfiguration used to generate the workload.
 */
internal class WorkloadClient(
    private val client: HttpHandler, private val configuration: WorkloadConfiguration
) {
    // Per-endpoint request counters.
    private val getProductsCounter = AtomicLong(0)
    private val getProductCounter = AtomicLong(0)
    private val postProductCounter = AtomicLong(0)

    // Per-workload-type completion counters.
    private val readCounter = AtomicLong(0)
    private val writeCounter = AtomicLong(0)
    private val ddosCounter = AtomicLong(0)
    private val mixedCounter = AtomicLong(0)

    private val workloadCounter = AtomicLong(0)

    // Bounds client-side concurrency to the configured number of threads.
    private val dispatcher = Dispatchers.IO.limitedParallelism(configuration.maxThreads, "Workload")

    /**
     * Starts the workload on the server based on [configuration].
     * Each workload consists of read, write, ddos and mixed requests.
     * The number of workloads is determined by [WorkloadConfiguration.workloadCount],
     * and the number of requests per workload type by the corresponding
     * configuration value. Workload types are drawn from a [Random] seeded with
     * [WorkloadConfiguration.workloadSelectionSeed], so the sequence is reproducible.
     * @return WorkloadSummary containing number of requests per type used for validation.
     */
    suspend fun workload(): WorkloadSummary = coroutineScope {
        val random = Random(configuration.workloadSelectionSeed)
        withContext(dispatcher) {
            (1..configuration.workloadCount)
                .flatMap {
                    val requests = when (random.nextWorkload()) {
                        WorkloadType.READ ->
                            List(configuration.readWorkloadRepeatCount) { async { client.readWorkload() } }

                        WorkloadType.WRITE ->
                            List(configuration.writeWorkloadRepeatCount) { async { client.writeWorkload() } }

                        WorkloadType.DDOS ->
                            List(configuration.ddosWorkloadRepeatCount) { async { client.ddosWorkload() } }

                        WorkloadType.MIXED ->
                            List(configuration.mixedWorkloadRepeatCount) { async { client.mixedWorkload() } }
                    }
                    workloadCounter.incrementAndGet()
                    requests
                }
                .awaitAll()

            WorkloadSummary(
                getProductsCount = getProductsCounter.get(),
                getProductCount = getProductCounter.get(),
                postProductCount = postProductCounter.get(),
                readCount = readCounter.get(),
                writeCount = writeCounter.get(),
                ddosCount = ddosCounter.get(),
                mixedCount = mixedCounter.get(),
                workloadCount = workloadCounter.get()
            )
        }
    }

    /**
     * Read workload: lists all products, then fetches each one individually.
     */
    private fun HttpHandler.readWorkload() {
        getProducts().forEach { getProduct(it.id) }
        readCounter.incrementAndGet()
    }

    /**
     * Write workload: creates a single new product.
     */
    private fun HttpHandler.writeWorkload() {
        postProduct(generateProduct())
        writeCounter.incrementAndGet()
    }

    /**
     * DDOS workload: lists all products 10 times in a row.
     */
    private fun HttpHandler.ddosWorkload() {
        repeat(10) { getProducts() }
        ddosCounter.incrementAndGet()
    }

    /**
     * Mixed workload: lists all products, creates a new product, then fetches it.
     */
    private fun HttpHandler.mixedWorkload() {
        getProducts()
        val product = generateProduct()
        postProduct(product)
        getProduct(product.id)
        mixedCounter.incrementAndGet()
    }

    private fun HttpHandler.getProducts(): List<Product> =
        Product.productsLens(this(Request(Method.GET, configuration.url("product"))))
            .toList()
            .also { getProductsCounter.incrementAndGet() }

    private fun HttpHandler.getProduct(id: String) =
        this(Request(Method.GET, configuration.url("product/$id")))
            .also { getProductCounter.incrementAndGet() }

    private fun HttpHandler.postProduct(product: Product) =
        this(Product.productLens(product, Request(Method.POST, configuration.url("product"))))
            .also { postProductCounter.incrementAndGet() }

    private fun WorkloadConfiguration.url(endpoint: String) = "http://$host:$port/$endpoint"

    // Uniform draw over the workload types; uses a single nextInt(size) per call.
    private fun Random.nextWorkload() = WorkloadType.entries.random(this)

    private fun generateProduct(): Product {
        val id = UUID.randomUUID().toString()
        return Product(id, "Product $id")
    }
}
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
package org.renaissance.http4k.workload
2+
3+
/**
 * Immutable settings bundle for one benchmark repetition.
 *
 * @property host Host the client connects to.
 * @property port Server port; 0 requests auto-allocation (the actual port is
 *   copied back in via [copy] once the server has started).
 * @property serverThreads Number of Undertow worker threads.
 * @property readWorkloadRepeatCount Read requests issued per READ workload.
 * @property writeWorkloadRepeatCount Write requests issued per WRITE workload.
 * @property ddosWorkloadRepeatCount Ddos requests issued per DDOS workload.
 * @property mixedWorkloadRepeatCount Mixed requests issued per MIXED workload.
 * @property workloadCount Total number of workloads to generate.
 * @property maxThreads Upper bound on concurrent client coroutine threads.
 * @property workloadSelectionSeed Seed for the random workload-type sequence.
 */
internal data class WorkloadConfiguration(
    val host: String,
    val port: Int,
    val serverThreads: Int,
    val readWorkloadRepeatCount: Int,
    val writeWorkloadRepeatCount: Int,
    val ddosWorkloadRepeatCount: Int,
    val mixedWorkloadRepeatCount: Int,
    val workloadCount: Int,
    val maxThreads: Int,
    val workloadSelectionSeed: Long
)
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
package org.renaissance.http4k.workload
2+
3+
import org.http4k.core.HttpHandler
4+
import org.http4k.core.Method
5+
import org.http4k.core.Response
6+
import org.http4k.core.Status
7+
import org.http4k.routing.bind
8+
import org.http4k.routing.path
9+
import org.http4k.routing.routes
10+
import org.http4k.server.Http4kServer
11+
import org.http4k.server.asServer
12+
import org.renaissance.http4k.model.Product
13+
import java.util.concurrent.ConcurrentHashMap
14+
15+
/**
 * In-process product REST API served by a bounded Undertow backend.
 *
 * Routes:
 *  - GET  /product       -> JSON array of all products
 *  - GET  /product/{id}  -> single product, 404 if absent, 400 if id missing
 *  - POST /product       -> stores the product from the JSON body, returns 201
 *
 * @param port Server port (0 for auto-allocation).
 * @param workerThreads Number of Undertow worker threads.
 */
internal class WorkloadServer(port: Int, workerThreads: Int) : Http4kServer {
    // Declared before `server` so the routing lambdas created by app() can never
    // observe an uninitialized map, regardless of property initialization order.
    private val products: MutableMap<String, Product> = ConcurrentHashMap<String, Product>()

    private val server = app().asServer(BoundedUndertow(port, workerThreads))

    private fun app(): HttpHandler = routes(
        "/product" bind Method.GET to { Product.productsLens(products.values.toTypedArray(), Response(Status.OK)) },
        "/product/{id}" bind Method.GET to {
            when (val id = it.path("id")) {
                null -> Response(Status.BAD_REQUEST)
                !in products -> Response(Status.NOT_FOUND)
                else -> {
                    // Fix: interpolate the product id, not the whole request ($it).
                    val product = products[id] ?: error("Invariant error: Product $id should be present")
                    Product.productLens(product, Response(Status.OK))
                }
            }
        },
        "/product" bind Method.POST to {
            val product = Product.productLens(it)
            products[product.id] = product
            Response(Status.CREATED)
        }
    )

    override fun port(): Int = server.port()

    override fun start(): Http4kServer {
        server.start()
        return this
    }

    override fun stop(): Http4kServer {
        server.stop()
        // Reset stored state so each repetition starts from an empty catalog.
        products.clear()
        return this
    }
}

0 commit comments

Comments
 (0)