7 | 7 | "log"
8 | 8 | "testing"
9 | 9 |
| 10 | + "github.com/go-echarts/go-echarts/v2/charts" |
| 11 | + "github.com/go-echarts/go-echarts/v2/opts" |
10 | 12 | "github.com/stretchr/testify/require"
11 | 13 |
12 | 14 | "github.com/splunk/stef/benchmarks/encodings"
@@ -53,187 +55,245 @@ func TestMain(m *testing.M) { |
53 | 55 | m.Run()
54 | 56 | } |
55 | 57 |
| 58 | +func addZstdCompressTime( |
| 59 | + b *testing.B, |
| 60 | + encoding encodings.MetricEncoding, |
| 61 | + bodyBytes []byte, |
| 62 | + dataPointCount int, |
| 63 | +) { |
| 64 | + b.Run( |
| 65 | + fmt.Sprintf("%s/zstd", encoding.Name()), |
| 66 | + func(b *testing.B) { |
| 67 | + b.ResetTimer() |
| 68 | + for i := 0; i < b.N; i++ { |
| 69 | + zstdBytes := testutils.CompressZstd(bodyBytes) |
| 70 | + if zstdBytes == nil { |
| 71 | + log.Fatal("compression failed") |
| 72 | + } |
| 73 | + } |
| 74 | + chart.RecordStacked( |
| 75 | + b, |
| 76 | + encoding.LongName(), |
| 77 | + "Zstd Compress", |
| 78 | + float64(b.Elapsed().Nanoseconds())/float64(b.N*dataPointCount), |
| 79 | + ) |
| 80 | + }, |
| 81 | + ) |
| 82 | +} |
| 83 | + |
| 84 | +func addZstdDecompressTime( |
| 85 | + b *testing.B, |
| 86 | + encoding encodings.MetricEncoding, |
| 87 | + bodyBytes []byte, |
| 88 | + dataPointCount int, |
| 89 | +) { |
| 90 | + zstdBytes := testutils.CompressZstd(bodyBytes) |
| 91 | + if zstdBytes == nil { |
| 92 | + log.Fatal("compression failed") |
| 93 | + } |
| 94 | + b.Run( |
| 95 | + fmt.Sprintf("%s/zstd", encoding.Name()), |
| 96 | + func(b *testing.B) { |
| 97 | + b.ResetTimer() |
| 98 | + for i := 0; i < b.N; i++ { |
| 99 | + _, err := testutils.DecompressZstd(zstdBytes) |
| 100 | + if err != nil { |
| 101 | + log.Fatal(err) |
| 102 | + } |
| 103 | + } |
| 104 | + chart.RecordStacked( |
| 105 | + b, |
| 106 | + encoding.LongName(), |
| 107 | + "Zstd Decompress", |
| 108 | + float64(b.Elapsed().Nanoseconds())/float64(b.N*dataPointCount), |
| 109 | + ) |
| 110 | + }, |
| 111 | + ) |
| 112 | + |
| 113 | +} |
| 114 | + |
56 | 115 | func BenchmarkSerializeNative(b *testing.B) { |
57 | 116 | chart.BeginSection("Speed Benchmarks") |
58 | 117 |
59 | 118 | chart.BeginChart("Serialization Speed", b) |
60 | | - defer chart.EndChart("ns/point", "CPU time to serialize one data point") |
| 119 | + defer chart.EndChart( |
| 120 | + "ns/point", |
| 121 | + charts.WithColorsOpts(opts.Colors{"#92C5F9", "#12C5F9"}), |
| 122 | + ) |
61 | 123 |
62 | | - compressions := []string{"none"} |
63 | 124 | for _, dataVariation := range benchmarkDataVariations { |
64 | 125 | for _, encoding := range speedEncodings { |
65 | | - for _, compression := range compressions { |
66 | | - if _, ok := encoding.(*otelarrow.OtelArrowEncoding); ok { |
67 | | - // Skip Arrow, it does not have native serialization |
68 | | - continue |
69 | | - } |
70 | | - b.Run( |
71 | | - fmt.Sprintf("%s/%s", encoding.Name(), compression), |
72 | | - func(b *testing.B) { |
73 | | - batch := dataVariation.generator.Generate() |
74 | | - inmem, err := encoding.FromOTLP(batch) |
75 | | - require.NoError(b, err) |
76 | | - b.ResetTimer() |
77 | | - for i := 0; i < b.N; i++ { |
78 | | - bodyBytes, err := encoding.Encode(inmem) |
79 | | - require.NotNil(b, bodyBytes) |
80 | | - require.NoError(b, err) |
81 | | - if compression == "zstd" { |
82 | | - testutils.CompressZstd(bodyBytes) |
83 | | - } |
84 | | - } |
85 | | - chart.Record( |
86 | | - b, |
87 | | - encoding.LongName(), |
88 | | - float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
89 | | - ) |
90 | | - }, |
91 | | - ) |
| 126 | + if _, ok := encoding.(*otelarrow.OtelArrowEncoding); ok { |
| 127 | + // Skip Arrow; it does not have native serialization
| 128 | + continue |
92 | 129 | } |
| 130 | + batch := dataVariation.generator.Generate() |
| 131 | + inmem, err := encoding.FromOTLP(batch) |
| 132 | + require.NoError(b, err) |
| 133 | + b.Run( |
| 134 | + fmt.Sprintf("%s/serialize", encoding.Name()), |
| 135 | + func(b *testing.B) { |
| 136 | + b.ResetTimer() |
| 137 | + for i := 0; i < b.N; i++ { |
| 138 | + bodyBytes, err := encoding.Encode(inmem) |
| 139 | + if err != nil || bodyBytes == nil { |
| 140 | + log.Fatalf("Encode failed: %v", err)
| 141 | + } |
| 142 | + } |
| 143 | + chart.RecordStacked( |
| 144 | + b, |
| 145 | + encoding.LongName(), |
| 146 | + "Serialize", |
| 147 | + float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
| 148 | + ) |
| 149 | + }, |
| 150 | + ) |
| 151 | + bodyBytes, err := encoding.Encode(inmem) |
| 152 | + require.NotNil(b, bodyBytes) |
| 153 | + require.NoError(b, err) |
| 154 | + |
| 155 | + addZstdCompressTime(b, encoding, bodyBytes, batch.DataPointCount()) |
93 | 156 | } |
94 | 157 | } |
95 | 158 | b.ReportAllocs() |
96 | 159 | } |
97 | 160 |
98 | 161 | func BenchmarkDeserializeNative(b *testing.B) { |
99 | 162 | chart.BeginChart("Deserialization Speed", b) |
100 | | - defer chart.EndChart("ns/point", "CPU time to deserialize one data point") |
| 163 | + defer chart.EndChart( |
| 164 | + "ns/point", |
| 165 | + charts.WithColorsOpts(opts.Colors{"#92C5F9", "#12C5F9"}), |
| 166 | + ) |
101 | 167 |
102 | | - compressions := []string{"none"} |
103 | 168 | for _, dataVariation := range benchmarkDataVariations { |
104 | 169 | for _, encoding := range speedEncodings { |
105 | | - for _, compression := range compressions { |
106 | | - if _, ok := encoding.(*otelarrow.OtelArrowEncoding); ok { |
107 | | - // Skip Arrow, it does not have native serialization |
108 | | - continue |
109 | | - } |
110 | | - b.Run( |
111 | | - fmt.Sprintf("%s/%s", encoding.Name(), compression), |
112 | | - func(b *testing.B) { |
113 | | - batch := dataVariation.generator.Generate() |
114 | | - inmem, err := encoding.FromOTLP(batch) |
115 | | - require.NoError(b, err) |
116 | | - bodyBytes, err := encoding.Encode(inmem) |
| 170 | + if _, ok := encoding.(*otelarrow.OtelArrowEncoding); ok { |
| 171 | + // Skip Arrow; it does not have native serialization
| 172 | + continue |
| 173 | + } |
| 174 | + batch := dataVariation.generator.Generate() |
| 175 | + inmem, err := encoding.FromOTLP(batch) |
| 176 | + require.NoError(b, err) |
| 177 | + b.Run( |
| 178 | + fmt.Sprintf("%s/deserialize", encoding.Name()),
| 179 | + func(b *testing.B) { |
| 180 | + bodyBytes, err := encoding.Encode(inmem) |
| 181 | + if err != nil { |
| 182 | + log.Fatal(err) |
| 183 | + } |
| 184 | + |
| 185 | + b.ResetTimer() |
| 186 | + for i := 0; i < b.N; i++ { |
| 187 | + _, err = encoding.Decode(bodyBytes) |
117 | 188 | if err != nil { |
118 | 189 | log.Fatal(err) |
119 | 190 | } |
120 | | - var compressedBytes []byte |
121 | | - if compression == "zstd" { |
122 | | - compressedBytes = testutils.CompressZstd(bodyBytes) |
123 | | - } |
124 | | - |
125 | | - b.ResetTimer() |
126 | | - for i := 0; i < b.N; i++ { |
127 | | - if compression == "zstd" { |
128 | | - bodyBytes, err = testutils.DecompressZstd(compressedBytes) |
129 | | - require.NoError(b, err) |
130 | | - } |
131 | | - _, err = encoding.Decode(bodyBytes) |
132 | | - if err != nil { |
133 | | - log.Fatal(err) |
134 | | - } |
135 | | - } |
136 | | - chart.Record( |
137 | | - b, |
138 | | - encoding.LongName(), |
139 | | - float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
140 | | - ) |
141 | | - }, |
142 | | - ) |
| 191 | + } |
| 192 | + chart.RecordStacked( |
| 193 | + b, |
| 194 | + encoding.LongName(), |
| 195 | + "Deserialize", |
| 196 | + float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
| 197 | + ) |
| 198 | + }, |
| 199 | + ) |
| 200 | + bodyBytes, err := encoding.Encode(inmem) |
| 201 | + if err != nil { |
| 202 | + log.Fatal(err) |
143 | 203 | } |
| 204 | + addZstdDecompressTime(b, encoding, bodyBytes, batch.DataPointCount()) |
144 | 205 | } |
145 | 206 | } |
146 | 207 | b.ReportAllocs() |
147 | 208 | } |
148 | 209 |
149 | 210 | func BenchmarkSerializeFromPdata(b *testing.B) { |
150 | 211 | chart.BeginChart("Serialization From pdata Speed", b) |
151 | | - defer chart.EndChart("ns/point", "CPU time to serialize one data point") |
| 212 | + defer chart.EndChart( |
| 213 | + "ns/point", |
| 214 | + charts.WithColorsOpts(opts.Colors{"#92C5F9", "#12C5F9"}), |
| 215 | + ) |
152 | 216 |
153 | | - compressions := []string{"none"} |
154 | 217 | for _, dataVariation := range benchmarkDataVariations { |
155 | 218 | for _, encoding := range speedEncodings { |
156 | | - for _, compression := range compressions { |
157 | | - if dataVariation.generator.GetName() == "hostandcollector-otelmetrics.zst" && |
158 | | - encoding.Name() == "ARROW" { |
159 | | - // Skip due to bug in Arrow encoding |
160 | | - continue |
161 | | - } |
162 | | - b.Run( |
163 | | - fmt.Sprintf("%s/%s", encoding.Name(), compression), |
164 | | - func(b *testing.B) { |
165 | | - batch := dataVariation.generator.Generate() |
166 | | - b.ResetTimer() |
167 | | - for i := 0; i < b.N; i++ { |
168 | | - inmem, err := encoding.FromOTLP(batch) |
169 | | - require.NoError(b, err) |
170 | | - bodyBytes, err := encoding.Encode(inmem) |
171 | | - require.NotNil(b, bodyBytes) |
172 | | - require.NoError(b, err) |
173 | | - if compression == "zstd" { |
174 | | - testutils.CompressZstd(bodyBytes) |
175 | | - } |
176 | | - } |
177 | | - chart.Record( |
178 | | - b, |
179 | | - encoding.LongName(), |
180 | | - float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
181 | | - ) |
182 | | - }, |
183 | | - ) |
| 219 | + if dataVariation.generator.GetName() == "hostandcollector-otelmetrics.zst" && |
| 220 | + encoding.Name() == "ARROW" { |
| 221 | + // Skip due to bug in Arrow encoding |
| 222 | + continue |
184 | 223 | } |
| 224 | + batch := dataVariation.generator.Generate() |
| 225 | + b.Run( |
| 226 | + fmt.Sprintf("%s/serialize", encoding.Name()), |
| 227 | + func(b *testing.B) { |
| 228 | + b.ResetTimer() |
| 229 | + for i := 0; i < b.N; i++ { |
| 230 | + inmem, err := encoding.FromOTLP(batch) |
| 231 | + require.NoError(b, err) |
| 232 | + bodyBytes, err := encoding.Encode(inmem) |
| 233 | + require.NotNil(b, bodyBytes) |
| 234 | + require.NoError(b, err) |
| 235 | + } |
| 236 | + chart.RecordStacked(
| 237 | + b,
| 238 | + encoding.LongName(),
| 239 | + "Serialize",
| 240 | + float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
| 241 | + ) |
| 242 | + }, |
| 243 | + ) |
| 244 | + inmem, err := encoding.FromOTLP(batch) |
| 245 | + require.NoError(b, err) |
| 246 | + bodyBytes, err := encoding.Encode(inmem) |
| 247 | + require.NotNil(b, bodyBytes) |
| 248 | + require.NoError(b, err) |
| 249 | + addZstdCompressTime(b, encoding, bodyBytes, batch.DataPointCount()) |
185 | 250 | } |
186 | 251 | } |
187 | 252 | b.ReportAllocs() |
188 | 253 | } |
189 | 254 |
190 | 255 | func BenchmarkDeserializeToPdata(b *testing.B) { |
191 | 256 | chart.BeginChart("Deserialization To pdata Speed", b) |
192 | | - defer chart.EndChart("ns/point", "CPU time to deserialize one data point") |
| 257 | + defer chart.EndChart( |
| 258 | + "ns/point", |
| 259 | + charts.WithColorsOpts(opts.Colors{"#92C5F9", "#12C5F9"}), |
| 260 | + ) |
193 | 261 |
194 | | - compressions := []string{"none"} |
195 | 262 | for _, dataVariation := range benchmarkDataVariations { |
196 | 263 | for _, encoding := range speedEncodings { |
197 | | - for _, compression := range compressions { |
198 | | - if dataVariation.generator.GetName() == "hostandcollector-otelmetrics.zst" && |
199 | | - encoding.Name() == "ARROW" { |
200 | | - // Skip due to bug in Arrow encoding |
201 | | - continue |
202 | | - } |
203 | | - b.Run( |
204 | | - fmt.Sprintf("%s/%s", encoding.Name(), compression), |
205 | | - func(b *testing.B) { |
206 | | - batch := dataVariation.generator.Generate() |
207 | | - inmem, err := encoding.FromOTLP(batch) |
208 | | - require.NoError(b, err) |
209 | | - bodyBytes, err := encoding.Encode(inmem) |
| 264 | + if dataVariation.generator.GetName() == "hostandcollector-otelmetrics.zst" && |
| 265 | + encoding.Name() == "ARROW" { |
| 266 | + // Skip due to bug in Arrow encoding |
| 267 | + continue |
| 268 | + } |
| 269 | + batch := dataVariation.generator.Generate() |
| 270 | + inmem, err := encoding.FromOTLP(batch) |
| 271 | + require.NoError(b, err) |
| 272 | + bodyBytes, err := encoding.Encode(inmem) |
| 273 | + if err != nil { |
| 274 | + log.Fatal(err) |
| 275 | + } |
| 276 | + |
| 277 | + b.Run( |
| 278 | + fmt.Sprintf("%s/deserialize", encoding.Name()), |
| 279 | + func(b *testing.B) { |
| 280 | + |
| 281 | + b.ResetTimer() |
| 282 | + for i := 0; i < b.N; i++ { |
| 283 | + _, err = encoding.ToOTLP(bodyBytes) |
210 | 284 | if err != nil { |
211 | 285 | log.Fatal(err) |
212 | 286 | } |
213 | | - var compressedBytes []byte |
214 | | - if compression == "zstd" { |
215 | | - compressedBytes = testutils.CompressZstd(bodyBytes) |
216 | | - } |
217 | | - |
218 | | - b.ResetTimer() |
219 | | - for i := 0; i < b.N; i++ { |
220 | | - if compression == "zstd" { |
221 | | - bodyBytes, err = testutils.DecompressZstd(compressedBytes) |
222 | | - require.NoError(b, err) |
223 | | - } |
224 | | - _, err = encoding.ToOTLP(bodyBytes) |
225 | | - if err != nil { |
226 | | - log.Fatal(err) |
227 | | - } |
228 | | - } |
229 | | - chart.Record( |
230 | | - b, |
231 | | - encoding.LongName(), |
232 | | - float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
233 | | - ) |
234 | | - }, |
235 | | - ) |
236 | | - } |
| 287 | + } |
| 288 | + chart.RecordStacked(
| 289 | + b,
| 290 | + encoding.LongName(),
| 291 | + "Deserialize",
| 292 | + float64(b.Elapsed().Nanoseconds())/float64(b.N*batch.DataPointCount()), |
| 293 | + ) |
| 294 | + }, |
| 295 | + ) |
| 296 | + addZstdDecompressTime(b, encoding, bodyBytes, batch.DataPointCount()) |
237 | 297 | } |
238 | 298 | } |
239 | 299 | b.ReportAllocs() |