# benchmark.yml — GitHub Actions workflow (commit "Update benchmark.yml", PR/run #33).
# NOTE: this file was captured from the GitHub run-view web page; the page chrome
# ("Skip to content", run title lines) has been converted into this comment so
# the file remains valid YAML.

name: 性能测试
on:
workflow_dispatch:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
benchmark-serial:
runs-on: ubuntu-latest
services:
redis:
image: redis:latest
ports:
- 6379:6379
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
strategy:
matrix:
client_count: [1, 8, 128, 1024]
steps:
- name: 检出代码
uses: actions/checkout@v4
- name: 设置 Java 环境
uses: actions/setup-java@v4
with:
java-version: '8'
distribution: 'temurin'
cache: 'maven'
- name: 编译项目
run: mvn compile -q
- name: 运行性能测试 (${{ matrix.client_count }} 客户端)
run: |
echo "Running benchmark tests with ${{ matrix.client_count }} clients..."
# 定义测试参数
clients=("Redisun" "Redisson" "Jedis" "Lettuce")
methods=("asyncSet" "asyncGet" "concurrentSet" "concurrentGet")
# 创建结果目录
mkdir -p benchmark-results
# 循环执行所有测试
for client in "${clients[@]}"; do
for method in "${methods[@]}"; do
echo "Running ${client} ${method} test with ${{ matrix.client_count }} clients..."
log_file="benchmark-results/${client,,}_${method}_${{ matrix.client_count }}.log"
# 运行测试并捕获结果
if mvn test -Dtest=${client}Benchmark#${method} -Dclient.count=${{ matrix.client_count }} > "${log_file}" 2>&1; then
echo "✓ ${client} ${method} 测试完成"
else
echo "✗ ${client} ${method} 测试失败或超时"
# 继续执行其他测试,不退出
fi
# 显示关键结果
if [ -f "${log_file}" ]; then
echo "测试结果摘要:"
grep -E "(cost:|ops/s:|成功|失败|异常)" "${log_file}" | head -10 || true
fi
echo "----------------------------------------"
done
done
- name: 上传测试结果
uses: actions/upload-artifact@v4
with:
name: benchmark-results-${{ matrix.client_count }}
path: |
benchmark-results/*.log
!benchmark-results/**/*.class
generate-report:
needs: benchmark-serial
runs-on: ubuntu-latest
steps:
- name: 检出代码
uses: actions/checkout@v4
- name: 下载所有测试结果
uses: actions/download-artifact@v4
with:
path: ./artifacts
pattern: benchmark-results-*
merge-multiple: true
- name: 生成性能测试报告
run: |
echo "# Redis客户端性能测试报告" > benchmark-report.md
echo "测试时间: $(date)" >> benchmark-report.md
echo "" >> benchmark-report.md
echo "## 测试环境" >> benchmark-report.md
echo "- 操作系统: Ubuntu 20.04" >> benchmark-report.md
echo "- Java 版本: JDK 8" >> benchmark-report.md
echo "- Redis 版本: latest" >> benchmark-report.md
echo "- 测试模式: 串行执行" >> benchmark-report.md
echo "" >> benchmark-report.md
# 提取测试结果的函数
extract_metrics() {
local log_file=$1
if [ -f "$log_file" ]; then
# 提取耗时(毫秒)
local time=$(grep "cost:" "$log_file" | awk '{print $(NF-1)}' | grep -Eo '[0-9]+' | head -1)
# 提取OPS
local ops=$(grep "ops/s:" "$log_file" | awk '{print $(NF-1)}' | grep -Eo '[0-9]+' | head -1)
# 提取测试结果状态
local status=$(if grep -q "成功\|PASSED" "$log_file"; then echo "成功"; else echo "失败"; fi)
echo "${time:-N/A}|${ops:-N/A}|${status}"
else
echo "N/A|N/A|文件不存在"
fi
}
# 客户端和测试方法定义
clients=("redisun" "redisson" "jedis" "lettuce")
client_names=("Redisun" "Redisson" "Jedis" "Lettuce")
methods=("asyncSet" "asyncGet" "concurrentSet" "concurrentGet")
method_names=("异步SET" "异步GET" "并发SET" "并发GET")
client_counts=(1 8 128 1024)
# 生成各测试类型的报告
for method_idx in "${!methods[@]}"; do
method=${methods[$method_idx]}
method_name=${method_names[$method_idx]}
echo "## ${method_name}性能对比" >> benchmark-report.md
echo "" >> benchmark-report.md
echo "| 客户端数量 | Redisun(耗时/OPS) | Redisson(耗时/OPS) | Jedis(耗时/OPS) | Lettuce(耗时/OPS) |" >> benchmark-report.md
echo "|------------|-------------------|-------------------|-----------------|-------------------|" >> benchmark-report.md
for client_count in "${client_counts[@]}"; do
line="| ${client_count}"
for client in "${clients[@]}"; do
log_file="./artifacts/benchmark-results-${client_count}/benchmark-results/${client}_${method}_${client_count}.log"
metrics=$(extract_metrics "$log_file")
time=$(echo "$metrics" | cut -d'|' -f1)
ops=$(echo "$metrics" | cut -d'|' -f2)
status=$(echo "$metrics" | cut -d'|' -f3)
if [ "$status" = "成功" ] && [ "$time" != "N/A" ] && [ "$ops" != "N/A" ]; then
line="${line} | ${time}ms/${ops}"
else
line="${line} | 测试失败"
fi
done
echo "${line} |" >> benchmark-report.md
done
echo "" >> benchmark-report.md
done
# 添加总结部分
echo "## 测试总结" >> benchmark-report.md
echo "" >> benchmark-report.md
echo "1. **测试规模**: 覆盖了从 1 到 1024 个客户端的多种并发场景" >> benchmark-report.md
echo "2. **测试类型**: 包含异步和并发的 SET/GET 操作" >> benchmark-report.md
echo "3. **执行方式**: 所有测试串行执行,确保环境一致性" >> benchmark-report.md
echo "4. **数据收集**: 自动提取耗时和OPS指标" >> benchmark-report.md
echo "" >> benchmark-report.md
echo "> 注意:由于 GitHub Actions 运行环境的限制,测试结果的绝对值可能不如专用测试环境准确,但相对比较具有参考价值。" >> benchmark-report.md
# 显示报告内容
echo "生成的测试报告:"
cat benchmark-report.md
- name: 上传测试报告
uses: actions/upload-artifact@v4
with:
name: performance-report
path: benchmark-report.md
- name: 在Workflow中显示报告摘要
run: |
echo "=== 性能测试报告摘要 ==="
if [ -f "benchmark-report.md" ]; then
# 显示报告的关键部分
grep -A 20 "##" benchmark-report.md | head -30
else
echo "报告生成失败"
fi