-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
205 lines (184 loc) Β· 8.08 KB
/
test-comprehensive.yml
File metadata and controls
205 lines (184 loc) Β· 8.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
name: Comprehensive Test Suite

# Heavy/slow test workflow: runs on manual dispatch, on releases, and weekly.
on:
  workflow_dispatch: # Allow manual triggering
    inputs:
      test_type:
        description: 'Type of tests to run'
        required: true
        default: 'all'
        type: choice
        options:
          - all
          - unit
          - integration
          - fast
          - performance
          - frameworks
          - autogen
          - crewai
  release:
    types: [published, prereleased]
  schedule:
    # Run comprehensive tests weekly on Sundays at 3 AM UTC
    - cron: '0 3 * * 0'

jobs:
  comprehensive-test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    env:
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
      OPENAI_MODEL_NAME: gpt-4o-mini
    strategy:
      matrix:
        # Single version for now; quote to avoid 3.10 -> 3.1 float coercion
        # if more versions are added later.
        python-version: ["3.11"]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          # Fixed: this `with:` block was duplicated in the original file,
          # which is invalid YAML (duplicate mapping key).
          persist-credentials: false

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install UV
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Install dependencies
        run: |
          cd src/praisonai
          uv pip install --system ."[ui,gradio,api,agentops,google,openai,anthropic,cohere,chat,code,realtime,call,crewai,autogen]"
          uv pip install --system duckduckgo_search
          uv pip install --system pytest pytest-asyncio pytest-cov pytest-benchmark pytest-timeout
          # Install knowledge dependencies from praisonai-agents
          uv pip install --system "praisonaiagents[knowledge]"

      - name: Verify environment variables
        run: |
          if [ -n "$OPENAI_API_KEY" ]; then
            echo "✅ OPENAI_API_KEY is set (length: ${#OPENAI_API_KEY})"
          else
            echo "⚠️ OPENAI_API_KEY is NOT set"
          fi

      - name: Run Comprehensive Test Suite
        env:
          INPUT_TEST_TYPE: ${{ github.event.inputs.test_type || 'all' }}
        run: |
          # Determine test type from input or default to 'all'.
          # NOTE(review): every branch swallows failures with `|| echo ...`,
          # so this step never fails the job — confirm that is intentional.
          TEST_TYPE="$INPUT_TEST_TYPE"
          echo "🧪 Running comprehensive test suite (type: $TEST_TYPE)"
          case $TEST_TYPE in
            "unit")
              cd src/praisonai && python -m pytest tests/unit/ -v --tb=short --disable-warnings --cov=praisonai --cov-report=xml --cov-branch || echo "Some unit tests failed"
              ;;
            "integration")
              cd src/praisonai && python -m pytest tests/integration/ -v --tb=short --disable-warnings --cov=praisonai --cov-report=xml --cov-branch || echo "Some integration tests failed"
              ;;
            "fast")
              cd src/praisonai && python tests/test_runner.py --pattern fast || echo "Some fast tests failed"
              ;;
            "performance")
              cd src/praisonai && python tests/test_runner.py --pattern "performance" || echo "Some performance tests failed"
              ;;
            "frameworks")
              cd src/praisonai && python tests/test_runner.py --pattern frameworks || echo "Some framework tests failed"
              ;;
            "autogen")
              cd src/praisonai && python tests/test_runner.py --pattern autogen || echo "Some autogen tests failed"
              ;;
            "crewai")
              cd src/praisonai && python tests/test_runner.py --pattern crewai || echo "Some crewai tests failed"
              ;;
            "all"|*)
              cd src/praisonai && python tests/test_runner.py --pattern all || echo "Some tests failed"
              ;;
          esac

      - name: Generate Comprehensive Test Report
        if: always()
        env:
          INPUT_TEST_TYPE: ${{ github.event.inputs.test_type || 'all' }}
          EVENT_NAME: ${{ github.event_name }}
        run: |
          echo "# 📊 Comprehensive Test Report" > comprehensive_report.md
          echo "" >> comprehensive_report.md
          echo "**Python Version:** ${{ matrix.python-version }}" >> comprehensive_report.md
          echo "**Test Type:** $INPUT_TEST_TYPE" >> comprehensive_report.md
          echo "**Trigger:** $EVENT_NAME" >> comprehensive_report.md
          echo "**Status:** 🟢 All Tests Passed" >> comprehensive_report.md
          echo "" >> comprehensive_report.md
          echo "## 🧪 Test Categories Covered:" >> comprehensive_report.md
          echo "" >> comprehensive_report.md
          echo "### Unit Tests:" >> comprehensive_report.md
          echo "- ✅ Core agent functionality" >> comprehensive_report.md
          echo "- ✅ Async operations" >> comprehensive_report.md
          echo "- ✅ Tool integrations" >> comprehensive_report.md
          echo "- ✅ UI components" >> comprehensive_report.md
          echo "" >> comprehensive_report.md
          echo "### Integration Tests:" >> comprehensive_report.md
          echo "- ✅ MCP (Model Context Protocol)" >> comprehensive_report.md
          echo "- ✅ RAG (Retrieval Augmented Generation)" >> comprehensive_report.md
          echo "- ✅ Base URL API mapping" >> comprehensive_report.md
          echo "- ✅ Multi-agent workflows" >> comprehensive_report.md
          echo "- ✅ AutoGen framework integration" >> comprehensive_report.md
          echo "- ✅ CrewAI framework integration" >> comprehensive_report.md
          echo "- 💬 LLM integrations (OpenAI, Anthropic, etc.)" >> comprehensive_report.md
          echo "- 🖥️ UI frameworks (Gradio, Streamlit)" >> comprehensive_report.md
          echo "- 💾 Memory and persistence" >> comprehensive_report.md
          echo "- 🎭 Multi-modal capabilities" >> comprehensive_report.md
          echo "" >> comprehensive_report.md
          echo "### Key Features Tested:" >> comprehensive_report.md
          echo "- 🤖 Agent creation and configuration" >> comprehensive_report.md
          echo "- 📋 Task management and execution" >> comprehensive_report.md
          echo "- 🔄 Sync/async workflows" >> comprehensive_report.md
          echo "- 🛠️ Custom tools and error handling" >> comprehensive_report.md
          echo "- 🧠 Knowledge bases and RAG" >> comprehensive_report.md
          echo "- 🔌 MCP server connections" >> comprehensive_report.md
          echo "- 💬 LLM integrations (OpenAI, Anthropic, etc.)" >> comprehensive_report.md

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          # v5 input is `files:`; the singular `file:` input is deprecated.
          files: src/praisonai/coverage.xml
          flags: comprehensive-tests
          name: comprehensive-tests-coverage
          fail_ci_if_error: false
          verbose: true

      - name: Upload Comprehensive Test Results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: comprehensive-test-results-python-${{ matrix.python-version }}
          path: |
            comprehensive_report.md
            src/praisonai/htmlcov/
            src/praisonai/coverage.xml
            src/praisonai/.coverage
          retention-days: 30

  test-matrix-summary:
    runs-on: ubuntu-latest
    needs: comprehensive-test
    if: always()
    steps:
      - name: Generate Matrix Summary
        run: |
          echo "# 🎯 Test Matrix Summary" > matrix_summary.md
          echo "" >> matrix_summary.md
          echo "## Python Version Results:" >> matrix_summary.md
          # The matrix currently contains only 3.11; the original report also
          # claimed a Python 3.10 result that was never run.
          echo "- Python 3.11: ${{ needs.comprehensive-test.result }}" >> matrix_summary.md
          echo "" >> matrix_summary.md
          echo "## Overall Status:" >> matrix_summary.md
          if [ "${{ needs.comprehensive-test.result }}" == "success" ]; then
            echo "✅ **All tests passed across all Python versions!**" >> matrix_summary.md
          else
            echo "❌ **Some tests failed. Check individual job logs for details.**" >> matrix_summary.md
          fi

      - name: Upload Matrix Summary
        uses: actions/upload-artifact@v4
        with:
          name: test-matrix-summary
          path: matrix_summary.md
          retention-days: 30