Skip to content

Commit c286f9a

Browse files
committed
test
1 parent cbd6d95 commit c286f9a

2 files changed

Lines changed: 71 additions & 69 deletions

File tree

.github/workflows/mac.yml

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -450,25 +450,25 @@ jobs:
450450
# cmd: 'tests/python_tests/test_llm_pipeline.py tests/python_tests/test_llm_pipeline_static.py tests/python_tests/test_vlm_pipeline.py tests/python_tests/test_structured_output.py'
451451
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).visual_language.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
452452
# timeout: 180
453-
- name: 'GGUF Reader tests'
454-
cmd: 'python -m pytest -v ./tests/python_tests/test_gguf_reader.py'
455-
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).GGUF.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
456-
timeout: 360
457-
- name: 'Tokenizer tests'
458-
cmd: 'python -m pytest -v ./tests/python_tests/test_tokenizer.py'
459-
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).tokenizers.test }}
460-
timeout: 60
453+
# - name: 'GGUF Reader tests'
454+
# cmd: 'python -m pytest -v ./tests/python_tests/test_gguf_reader.py'
455+
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).GGUF.test || fromJSON(needs.smart_ci.outputs.affected_components).LLM.test }}
456+
# timeout: 360
457+
# - name: 'Tokenizer tests'
458+
# cmd: 'python -m pytest -v ./tests/python_tests/test_tokenizer.py'
459+
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).tokenizers.test }}
460+
# timeout: 60
461461
# Only supported on X64 or ARM with SVE support
462462
# - name: 'API tests'
463463
# cmd: 'tests/python_tests/test_continuous_batching.py tests/python_tests/test_generation_config.py tests/python_tests/test_sampling.py tests/python_tests/test_text_streamer.py'
464464
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).continuous_batching.test || fromJSON(needs.smart_ci.outputs.affected_components).sampling.test || fromJSON(needs.smart_ci.outputs.affected_components).text_streamer.test }}
465465
# timeout: 60
466-
- name: 'Rag tests'
467-
cmd: 'python -m pytest -v ./tests/python_tests/test_rag.py'
468-
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).RAG.test }}
469-
timeout: 30
466+
# - name: 'Rag tests'
467+
# cmd: 'python -m pytest -v ./tests/python_tests/test_rag.py'
468+
# run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).RAG.test }}
469+
# timeout: 30
470470
- name: 'WWB tests'
471-
cmd: 'python -m pytest -v ./tools/who_what_benchmark/tests -m "not nanollava"'
471+
cmd: 'python -m pytest -v ./tools/who_what_benchmark/tests -m reranking'
472472
run_condition: ${{ fromJSON(needs.smart_ci.outputs.affected_components).WWB.test }}
473473
timeout: 180
474474
- name: 'WWB tests (nanollava)'

tools/who_what_benchmark/tests/test_cli_reranking.py

Lines changed: 58 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
logger = logging.getLogger(__name__)
99

1010

11+
@pytest.mark.reranking
1112
@pytest.mark.parametrize(
1213
("model_id", "model_type"),
1314
[
@@ -43,53 +44,54 @@ def test_reranking_basic(model_id, model_type, tmp_path):
4344
"--hf",
4445
])
4546

46-
# test Optimum
47-
run_wwb([
48-
"--target-model",
49-
MODEL_PATH,
50-
"--num-samples",
51-
"1",
52-
"--gt-data",
53-
GT_FILE,
54-
"--device",
55-
"CPU",
56-
"--model-type",
57-
model_type,
58-
])
47+
# # test Optimum
48+
# run_wwb([
49+
# "--target-model",
50+
# MODEL_PATH,
51+
# "--num-samples",
52+
# "1",
53+
# "--gt-data",
54+
# GT_FILE,
55+
# "--device",
56+
# "CPU",
57+
# "--model-type",
58+
# model_type,
59+
# ])
5960

60-
# test GenAI
61-
run_wwb([
62-
"--target-model",
63-
MODEL_PATH,
64-
"--num-samples",
65-
"1",
66-
"--gt-data",
67-
GT_FILE,
68-
"--device",
69-
"CPU",
70-
"--model-type",
71-
model_type,
72-
"--genai",
73-
"--output",
74-
tmp_path,
75-
])
61+
# # test GenAI
62+
# run_wwb([
63+
# "--target-model",
64+
# MODEL_PATH,
65+
# "--num-samples",
66+
# "1",
67+
# "--gt-data",
68+
# GT_FILE,
69+
# "--device",
70+
# "CPU",
71+
# "--model-type",
72+
# model_type,
73+
# "--genai",
74+
# "--output",
75+
# tmp_path,
76+
# ])
7677

77-
# test w/o models
78-
run_wwb([
79-
"--target-data",
80-
tmp_path / "target.csv",
81-
"--num-samples",
82-
"1",
83-
"--gt-data",
84-
GT_FILE,
85-
"--device",
86-
"CPU",
87-
"--model-type",
88-
model_type,
89-
"--genai",
90-
])
78+
# # test w/o models
79+
# run_wwb([
80+
# "--target-data",
81+
# tmp_path / "target.csv",
82+
# "--num-samples",
83+
# "1",
84+
# "--gt-data",
85+
# GT_FILE,
86+
# "--device",
87+
# "CPU",
88+
# "--model-type",
89+
# model_type,
90+
# "--genai",
91+
# ])
9192

9293

94+
@pytest.mark.reranking
9395
@pytest.mark.parametrize(
9496
("model_id", "model_type"),
9597
[
@@ -125,16 +127,16 @@ def test_reranking_qwen(model_id, model_type, tmp_path):
125127
"--hf",
126128
])
127129

128-
# test Optimum
129-
run_wwb([
130-
"--target-model",
131-
MODEL_PATH,
132-
"--num-samples",
133-
"1",
134-
"--gt-data",
135-
GT_FILE,
136-
"--device",
137-
"CPU",
138-
"--model-type",
139-
model_type,
140-
])
130+
# # test Optimum
131+
# run_wwb([
132+
# "--target-model",
133+
# MODEL_PATH,
134+
# "--num-samples",
135+
# "1",
136+
# "--gt-data",
137+
# GT_FILE,
138+
# "--device",
139+
# "CPU",
140+
# "--model-type",
141+
# model_type,
142+
# ])

0 commit comments

Comments (0)