@@ -17,7 +17,6 @@
 
 from pathlib import Path
 from typing import Set, Type
-from unittest.mock import Mock, patch
 
 import pytest
 import toml
@@ -34,7 +34,6 @@
     TestRun,
     TestScenario,
     TestScenarioParser,
-    TestScenarioParsingError,
     TestTemplate,
 )
 from cloudai.models.scenario import TestRunModel, TestScenarioModel
@@ -529,62 +527,3 @@ def test_get_reporters_nccl(self):
         assert len(reporters) == 2
         assert NcclTestPerformanceReportGenerationStrategy in reporters
         assert NcclTestPredictionReportGenerationStrategy in reporters
-
-
-class TestReportMetricsDSE:
-    @pytest.fixture
-    def tname(self) -> str:
-        return "nccl"
-
-    @pytest.fixture
-    def test_info(self, tname: str) -> TestRunModel:
-        return TestRunModel(id="main1", test_name=tname, time_limit="01:00:00", weight=10, iterations=1, num_nodes=1)
-
-    @pytest.fixture
-    def ts_parser(self, test_scenario_parser: TestScenarioParser) -> TestScenarioParser:
-        nccl = NCCLTestDefinition(
-            name="nccl",
-            description="desc",
-            test_template_name="NcclTest",
-            cmd_args=NCCLCmdArgs(docker_image_url="fake://url/nccl"),
-            extra_env_vars={"DSE": ["v1", "v2"]},
-        )
-        test_scenario_parser.test_mapping["nccl"] = Test(test_definition=nccl, test_template=Mock())
-        return test_scenario_parser
-
-    def test_raises_on_unknown_metric(
-        self, ts_parser: TestScenarioParser, tname: str, test_info: TestRunModel, caplog: pytest.LogCaptureFixture
-    ):
-        test_info.agent_metrics = ["unknown"]
-
-        with pytest.raises(TestScenarioParsingError) as exc_info:
-            ts_parser._create_test_run(test_info=test_info, normalized_weight=1.0)
-
-        mapping_str = (
-            f"'{NcclTestPerformanceReportGenerationStrategy.__name__}': "
-            f"{NcclTestPerformanceReportGenerationStrategy.metrics}"
-        )
-        msg = (
-            f"Test '{test_info.id}' is a DSE job with agent_metrics='{test_info.agent_metrics}', "
-            "but no report generation strategy is defined for it. "
-            f"Available report-metrics mapping: {{{mapping_str}}}"
-        )
-        assert str(exc_info.value) == msg
-        assert caplog.records[0].levelname == "ERROR"
-        assert caplog.records[0].message == f"Failed to parse Test Scenario definition: {ts_parser.file_path}"
-        assert caplog.records[1].levelname == "ERROR"
-        assert caplog.records[1].message == msg
-
-    @patch("cloudai.test_scenario_parser.get_reporters", return_value=set())
-    def test_raises_if_no_reports_defined(self, _, ts_parser: TestScenarioParser, test_info: TestRunModel, tname: str):
-        tdef = ts_parser.test_mapping[tname].test_definition
-        tdef.agent_metrics = ["default"]
-
-        with pytest.raises(TestScenarioParsingError) as exc_info:
-            ts_parser._create_test_run(test_info=test_info, normalized_weight=1.0)
-
-        assert str(exc_info.value) == (
-            f"Test '{test_info.id}' is a DSE job with agent_metrics='{tdef.agent_metrics}', "
-            "but no report generation strategy is defined for it. "
-            "Available report-metrics mapping: {}"
-        )