Skip to content

Commit 7a4f355

Browse files
committed
fix wrong import paths: update `llmd_v2` module references to `llmd` across test files
1 parent 4172595 commit 7a4f355

9 files changed

+17
-17
lines changed

tests/model_serving/model_server/llmd/test_llmd_auth.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import pytest
22

3-
from tests.model_serving.model_server.llmd_v2.utils import (
3+
from tests.model_serving.model_server.llmd.utils import (
44
ns_from_file,
55
parse_completion_text,
66
send_chat_completions,

tests/model_serving/model_server/llmd/test_llmd_connection_cpu.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import pytest
22
from ocp_resources.llm_inference_service import LLMInferenceService
33

4-
from tests.model_serving.model_server.llmd_v2.llmd_configs import TinyLlamaHfConfig, TinyLlamaS3Config
5-
from tests.model_serving.model_server.llmd_v2.utils import (
4+
from tests.model_serving.model_server.llmd.llmd_configs import TinyLlamaHfConfig, TinyLlamaS3Config
5+
from tests.model_serving.model_server.llmd.utils import (
66
ns_from_file,
77
parse_completion_text,
88
send_chat_completions,

tests/model_serving/model_server/llmd/test_llmd_connection_gpu.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import pytest
22
from ocp_resources.llm_inference_service import LLMInferenceService
33

4-
from tests.model_serving.model_server.llmd_v2.llmd_configs import QwenHfConfig, QwenS3Config
5-
from tests.model_serving.model_server.llmd_v2.utils import (
4+
from tests.model_serving.model_server.llmd.llmd_configs import QwenHfConfig, QwenS3Config
5+
from tests.model_serving.model_server.llmd.utils import (
66
ns_from_file,
77
parse_completion_text,
88
send_chat_completions,

tests/model_serving/model_server/llmd/test_llmd_kueue_integration.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
from ocp_resources.llm_inference_service import LLMInferenceService
44
from timeout_sampler import TimeoutExpiredError, TimeoutSampler
55

6-
from tests.model_serving.model_server.llmd_v2.llmd_configs import TinyLlamaOciConfig
7-
from tests.model_serving.model_server.llmd_v2.utils import (
6+
from tests.model_serving.model_server.llmd.llmd_configs import TinyLlamaOciConfig
7+
from tests.model_serving.model_server.llmd.utils import (
88
ns_from_file,
99
parse_completion_text,
1010
send_chat_completions,

tests/model_serving/model_server/llmd/test_llmd_no_scheduler.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import pytest
22
from ocp_resources.llm_inference_service import LLMInferenceService
33

4-
from tests.model_serving.model_server.llmd_v2.llmd_configs import QwenS3Config
5-
from tests.model_serving.model_server.llmd_v2.utils import (
4+
from tests.model_serving.model_server.llmd.llmd_configs import QwenS3Config
5+
from tests.model_serving.model_server.llmd.utils import (
66
ns_from_file,
77
parse_completion_text,
88
send_chat_completions,

tests/model_serving/model_server/llmd/test_llmd_prefill_decode.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import pytest
22
from ocp_resources.llm_inference_service import LLMInferenceService
33

4-
from tests.model_serving.model_server.llmd_v2.llmd_configs import PrefillDecodeConfig
5-
from tests.model_serving.model_server.llmd_v2.utils import (
4+
from tests.model_serving.model_server.llmd.llmd_configs import PrefillDecodeConfig
5+
from tests.model_serving.model_server.llmd.utils import (
66
ns_from_file,
77
parse_completion_text,
88
send_chat_completions,

tests/model_serving/model_server/llmd/test_llmd_singlenode_estimated_prefix_cache.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
from ocp_resources.llm_inference_service import LLMInferenceService
44
from ocp_resources.prometheus import Prometheus
55

6-
from tests.model_serving.model_server.llmd_v2.llmd_configs import EstimatedPrefixCacheConfig
7-
from tests.model_serving.model_server.llmd_v2.utils import (
6+
from tests.model_serving.model_server.llmd.llmd_configs import EstimatedPrefixCacheConfig
7+
from tests.model_serving.model_server.llmd.utils import (
88
assert_prefix_cache_routing,
99
get_llmd_router_scheduler_pod,
1010
get_llmd_workload_pods,

tests/model_serving/model_server/llmd/test_llmd_singlenode_precise_prefix_cache.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
from ocp_resources.llm_inference_service import LLMInferenceService
44
from ocp_resources.prometheus import Prometheus
55

6-
from tests.model_serving.model_server.llmd_v2.llmd_configs import PrecisePrefixCacheConfig
7-
from tests.model_serving.model_server.llmd_v2.utils import (
6+
from tests.model_serving.model_server.llmd.llmd_configs import PrecisePrefixCacheConfig
7+
from tests.model_serving.model_server.llmd.utils import (
88
assert_prefix_cache_routing,
99
assert_scheduler_routing,
1010
get_llmd_router_scheduler_pod,

tests/model_serving/model_server/llmd/test_llmd_smoke.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import pytest
22
from ocp_resources.llm_inference_service import LLMInferenceService
33

4-
from tests.model_serving.model_server.llmd_v2.llmd_configs import TinyLlamaOciConfig
5-
from tests.model_serving.model_server.llmd_v2.utils import (
4+
from tests.model_serving.model_server.llmd.llmd_configs import TinyLlamaOciConfig
5+
from tests.model_serving.model_server.llmd.utils import (
66
ns_from_file,
77
parse_completion_text,
88
send_chat_completions,

0 commit comments

Comments (0)