# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import time
import unittest.mock
from datetime import datetime
from io import StringIO
from typing import Callable, TypedDict
import pytest
import ray
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from nemo_rl.distributed.virtual_cluster import init_ray
dir_path = os.path.dirname(os.path.abspath(__file__))
def pytest_addoption(parser):
"""Add custom command line options for controlling test execution."""
parser.addoption(
"--hf-gated",
action="store_true",
default=False,
help="Include tests that require HuggingFace token access",
)
parser.addoption(
"--mcore-only",
action="store_true",
default=False,
help="Run ONLY mcore tests (combine with --hf-gated to include mcore+hf_gated tests)",
)
parser.addoption(
"--automodel-only",
action="store_true",
default=False,
help="Run ONLY automodel tests",
)
parser.addoption(
"--vllm-only",
action="store_true",
default=False,
help="Run ONLY vllm tests",
)
parser.addoption(
"--sglang-only",
action="store_true",
default=False,
help="Run ONLY sglang tests",
)
parser.addoption(
"--nemo-gym-only",
action="store_true",
default=False,
help="Run ONLY nemo_gym tests",
)
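# Illustrative sketch (not part of the original file): the options above are passed on the
# pytest command line. Assuming the `uv` extras referenced in the ImportError messages
# further down, typical invocations might look like:
#
#   uv run --group test pytest tests/unit                    # default: backend-marked and hf_gated tests are skipped
#   uv run --group test pytest tests/unit --hf-gated         # also run tests that need a HuggingFace token
#   uv run --extra mcore --group test pytest tests/unit --mcore-only --hf-gated
#   uv run --extra vllm --group test pytest tests/unit --vllm-only
#
# (The `tests/unit` target path is an assumption based on the prepare script referenced at
# the bottom of this file.)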
def pytest_collection_modifyitems(config, items):
"""Modify test collection to skip tests based on markers unless explicitly requested."""
run_hf_gated = config.getoption("--hf-gated")
run_mcore_only = config.getoption("--mcore-only")
run_automodel_only = config.getoption("--automodel-only")
run_vllm_only = config.getoption("--vllm-only")
run_sglang_only = config.getoption("--sglang-only")
run_nemo_gym_only = config.getoption("--nemo-gym-only")
# Check for mutually exclusive options
exclusive_options = [
run_mcore_only,
run_automodel_only,
run_vllm_only,
run_sglang_only,
run_nemo_gym_only,
]
if sum(exclusive_options) > 1:
raise ValueError(
"--mcore-only, --automodel-only, --vllm-only, --sglang-only, and --nemo-gym-only are mutually exclusive"
)
marker_expr = config.getoption("-m", default="")
# If user specified -m marker expressions, still prioritize run_first tests
if marker_expr:
items.sort(key=lambda item: 0 if item.get_closest_marker("run_first") else 1)
return
# Start with all items and apply filters sequentially
new_items = list(items)
# Filter by hf_gated marker
if not run_hf_gated:
# Exclude hf_gated tests unless explicitly requested
new_items = [
item for item in new_items if not item.get_closest_marker("hf_gated")
]
# Filter by mcore marker
if run_mcore_only:
# Validate that megatron.core is available
try:
import megatron.core # noqa: F401
except ImportError:
raise ImportError(
"Cannot run mcore tests: megatron.core is not available.\n"
"Please run tests with: uv run --extra mcore --group test pytest ..."
)
# Include only mcore tests
new_items = [item for item in new_items if item.get_closest_marker("mcore")]
else:
# Exclude mcore tests by default
new_items = [item for item in new_items if not item.get_closest_marker("mcore")]
# Filter by automodel marker
if run_automodel_only:
# Validate that nemo_automodel is available
try:
import nemo_automodel # noqa: F401
except ImportError:
raise ImportError(
"Cannot run automodel tests: nemo_automodel is not available.\n"
"Please run tests with: uv run --extra automodel --group test pytest ..."
)
# Include only automodel tests
new_items = [item for item in new_items if item.get_closest_marker("automodel")]
else:
# Exclude automodel tests by default
new_items = [
item for item in new_items if not item.get_closest_marker("automodel")
]
# Filter by vllm marker
if run_vllm_only:
# Validate that vllm is available
try:
import vllm # noqa: F401
except ImportError:
raise ImportError(
"Cannot run vllm tests: vllm is not available.\n"
"Please run tests with: uv run --extra vllm --group test pytest ..."
)
# Include only vllm tests
new_items = [item for item in new_items if item.get_closest_marker("vllm")]
else:
# Exclude vllm tests by default
new_items = [item for item in new_items if not item.get_closest_marker("vllm")]
# Filter by sglang marker
if run_sglang_only:
# Validate that sglang is available
try:
import sglang # noqa: F401
except ImportError:
raise ImportError(
"Cannot run sglang tests: sglang is not available.\n"
"Please run tests with: uv run --extra sglang --group test pytest ..."
)
# Include only sglang tests
new_items = [item for item in new_items if item.get_closest_marker("sglang")]
else:
# Exclude sglang tests by default
new_items = [
item for item in new_items if not item.get_closest_marker("sglang")
]
# Filter by nemo_gym marker
if run_nemo_gym_only:
# Validate that nemo_gym is available
try:
from nemo_gym import config_types # noqa: F401
except ImportError:
raise ImportError(
"Cannot run nemo_gym tests: nemo_gym is not available.\n"
"Please run tests with: uv run --extra nemo_gym --group test pytest ..."
)
# Include only nemo_gym tests
new_items = [item for item in new_items if item.get_closest_marker("nemo_gym")]
else:
# Exclude nemo_gym tests by default
new_items = [
item for item in new_items if not item.get_closest_marker("nemo_gym")
]
# Ensure run_first tests are prioritized
new_items.sort(key=lambda item: 0 if item.get_closest_marker("run_first") else 1)
# Update the items list in-place
items[:] = new_items
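# Illustrative sketch (hypothetical test, not from the original file): the filtering above
# keys off pytest markers, so a backend-specific gated test would be tagged like:
#
#   @pytest.mark.vllm
#   @pytest.mark.hf_gated
#   def test_generation_with_vllm():
#       ...
#
# and is collected only when both --vllm-only and --hf-gated are passed.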
TEST_ASSETS_DIR = os.path.join(dir_path, "test_assets")
UNIT_RESULTS_FILE = os.path.join(dir_path, "unit_results.json")
UNIT_RESULTS_FILE_DATED = os.path.join(
dir_path, f"unit_results/{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
)
class UnitTestData(TypedDict):
exit_status: int | str
git_commit: str
start_time: str
metrics: dict
gpu_types: list[str]
coverage: str
###################################
# Meta Session Fixtures and Hooks #
###################################
@pytest.fixture(scope="session", autouse=True)
def _unit_test_data(request):
"""Initializes the unit test data storage at the beginning of the session."""
session = request.session
# Delete the unit results file at the start of a new test session
if os.path.exists(UNIT_RESULTS_FILE):
try:
os.remove(UNIT_RESULTS_FILE)
print(f"Deleted existing results file: {UNIT_RESULTS_FILE}")
except Exception as e:
print(f"Warning: Failed to delete results file: {e}")
# Get the git commit hash
try:
import subprocess
result = subprocess.run(
["git", "-C", dir_path, "rev-parse", "HEAD"],
capture_output=True,
text=True,
check=True,
)
git_commit = result.stdout.strip()
except Exception as e:
git_commit = f"Error getting git commit: {str(e)}"
unit_test_data = UnitTestData(
exit_status="was not set",
git_commit=git_commit,
start_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
metrics={},
gpu_types=[],
coverage="[n/a] run with --cov=nemo_rl",
)
session.config._unit_test_data = unit_test_data
return unit_test_data
@pytest.fixture(scope="session", autouse=True)
def session_data(request, init_ray_cluster, _unit_test_data):
"""Session-level fixture to store and save metrics data.
This fixture tracks both metrics from tests and metadata about the test environment.
The metrics are stored in the 'metrics' dictionary.
    It's set to autouse so that we track metadata and coverage even if no selected test
    explicitly tracks metrics.
"""
# Pass init_ray_cluster so that we can access ray metadata
############################################################
# 1. Gather all the unit test data #
############################################################
unit_test_data: UnitTestData = _unit_test_data
yield unit_test_data
############################################################
# 2. Gather the ray metadata #
############################################################
from nemo_rl.utils.logger import RayGpuMonitorLogger
logger = RayGpuMonitorLogger(
collection_interval=float("inf"),
flush_interval=float("inf"),
metric_prefix="test",
step_metric="test/step",
parent_logger=None,
)
unit_test_data["gpu_types"] = list(set(logger._collect_gpu_sku().values()))
############################################################
# 3. Gather the coverage data #
############################################################
# We directly access the coverage controller from the plugin manager
# so we can access the coverage total before the pytest session finishes.
cov_controller = None
if request.config.pluginmanager.hasplugin("_cov"):
plugin = request.config.pluginmanager.getplugin("_cov")
if plugin.cov_controller:
cov_controller = plugin.cov_controller
if not cov_controller:
# Means the user didn't run with --cov=...
return
# We currently don't use the cov_report since we can always access the coverage.json later, but
# in the future if we want to report the coverage more granularly as part of the session finish,
# we can access it here.
cov_report = StringIO()
cov_total = cov_controller.summary(cov_report)
unit_test_data["coverage"] = cov_total
@pytest.fixture
def tracker(request, session_data, ray_gpu_monitor):
"""Test-level fixture that automatically captures test function info."""
# Get fully qualified test name (module::test_function)
module_name = request.module.__name__
test_name = request.function.__name__
qualified_name = f"{module_name}::{test_name}"
# Initialize an empty dict for this test if it doesn't exist
    if qualified_name not in session_data["metrics"]:
session_data["metrics"][qualified_name] = {}
class Tracker:
def track(self, metric_name: str, value):
"""Tracking an arbitrary metric."""
session_data["metrics"][qualified_name][metric_name] = value
def get_max_mem(self):
metrics = ray_gpu_monitor._collect_metrics()
max_mem = 0
for m_name, m_value in metrics.items():
if m_name.endswith(".memory"):
max_mem = max(max_mem, m_value)
return max_mem
def log_max_mem(self, metric_name: str):
session_data["metrics"][qualified_name][metric_name] = self.get_max_mem()
start_time = time.time()
yield Tracker()
end_time = time.time()
# Prefix with `_` to indicate it's automatically collected
session_data["metrics"][qualified_name]["_elapsed"] = end_time - start_time
def pytest_sessionstart(session):
os.makedirs(TEST_ASSETS_DIR, exist_ok=True)
def pytest_sessionfinish(session, exitstatus):
if not hasattr(session.config, "_unit_test_data"):
return
data = session.config._unit_test_data
data["exit_status"] = exitstatus
print(f"\nSaving unit test data to {UNIT_RESULTS_FILE}")
print(f"and saving to {UNIT_RESULTS_FILE_DATED}")
with open(UNIT_RESULTS_FILE, "w") as f:
json.dump(data, f, indent=2)
os.makedirs(os.path.dirname(UNIT_RESULTS_FILE_DATED), exist_ok=True)
with open(UNIT_RESULTS_FILE_DATED, "w") as f:
json.dump(data, f, indent=2)
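# For reference, a sketch of the JSON written above. The keys follow UnitTestData; the
# values are illustrative placeholders, not real output:
#
#   {
#     "exit_status": 0,
#     "git_commit": "<sha>",
#     "start_time": "2025-01-01 00:00:00",
#     "metrics": {"tests.unit.test_example::test_example": {"_elapsed": 1.2}},
#     "gpu_types": ["<gpu sku>"],
#     "coverage": "[n/a] run with --cov=nemo_rl"
#   }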
################
# Ray Fixtures #
################
@pytest.fixture(scope="session", autouse=True)
def init_ray_cluster():
"""Initialize Ray for the test module and clean up afterward.
This fixture doesn't need to be called directly.
"""
init_ray()
yield
ray.shutdown()
@pytest.fixture(scope="session", autouse=True)
def ray_gpu_monitor(init_ray_cluster):
"""Initialize Ray for the test module and clean up afterward.
This fixture doesn't need to be called directly.
"""
from nemo_rl.utils.logger import RayGpuMonitorLogger
gpu_monitor = RayGpuMonitorLogger(
collection_interval=1,
flush_interval=float("inf"), # Disabling flushing since we will do it manually
metric_prefix="test",
step_metric="test/step",
parent_logger=None,
)
gpu_monitor.start()
yield gpu_monitor
gpu_monitor.stop()
####################################
# Fixtures for Distributed Testing #
####################################
def _setup_distributed(rank, world_size, port, backend="nccl"):
"""Initialize the distributed environment for a test (internal use only)"""
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(port) # Use the same port for all processes
# Initialize the process group
dist.init_process_group(backend=backend, rank=rank, world_size=world_size)
# Set the device for this process
torch.cuda.set_device(rank)
def _cleanup_distributed():
"""Clean up the distributed environment after a test (internal use only)"""
dist.destroy_process_group()
@pytest.fixture
def distributed_test_runner():
"""Fixture that returns a function to run distributed tests.
This fixture provides a reusable way to run a test function across multiple processes
with PyTorch distributed communication set up.
"""
def run_distributed_test(
test_fn: Callable, world_size: int, backend: str = "nccl"
) -> None:
"""Run a test function in a distributed environment.
Args:
test_fn: The test function to run on each process
world_size: Number of processes to spawn
backend: PyTorch distributed backend to use
"""
# Skip if CUDA is not available and using NCCL backend
if backend == "nccl" and not torch.cuda.is_available():
pytest.skip("CUDA not available, skipping CUDA-based test")
# Skip if we don't have enough GPUs for NCCL backend
if backend == "nccl" and torch.cuda.device_count() < world_size:
pytest.skip(
f"Not enough GPUs available. Need {world_size}, got {torch.cuda.device_count()}"
)
# Generate a single random port in the main process
port = random.randint(10000, 20000)
# Run the test on multiple processes
mp.spawn(
_distributed_test_wrapper,
args=(test_fn, world_size, port, backend),
nprocs=world_size,
join=True,
)
return run_distributed_test
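# Illustrative sketch (hypothetical test, not from the original file): a distributed test
# passes a module-level function taking (rank, world_size) to the runner, e.g.:
#
#   def _all_reduce_check(rank, world_size):
#       t = torch.ones(1, device=f"cuda:{rank}")
#       dist.all_reduce(t)
#       assert t.item() == world_size
#
#   def test_all_reduce(distributed_test_runner):
#       distributed_test_runner(_all_reduce_check, world_size=2)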
def _distributed_test_wrapper(
rank: int, test_fn: Callable, world_size: int, port: int, backend: str
) -> None:
"""Wrapper function that sets up the distributed environment before running the test function.
Internal use only - use distributed_test_runner fixture instead.
Args:
rank: Process rank
test_fn: The test function to run
world_size: Total number of processes
port: Port to use for distributed communication
backend: PyTorch distributed backend to use
"""
try:
# Setup the distributed environment
_setup_distributed(rank, world_size, port, backend=backend)
# Run the actual test function
test_fn(rank, world_size)
# Clean up
_cleanup_distributed()
except Exception as e:
print(f"Error in rank {rank}: {e}")
_cleanup_distributed()
raise
@pytest.fixture
def mock_2gpu_distributed_env():
"""Mock distributed environment variables and initialization.
This fixture should be used when testing the underlying ray actors that need torch distributed initialized.
"""
# Save original environment
old_env = os.environ.copy()
# Set required environment variables
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "2"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# Create a more sophisticated mock device mesh
# First, create individual mesh mocks for dp and tp
dp_mesh = unittest.mock.MagicMock()
dp_mesh.device_type = "cuda"
dp_mesh.size.return_value = 1 # Set dp_size
tp_mesh = unittest.mock.MagicMock()
tp_mesh.device_type = "cuda"
tp_mesh.size.return_value = 2 # Set tp_size to match your test case
# Create the 2D mesh that acts like a dictionary (this is so we can test DTensorPolicyWorker with TP > 1)
mesh_2d = unittest.mock.MagicMock()
mesh_2d.__getitem__.side_effect = (
lambda key: dp_mesh if key == "dp" else tp_mesh if key == "tp" else None
)
mesh_2d.device_type = "cuda"
# Mock dist.is_initialized to prevent actual initialization
with (
unittest.mock.patch("torch.distributed.init_process_group") as mock_init,
unittest.mock.patch("torch.distributed.is_initialized", return_value=True),
unittest.mock.patch("torch.distributed.get_rank", return_value=0),
unittest.mock.patch("torch.distributed.get_world_size", return_value=2),
unittest.mock.patch(
"torch.distributed.device_mesh.init_device_mesh", return_value=mesh_2d
),
):
yield mock_init
# Restore original environment
os.environ.clear()
os.environ.update(old_env)
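# Illustrative sketch (hypothetical test, not from the original file): with this fixture,
# code that calls torch.distributed or init_device_mesh sees a fake 2-GPU, TP=2 environment
# without spawning any processes:
#
#   def test_worker_setup(mock_2gpu_distributed_env):
#       mesh = torch.distributed.device_mesh.init_device_mesh(
#           "cuda", (1, 2), mesh_dim_names=("dp", "tp")
#       )
#       assert mesh["tp"].size() == 2  # returns the mocked tp_mesh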
#######################
# Test Model Fixtures #
#######################
@pytest.fixture(scope="session")
def tiny_llama_model_path():
"""Fixture that returns a path to a tiny llama model with a dummy tokenizer."""
import shutil
from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM
model_path = os.path.join(TEST_ASSETS_DIR, "tiny_llama_with_llama3.2_tokenizer")
# hidden_size//num_attention_heads = 32 (smallest value to not error due to vllm paged attention)
# vocab_size=128256 (so we can re-use llama3.2 1b tokenizer)
config = LlamaConfig(
num_hidden_layers=2,
hidden_size=64,
intermediate_size=32,
num_attention_heads=2,
vocab_size=128256,
tie_word_embeddings=False,
num_key_value_heads=None,
)
model = LlamaForCausalLM(config=config)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
shutil.rmtree(model_path, ignore_errors=True)
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
del model, tokenizer
yield model_path
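# Illustrative sketch (hypothetical test, not from the original file): tests consume these
# model-path fixtures wherever a HuggingFace checkpoint path is expected:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   def test_tiny_llama_loads(tiny_llama_model_path):
#       tokenizer = AutoTokenizer.from_pretrained(tiny_llama_model_path)
#       model = AutoModelForCausalLM.from_pretrained(tiny_llama_model_path)
#       assert model.config.num_hidden_layers == 2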
@pytest.fixture(scope="session")
def tiny_llama_tied_model_path():
"""Fixture that returns a path to a tiny llama model with a dummy tokenizer."""
import shutil
from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM
model_path = os.path.join(
TEST_ASSETS_DIR, "tiny_llama_tied_with_llama3.2_tokenizer"
)
# hidden_size//num_attention_heads = 32 (smallest value to not error due to vllm paged attention)
# vocab_size=128256 (so we can re-use llama3.2 1b tokenizer)
config = LlamaConfig(
num_hidden_layers=2,
hidden_size=64,
intermediate_size=32,
num_attention_heads=2,
vocab_size=128256,
tie_word_embeddings=True,
num_key_value_heads=None,
)
model = LlamaForCausalLM(config=config)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
shutil.rmtree(model_path, ignore_errors=True)
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
del model, tokenizer
yield model_path
@pytest.fixture(scope="session")
def tiny_qwen2_model_path():
"""Fixture that returns a path to a tiny llama model with a dummy tokenizer."""
import shutil
from transformers import AutoTokenizer, Qwen2Config, Qwen2ForCausalLM
model_path = os.path.join(TEST_ASSETS_DIR, "tiny_qwen2_with_qwen2_tokenizer")
# hidden_size//num_attention_heads = 32 (smallest value to not error due to vllm paged attention)
# vocab_size=151936 (so we can re-use qwen2 1.5b tokenizer)
config = Qwen2Config(
num_hidden_layers=2,
hidden_size=64,
intermediate_size=32,
num_attention_heads=2,
vocab_size=151936,
tie_word_embeddings=False,
num_key_value_heads=None,
)
model = Qwen2ForCausalLM(config=config)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-1.5B")
shutil.rmtree(model_path, ignore_errors=True)
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
del model, tokenizer
yield model_path
@pytest.fixture(scope="session")
def tiny_qwen3_model_path():
"""Fixture that returns a path to a tiny llama model with a dummy tokenizer."""
import shutil
from transformers import AutoTokenizer, Qwen3Config, Qwen3ForCausalLM
model_path = os.path.join(TEST_ASSETS_DIR, "tiny_qwen3_with_qwen3_tokenizer")
# hidden_size//num_attention_heads = 32 (smallest value to not error due to vllm paged attention)
    # vocab_size=151936 (so we can re-use qwen3 0.6b tokenizer)
config = Qwen3Config(
num_hidden_layers=2,
hidden_size=64,
intermediate_size=32,
num_attention_heads=2,
vocab_size=151936,
tie_word_embeddings=False,
num_key_value_heads=None,
)
model = Qwen3ForCausalLM(config=config)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
shutil.rmtree(model_path, ignore_errors=True)
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
del model, tokenizer
yield model_path
@pytest.fixture(scope="session")
def tiny_gemma3_model_path():
"""Fixture that returns a path to a tiny llama model with a dummy tokenizer."""
import shutil
from transformers import AutoTokenizer, Gemma3ForCausalLM, Gemma3TextConfig
model_path = os.path.join(TEST_ASSETS_DIR, "tiny_gemma3_with_gemma3_tokenizer")
# hidden_size//num_attention_heads = 32 (smallest value to not error due to vllm paged attention)
# vocab_size=262144 so we can re-use gemma-3-1b tokenizer
config = Gemma3TextConfig(
num_hidden_layers=2,
hidden_size=64,
intermediate_size=32,
num_attention_heads=2,
vocab_size=262144,
tie_word_embeddings=True,
num_key_value_heads=2,
)
model = Gemma3ForCausalLM(config=config)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it")
shutil.rmtree(model_path, ignore_errors=True)
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
del model, tokenizer
yield model_path
def _build_tiny_nemotron5_h_checkpoint(model_path: str) -> None:
import shutil
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
config = AutoConfig.from_pretrained(
"nvidia/Nemotron-H-8B-Base-8K", trust_remote_code=True
)
config.hybrid_override_pattern = "M*-"
config.num_hidden_layers = 3
config.intermediate_size = 32
config.hidden_size = 256
config.num_attention_heads = 8
config.mamba_num_heads = 8
config.num_key_value_heads = 8
config.n_groups = 1
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(
"nvidia/Nemotron-H-8B-Base-8K", trust_remote_code=True
)
shutil.rmtree(model_path, ignore_errors=True)
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
@pytest.fixture(scope="session")
def tiny_nemotron5_h_model_path():
"""Fixture that returns a path to a tiny nemotron model with a dummy tokenizer.
If the asset hasn't been prepared by the prepare script, skip the tests that require it.
"""
model_path = os.path.join(
TEST_ASSETS_DIR, "tiny_nemotron5_h_with_nemotron_tokenizer"
)
config_file = os.path.join(model_path, "config.json")
if not os.path.exists(config_file):
pytest.skip(
"Tiny Nemotron-H test asset not prepared. Run `uv run tests/unit/prepare_unit_test_assets.py` first."
)
yield model_path