Binary file added runs/test_build_cache.npy
Binary file not shown.
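This is the committed gradient cache that test_build_consistency below compares against. It is a plain array written with np.save, so it can be inspected locally; a minimal sketch (assumes only NumPy and a checkout containing the file):

    import numpy as np

    # Load the cached first-module gradient committed in this PR.
    cached = np.load("runs/test_build_cache.npy")
    print(cached.shape, cached.dtype)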
6 changes: 5 additions & 1 deletion tests/conftest.py
@@ -1,11 +1,15 @@
 import pytest
+import torch
 from datasets import Dataset
 from transformers import AutoConfig, AutoModelForCausalLM


 @pytest.fixture
 def model():
-    """Create a small test model."""
+    """Randomly initialize a small test model."""
+    torch.manual_seed(42)
+    torch.cuda.manual_seed(42)
+
     config = AutoConfig.from_pretrained("trl-internal-testing/tiny-Phi3ForCausalLM")
     return AutoModelForCausalLM.from_config(config)

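The added seeding calls make the fixture deterministic: from_config draws the initial weights from torch's RNG, so seeding both the CPU and CUDA generators before construction yields identical parameters on every invocation. A quick way to verify this outside pytest (a sketch mirroring the fixture above, not part of this PR):

    import torch
    from transformers import AutoConfig, AutoModelForCausalLM

    def make_model():
        # Same recipe as the fixture: seed both RNGs, then initialize from config.
        torch.manual_seed(42)
        torch.cuda.manual_seed(42)
        config = AutoConfig.from_pretrained("trl-internal-testing/tiny-Phi3ForCausalLM")
        return AutoModelForCausalLM.from_config(config)

    a, b = make_model(), make_model()
    for (name, pa), (_, pb) in zip(a.named_parameters(), b.named_parameters()):
        assert torch.equal(pa, pb), f"parameter {name} differs between seeded builds"
    print("seeded builds are identical")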
43 changes: 26 additions & 17 deletions tests/test_build.py
@@ -1,29 +1,38 @@
+import pytest
+
+from bergson.data import load_gradients
+
+try:
+    import torch
+
+    HAS_CUDA = torch.cuda.is_available()
+except Exception:
+    HAS_CUDA = False
+
+if not HAS_CUDA:
+    pytest.skip(
+        "Skipping GPU-only tests: no CUDA/NVIDIA driver available.",
+        allow_module_level=True,
+    )
+
 from pathlib import Path

 import numpy as np
-import pytest
-import torch
 from transformers import AutoModelForCausalLM

 from bergson import (
     AttentionConfig,
     GradientProcessor,
     collect_gradients,
 )
-from bergson.data import load_gradients


 @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
 def test_build_consistency(tmp_path: Path, model, dataset):
     collect_gradients(
         model=model,
         data=dataset,
         processor=GradientProcessor(),
         path=str(tmp_path),
         skip_preconditioners=True,
     )
     index = load_gradients(str(tmp_path))

     # Regenerate the cache if it is missing
     cache_path = Path("runs/test_build_cache.npy")
     if not cache_path.exists():
         np.save(cache_path, index[index.dtype.names[0]][0])
     cached_item_grad = np.load(cache_path)

     first_module_grad = index[index.dtype.names[0]][0]

     assert np.allclose(first_module_grad, cached_item_grad, atol=1e-6)


 @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
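Two notes on the new test module. First, pytest.skip(..., allow_module_level=True) aborts collection of the entire file when no CUDA driver is present, so on GPU-less machines the per-test @pytest.mark.skipif decorators act as a second, now-redundant guard. Second, the cache logic is self-healing but asymmetric: when runs/test_build_cache.npy is absent, the test saves the freshly computed gradient and then compares it against itself, passing trivially; only runs that start from the committed cache actually check consistency. Deleting the file forces regeneration.

The index[index.dtype.names[0]][0] expression relies on load_gradients returning a NumPy structured array with one named field per module; that is an assumption inferred from this test, not from bergson's documented API. A self-contained illustration of the access pattern, using a hypothetical stand-in array:

    import numpy as np

    # Hypothetical stand-in for the structured array returned by load_gradients:
    # 4 rows (examples), one field per module, each holding an 8-dim gradient.
    index = np.zeros(4, dtype=[("layers.0.mlp", np.float32, (8,))])

    first_module = index.dtype.names[0]   # name of the first module field
    first_row = index[first_module][0]    # that module's gradient for example 0
    print(first_module, first_row.shape)  # -> layers.0.mlp (8,)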