# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved.
# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved.
# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved
# SPDX-License-Identifier: LicenseRef-Apache2
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

import pytest

from bionemo.testing.subprocess_utils import run_command_in_subprocess

from .common import small_training_cmd, small_training_finetune_cmd


def extract_val_losses(log_text: str, n: int) -> list[tuple[int, float]]:
    """
    Extracts every n-th val_loss occurrence (starting at occurrence 0) from a training log.
    The occurrence index is derived by counting val_loss appearances in order.

    Args:
        log_text (str): The log output as a string.
        n (int): Interval of occurrences (e.g., n=5 -> val_loss at occurrences 0, 5, 10, ...).

    Returns:
        List of tuples: (occurrence_index, val_loss_value).
    """
    # Regex to capture val_loss values, e.g. "val_loss: 1.234".
    pattern = re.compile(r"val_loss: ([0-9.]+)")

    results = []
    for idx, match in enumerate(pattern.finditer(log_text)):
        if idx % n == 0:  # take every n-th val_loss occurrence
            results.append((idx, float(match.group(1))))

    return results
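

# Minimal sanity check for extract_val_losses on a synthetic log fragment
# (illustrative example only; the fragment below is not real train_evo2 output).
def test_extract_val_losses_every_nth():
    log = "step 0 | val_loss: 2.31 | step 1 | val_loss: 1.87 | step 2 | val_loss: 1.42"
    # Occurrences are indexed 0, 1, 2; n=2 keeps occurrences 0 and 2.
    assert extract_val_losses(log, 2) == [(0, 2.31), (2, 1.42)]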


@pytest.mark.timeout(2048)  # Optional: fail if the test takes too long.
@pytest.mark.slow
@pytest.mark.parametrize("with_peft", [True, False])
def test_train_evo2_finetune_runs(tmp_path, with_peft: bool):
    """
    Runs the `train_evo2` command with mock data in a temporary directory, then
    finetunes from the resulting checkpoint (with or without LoRA/PEFT).
    It uses the temporary directory provided by pytest as the working directory.
    Each command is run in a subprocess, and we assert that it returns an exit code of 0,
    writes checkpoints and TensorBoard logs, and that validation loss does not increase.
    """
    num_steps = 25
    val_steps = 10
    global_batch_size = 128

    # Note: The command assumes that `train_evo2` is in your PATH.
    command = small_training_cmd(
        tmp_path / "pretrain",
        max_steps=num_steps,
        val_check=val_steps,
        global_batch_size=global_batch_size,
        additional_args=" --lr 0.1 ",
    )
    stdout_pretrain: str = run_command_in_subprocess(command=command, path=str(tmp_path))
    # Pretraining should start from scratch, not restore existing weights.
    assert "Restoring model weights from RestoreConfig(path='" not in stdout_pretrain

    log_dir = tmp_path / "pretrain" / "evo2"
    checkpoints_dir = log_dir / "checkpoints"
    tensorboard_dir = log_dir / "dev"

    # Check if logs dir exists
    assert log_dir.exists(), "Logs folder should exist."
    # Check if checkpoints dir exists
    assert checkpoints_dir.exists(), "Checkpoints folder does not exist."

    expected_checkpoint_suffix = f"{num_steps * global_batch_size}.0-last"
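    # num_steps * global_batch_size = 25 * 128 = 3200 consumed samples, so the last
    # checkpoint directory name should contain "3200.0-last" (the exact filename
    # template is an assumption about the checkpoint callback's naming convention).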
    # Check if any checkpoint subfolder name contains the expected suffix.
    matching_subfolders = [
        p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name)
    ]

    assert matching_subfolders, (
        f"No checkpoint subfolder containing '{expected_checkpoint_suffix}' found in {checkpoints_dir}."
    )

    # Check if directory with tensorboard logs exists
    assert tensorboard_dir.exists(), "TensorBoard logs folder does not exist."

    event_files = list(tensorboard_dir.rglob("events.out.tfevents*"))
    assert len(event_files) == 1, (
        f"Expected exactly one TensorBoard event file under {tensorboard_dir}, found {len(event_files)}."
    )

    val_losses = extract_val_losses(stdout_pretrain, val_steps)

    # Check that each validation loss is less than or equal to the previous one.
    for i in range(1, len(val_losses)):
        assert val_losses[i][1] <= val_losses[i - 1][1], (
            f"Validation loss increased at occurrence {val_losses[i][0]}: {val_losses[i][1]} > {val_losses[i - 1][1]}"
        )

    assert len(matching_subfolders) == 1, "Only one checkpoint subfolder should be found."
    if with_peft:
        result_dir = tmp_path / "lora_finetune"
        additional_args = "--lora-finetune --lr 0.1 "
    else:
        result_dir = tmp_path / "finetune"
        additional_args = " --lr 0.1 "

    command_finetune = small_training_finetune_cmd(
        result_dir,
        max_steps=num_steps,
        val_check=val_steps,
        global_batch_size=global_batch_size,
        prev_ckpt=matching_subfolders[0],
        create_tflops_callback=not with_peft,
        additional_args=additional_args,
    )
    stdout_finetune: str = run_command_in_subprocess(command=command_finetune, path=str(tmp_path))
    # Finetuning should restore weights from the pretraining checkpoint.
    assert "Restoring model weights from RestoreConfig(path='" in stdout_finetune

    log_dir_ft = result_dir / "evo2"
    checkpoints_dir_ft = log_dir_ft / "checkpoints"
    tensorboard_dir_ft = log_dir_ft / "dev"

    # Check if logs dir exists
    assert log_dir_ft.exists(), "Logs folder should exist."
    # Check if checkpoints dir exists
    assert checkpoints_dir_ft.exists(), "Checkpoints folder does not exist."

    # The finetuning run consumes the same number of samples, so the same checkpoint
    # suffix is expected; check if any subfolder name contains it.
    matching_subfolders_finetune = [
        p for p in checkpoints_dir_ft.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name)
    ]

    assert matching_subfolders_finetune, (
        f"No checkpoint subfolder containing '{expected_checkpoint_suffix}' found in {checkpoints_dir_ft}."
    )

    # Check if directory with tensorboard logs exists
    assert tensorboard_dir_ft.exists(), "TensorBoard logs folder does not exist."
    # Recursively search for TensorBoard event files
    event_files_ft = list(tensorboard_dir_ft.rglob("events.out.tfevents*"))
    assert len(event_files_ft) == 1, (
        f"Expected exactly one TensorBoard event file under {tensorboard_dir_ft}, found {len(event_files_ft)}."
    )

    val_losses_ft = extract_val_losses(stdout_finetune, val_steps)

    # Check that each validation loss is less than or equal to the previous one.
    for i in range(1, len(val_losses_ft)):
        assert val_losses_ft[i][1] <= val_losses_ft[i - 1][1], (
            f"Validation loss increased at occurrence {val_losses_ft[i][0]}: {val_losses_ft[i][1]} > {val_losses_ft[i - 1][1]}"
        )

    assert len(matching_subfolders_finetune) == 1, "Only one checkpoint subfolder should be found."

    # With LoRA, also test resuming from the saved LoRA checkpoint.
    if with_peft:
        result_dir = tmp_path / "lora_finetune_resume"

        # Resume from the LoRA checkpoint produced by the previous finetuning run.
        command_resume_finetune = small_training_finetune_cmd(
            result_dir,
            max_steps=num_steps,
            val_check=val_steps,
            global_batch_size=global_batch_size,
            prev_ckpt=matching_subfolders[0],
            create_tflops_callback=False,
            additional_args=f"--lora-finetune --lora-checkpoint-path {matching_subfolders_finetune[0]} --lr 0.1 ",
        )
        stdout_finetune = run_command_in_subprocess(command=command_resume_finetune, path=str(tmp_path))

        log_dir_ft = result_dir / "evo2"
        checkpoints_dir_ft = log_dir_ft / "checkpoints"
        tensorboard_dir_ft = log_dir_ft / "dev"

        # Check if logs dir exists
        assert log_dir_ft.exists(), "Logs folder should exist."
        # Check if checkpoints dir exists
        assert checkpoints_dir_ft.exists(), "Checkpoints folder does not exist."

        # Recursively search for TensorBoard event files
        event_files_ft = list(tensorboard_dir_ft.rglob("events.out.tfevents*"))
        assert len(event_files_ft) == 1, (
            f"Expected exactly one TensorBoard event file under {tensorboard_dir_ft}, found {len(event_files_ft)}."
        )

        val_losses_ft = extract_val_losses(stdout_finetune, val_steps)

        # Check that each validation loss is less than or equal to the previous one.
        for i in range(1, len(val_losses_ft)):
            assert val_losses_ft[i][1] <= val_losses_ft[i - 1][1], (
                f"Validation loss increased at occurrence {val_losses_ft[i][0]}: {val_losses_ft[i][1]} > {val_losses_ft[i - 1][1]}"
            )