import os
import random
import time

import torch
import zmq
from torch.multiprocessing import Queue, get_context

from checkpoint_engine.ps import ParameterServer, _get_physical_gpu_id
from checkpoint_engine.worker import update_weights_from_ipc
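
# Fault-injection test: the parameter server broadcasts a randomly generated
# checkpoint to a spawned receiver process whose update callback raises on
# purpose, verifying that the failure propagates back to ParameterServer.update().
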
def gen_test_tensors(rank: int) -> list[tuple[str, torch.Tensor]]:
    """Generate randomly shaped test tensors with a mix of dtypes."""
    tensors = []
    for layer in range(random.randint(10, 50)):
        for num in range(random.randint(50, 100)):
            # Pick a dtype with a fixed distribution across bf16/fp16/fp8/fp32.
            r = random.randint(0, 16)
            if r < 4:
                dtype = torch.bfloat16
            elif r < 10:
                dtype = torch.float16
            elif r < 14:
                dtype = torch.float8_e4m3fn
            else:
                dtype = torch.float32
            tensors.append(
                (
                    f"rank{rank}.layer{layer}.num{num}",
                    torch.randn([random.randint(100, 500), random.randint(500, 1000)]).to(dtype),
                )
            )
    return tensors
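
# Note: torch.float8_e4m3fn requires a fairly recent PyTorch; on older releases
# the fp8 dtypes do not exist and gen_test_tensors would fail.
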
def receiver_proc_with_error(
    rank: int, device_uuid: str, named_tensors: dict[str, torch.Tensor], queue: Queue
):
    """Receive weight updates over IPC and fail randomly to exercise error handling."""
    torch.cuda.set_device(rank)
    named_tensors = {name: tensor.cuda() for name, tensor in named_tensors.items()}
    _zmq_ctx = zmq.Context()

    def trigger_error(socket_paths: list[tuple[str, str]]):
        socket_paths = dict(socket_paths)
        update_weights_from_ipc(
            _zmq_ctx,
            socket_paths[device_uuid],
            device_id=rank,
            run=error_run,
            post_hook=lambda: torch.cuda.synchronize(),
        )

    def error_run(weights: list[tuple[str, torch.Tensor]]):
        del weights  # unused: this callback only simulates a flaky consumer
        time.sleep(random.uniform(0.1, 0.5))
        if random.random() < 0.6:
            raise RuntimeError("Intentional Error for testing.")

    while True:
        # None is the shutdown sentinel sent by the parent process.
        socket_paths: list[tuple[str, str]] | None = queue.get()
        if socket_paths is None:
            break
        try:
            trigger_error(socket_paths)
        except Exception:
            print(f"[rank{rank}] successfully triggered error.")
            raise
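
# For contrast, a well-behaved run callback would copy the received weights into
# the worker's own tensors instead of raising. A minimal sketch (hypothetical
# normal_run, assuming the same (name, tensor) tuple layout used above):
#
#   def normal_run(weights: list[tuple[str, torch.Tensor]]):
#       for name, tensor in weights:
#           named_tensors[name].copy_(tensor, non_blocking=True)
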
def run():
    # RANK is expected to be set by the launcher (e.g., torchrun).
    rank = int(os.environ["RANK"])
    ctx = get_context("spawn")
    queue = ctx.Queue()
    _device_uuid = _get_physical_gpu_id(rank)
    ps = ParameterServer(auto_pg=True)
    named_tensors = dict(gen_test_tensors(rank))
    checkpoint_name = "test"
    proc = ctx.Process(
        target=receiver_proc_with_error, args=(rank, _device_uuid, named_tensors, queue)
    )
    proc.daemon = True
    proc.start()
    try:
        ps.register_checkpoint(checkpoint_name, named_tensors=named_tensors)
        ps.gather_metas(checkpoint_name)
        ps.update(checkpoint_name, queue.put, ranks=[])
        # sleep 3s to wait for the process group to be destroyed
        time.sleep(3)
    except RuntimeError as e:
        print(f"[rank{rank}] Caught exception from worker process: {e}")
    finally:
        ps.unregister_checkpoint(checkpoint_name)
        queue.put(None)  # shutdown sentinel for the receiver process


if __name__ == "__main__":
    run()
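
# Example launch (assuming a multi-GPU host; torchrun sets RANK for each process):
#   torchrun --nproc-per-node=<num_gpus> <this_script>.py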