Skip to content

Commit 4057071

Browse files
committed
fix: move torch.cuda.empty_cache() to the correct position
1 parent 55f6a7c commit 4057071

2 files changed

Lines changed: 2 additions & 1 deletion

File tree

verl/utils/fsdp_utils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,4 +178,5 @@ def summon_lora_params(fsdp_module) -> dict[str, torch.Tensor]:
178178
else param.detach().cpu()
179179
for name, param in lora_params.items()
180180
}
181+
torch.cuda.empty_cache()
181182
return lora_params

verl/workers/sharding_manager/fsdp_vllm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ def _collect_lora_params(self):
112112
if isinstance(param, DTensor):
113113
param = param.to(cuda_device).full_tensor()
114114
lora_params[name] = param.detach().cpu()
115-
torch.cuda.empty_cache()
115+
torch.cuda.empty_cache()
116116
return lora_params
117117

118118
def _sync_weight_to_vllm(self):

0 commit comments

Comments (0)