
Commit ed331e0

Introduce ManagedDeviceMesh to integrate DeviceMesh with TorchFT

Summary: ManagedDeviceMesh allows users to manipulate a DeviceMesh with the TorchFT ManagedProcessGroup.

ghstack-source-id: a9349a1096ab8bf2f9e2b231add1bfc395291b16
Pull Request resolved: #56

1 parent f31d3b1 commit ed331e0
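At a glance, the commit lets FSDP consume a fault-tolerant mesh directly. Below is a minimal usage sketch condensed from the new torchft/fsdp_test.py in this commit; the Mock stands in for a fully configured Manager, and the usual per-rank distributed environment (MASTER_ADDR, RANK, WORLD_SIZE, ...) is assumed to be set up already.

    # Condensed from torchft/fsdp_test.py: dim 0 ("dp_replicate") is backed by
    # the TorchFT ManagedProcessGroup, dim 1 ("dp_shard") is an ordinary
    # intra-replica shard dimension. The Mock stands in for a real Manager.
    from unittest.mock import Mock

    import torch
    from torch import nn
    from torch.distributed._composable.fsdp import fully_shard

    from torchft.manager import Manager
    from torchft.process_group import ft_init_device_mesh

    manager = Mock(spec=Manager)
    manager.num_participants.return_value = 1

    device_mesh = ft_init_device_mesh(
        device_type="cuda",
        mesh_shape=(2, 2),
        mesh_dim_names=("dp_replicate", "dp_shard"),
        replicate_dim=0,
        manager=manager,
    )
    model = fully_shard(nn.Linear(128, 128).cuda(), mesh=device_mesh)
    model(torch.randn(4, 128).cuda()).mean().backward()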

File tree: 4 files changed, +375 −14


pyproject.toml (+3 −1)

@@ -25,7 +25,9 @@ dev = [
     "pytest",
     "black",
     "pyre-check",
-    "parameterized"
+    "parameterized",
+    "expecttest",
+    "numpy"
 ]

 [tool.maturin]

torchft/fsdp_test.py (+70 −0)

@@ -0,0 +1,70 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+from concurrent.futures import ThreadPoolExecutor
+from typing import Any, Dict, Tuple
+from unittest import TestCase, skipUnless
+from unittest.mock import Mock
+
+import torch
+import torch.distributed as dist
+from torch import nn
+from torch._C._distributed_c10d import (
+    AllgatherOptions,
+    AllreduceOptions,
+    BroadcastOptions,
+    ReduceOp,
+    _resolve_process_group,
+)
+from torch.distributed import (
+    ReduceOp,
+    TCPStore,
+    Work,
+    _functional_collectives,
+    get_world_size,
+)
+from torch.distributed._composable.fsdp import fully_shard
+from torch.distributed.device_mesh import init_device_mesh
+from torch.testing._internal.common_distributed import MultiProcessTestCase
+
+from torchft.manager import Manager
+from torchft.process_group import ManagedProcessGroup, ft_init_device_mesh
+
+
+class FSDPTest(MultiProcessTestCase):
+    @property
+    def world_size(self) -> int:
+        return 4
+
+    def setUp(self) -> None:
+        super().setUp()
+        os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0"
+        self._spawn_processes()
+
+    def test_fsdp(self) -> None:
+        group_size = self.world_size // 2
+        group = self.rank // group_size
+        group_rank = self.rank % group_size
+
+        os.environ["MASTER_ADDR"] = "127.0.0.1"
+        os.environ["MASTER_PORT"] = str(12346 + group)
+        os.environ["RANK"] = str(group_rank)
+        os.environ["WORLD_SIZE"] = str(group_size)
+
+        manager = Mock(spec=Manager)
+        device_mesh = ft_init_device_mesh(
+            device_type="cuda",
+            mesh_shape=(2, 2),
+            mesh_dim_names=("dp_replicate", "dp_shard"),
+            replicate_dim=0,
+            manager=manager,
+        )
+        manager.num_participants.return_value = 1
+        model = nn.Linear(128, 128).cuda()
+        batch = torch.randn(4, 128).cuda()
+        shard_model = fully_shard(model, mesh=device_mesh)
+        shard_model(batch).mean().backward()
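The replica-group arithmetic in test_fsdp is worth spelling out: with world_size=4 and group_size=2, each global rank maps to a replica group, a rank within that group, and a per-group rendezvous port. A standalone sketch of that mapping:

    # Worked example of the group math in test_fsdp (world_size=4, group_size=2):
    # ranks 0-1 form replica group 0, ranks 2-3 form replica group 1.
    world_size = 4
    group_size = world_size // 2
    for rank in range(world_size):
        group = rank // group_size      # which replica group this rank joins
        group_rank = rank % group_size  # rank within that replica group
        port = 12346 + group            # each replica group rendezvouses on its own port
        print(f"rank {rank}: group={group}, group_rank={group_rank}, port={port}")
    # rank 0: group=0, group_rank=0, port=12346
    # rank 1: group=0, group_rank=1, port=12346
    # rank 2: group=1, group_rank=0, port=12347
    # rank 3: group=1, group_rank=1, port=12347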

torchft/process_group.py (+249 −13)

@@ -20,7 +20,7 @@
 import threading
 from abc import ABC
 from datetime import timedelta
-from typing import TYPE_CHECKING, Dict, List, Optional, Type
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union

 import torch
 import torch.distributed as dist
@@ -38,6 +38,7 @@
     Store,
     TCPStore,
     get_rank,
+    init_device_mesh,
 )
 from torch.distributed.distributed_c10d import Work, _world
 from torch.futures import Future
@@ -130,17 +131,7 @@ def size(self) -> int:
     def getBackendName(self) -> str:
         raise NotImplementedError("not implemented")

-    def register(self, name: str) -> "ProcessGroup":
-        """
-        Registers the process group with the global registry. This enables usage
-        with things like functional_collectives which are compilable.
-
-        This should only be called once.
-
-        Args:
-            name: name must be a unique name for this process group
-        """
-
+    def _register(self, name: str) -> str:
         group_name = f"{self.getBackendName()}:{name}"

         # This is needed for DeviceMesh and functional collectives to work.
@@ -158,6 +149,21 @@ def create_pg(
             devices = ["cpu"]
         dist.Backend.register_backend(group_name, create_pg, devices=devices)

+        return group_name
+
+    def register(self, name: str) -> "ProcessGroup":
+        """
+        Registers the process group with the global registry. This enables usage
+        with things like functional_collectives which are compilable.
+
+        This should only be called once.
+
+        Args:
+            name: name must be a unique name for this process group
+        """
+
+        group_name = self._register(name)
+
         return dist.new_group(
             ranks=[dist.get_rank()],
             backend=group_name,
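The refactor splits backend registration out of register(): _register() installs the backend with c10d and returns its name, while register() still hands back a dist.new_group handle. A rough sketch of the intended call pattern, assuming torch.distributed is already initialized; the group name "ft_example" is made up for illustration, and the exact backend prefix comes from getBackendName():

    # Hypothetical sketch: registering a torchft process group so DeviceMesh
    # and functional collectives can resolve it by name. register() should be
    # called only once per name.
    from torchft.process_group import ProcessGroupGloo

    pg = ProcessGroupGloo()
    group = pg.register("ft_example")  # backend registered as "<backend>:ft_example"
    # `group` is a torch.distributed group handle usable wherever a
    # registered process group is expected.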
@@ -496,6 +502,9 @@ def allreduce(self, tensors: List[torch.Tensor], opts: object) -> Work:
     def size(self) -> int:
         return self._manager.num_participants()

+    def getBackendName(self) -> str:
+        return self._manager._pg.getBackendName()
+

 class _BabyWork(Work):
     def __init__(
@@ -689,7 +698,6 @@ def _future_handler(self, future_queue: mp.Queue) -> None:
             logger.exception(f"got unexpected error in future handler: {e}")

     def _get_future(self, op_id: int) -> Future[object]:
-
         with self._futures_lock:
             fut = Future()  # pyre-fixme[29]: is not a function
             self._futures[op_id] = fut
@@ -797,3 +805,231 @@ def extend_device_mesh(
         mesh=mesh.mesh.unsqueeze(dim),
         mesh_dim_names=tuple(mesh_dim_names),
     )
+
+
+class ManagedDeviceMesh(DeviceMesh):
+    def __init__(
+        self,
+        mesh: Optional[DeviceMesh],
+        mesh_dim_names: Tuple[str, ...],
+        replicate_pg: ManagedProcessGroup,
+        replicate_dim: int,
+        parent: Optional["ManagedDeviceMesh"],
+    ) -> None:
+        if mesh is None and parent is None:
+            raise ValueError(
+                "ManagedDeviceMesh requires at least one of `mesh` and `parent`."
+            )
+        self.mesh = mesh
+        self.mesh_dim_names = mesh_dim_names
+        self.replicate_pg = replicate_pg
+        self.replicate_dim = replicate_dim
+        self.replicate_dim_name: str = mesh_dim_names[replicate_dim]
+        self.parent = parent
+        self.flatten_meshes: Dict[str, DeviceMesh] = {}
+        self.device_type: str
+        if mesh is not None:
+            self.device_type = mesh.device_type
+        else:
+            assert parent is not None
+            self.device_type = parent.device_type
+        self._flatten_mesh_list: Tuple[DeviceMesh, ...] = tuple()
+        self._thread_id: Optional[int] = None
+
+    def __getitem__(self, mesh_dim_names: Union[str, Tuple[str, ...]]) -> DeviceMesh:
+        if isinstance(mesh_dim_names, str):
+            if mesh_dim_names == self.replicate_dim_name:
+                return ManagedDeviceMesh(
+                    mesh=None,
+                    mesh_dim_names=(mesh_dim_names,),
+                    replicate_pg=self.replicate_pg,
+                    replicate_dim=0,
+                    parent=self,
+                )
+            elif mesh_dim_names in self.flatten_meshes:
+                return self.flatten_meshes[mesh_dim_names]
+            else:
+                assert self.mesh is not None
+                return self.mesh[mesh_dim_names]
+        else:
+            assert isinstance(mesh_dim_names, tuple)
+            if self.replicate_dim_name not in mesh_dim_names:
+                assert self.mesh is not None
+                return self.mesh[mesh_dim_names]
+            else:
+                assert self.mesh is not None
+                # Slice the underlying mesh without the replicate dimension,
+                # which the real DeviceMesh does not know about.
+                real_dim_names = tuple(
+                    name for name in mesh_dim_names if name != self.replicate_dim_name
+                )
+                return ManagedDeviceMesh(
+                    self.mesh[real_dim_names],
+                    mesh_dim_names,
+                    self.replicate_pg,
+                    mesh_dim_names.index(self.replicate_dim_name),
+                    parent=self,
+                )
+
+    def _real_mesh_dim(self, mesh_dim: int) -> int:
+        return mesh_dim - 1 if mesh_dim > self.replicate_dim else mesh_dim
+
+    def get_group(self, mesh_dim: Optional[Union[int, str]] = None) -> BaseProcessGroup:
+        if isinstance(mesh_dim, str):
+            dim = self.mesh_dim_names.index(mesh_dim)
+        else:
+            dim = 0 if mesh_dim is None else int(mesh_dim)
+
+        if mesh_dim is None:
+            return self.replicate_pg
+        elif dim == self.replicate_dim:
+            return self.replicate_pg
+        else:
+            assert self.mesh is not None
+            return self.mesh.get_group(self._real_mesh_dim(dim))
+
+    def _flatten(self, mesh_dim_name: Optional[str]) -> "DeviceMesh":
+        if mesh_dim_name is None:
+            raise ValueError("ManagedDeviceMesh._flatten requires `mesh_dim_name`")
+        flatten_mesh = _FlattenDeviceMesh(self)
+        if self.parent is None:
+            self.flatten_meshes[mesh_dim_name] = flatten_mesh
+        else:
+            self.parent.flatten_meshes[mesh_dim_name] = flatten_mesh
+        return flatten_mesh
+
+    def size(self, mesh_dim: Optional[int] = None) -> int:
+        if mesh_dim is None:
+            if self.mesh is None:
+                return self.replicate_pg.size()
+            else:
+                return self.mesh.size() * self.replicate_pg.size()
+        elif mesh_dim == self.replicate_dim:
+            return self.replicate_pg.size()
+        else:
+            assert self.mesh is not None
+            return self.mesh.size(self._real_mesh_dim(mesh_dim))
+
+    @property
+    def ndim(self) -> int:
+        assert self.mesh is not None
+        return self.mesh.ndim + 1
+
+    @property
+    def shape(self) -> Tuple[int, ...]:
+        assert self.mesh is not None
+        ret: List[int] = list(self.mesh.shape)
+        ret.insert(self.replicate_dim, self.replicate_pg.size())
+        return tuple(ret)
+
+    def get_rank(self) -> int:
+        assert self.mesh is not None
+        return self.mesh.get_rank()
+
+    def get_local_rank(self, mesh_dim: Optional[Union[int, str]] = None) -> int:
+        if isinstance(mesh_dim, str):
+            dim = self.mesh_dim_names.index(mesh_dim)
+        else:
+            dim = 0 if mesh_dim is None else int(mesh_dim)
+
+        if mesh_dim is None:
+            if self.mesh is None:
+                return get_rank(self.replicate_pg)
+
+            assert self.replicate_dim == 0, "replicate_dim must be the first one"
+            assert self.mesh is not None
+            other_dim_size = self.mesh.size()
+            other_dim_rank = self.mesh.get_local_rank()
+            replicate_pg_rank = get_rank(self.replicate_pg)
+            return other_dim_size * replicate_pg_rank + other_dim_rank
+        elif dim == self.replicate_dim:
+            return get_rank(self.replicate_pg)
+        else:
+            assert self.mesh is not None
+            return self.mesh.get_local_rank(self._real_mesh_dim(dim))
+
+    def get_coordinate(self) -> Optional[List[int]]:
+        """
+        Return the coordinate of this rank along each dimension of the mesh,
+        or None if this rank is not part of the mesh.
+        """
+        assert self.mesh is not None
+        return self.mesh._coordinate_on_dim if self.mesh._coordinate_on_dim else None
+
+    def get_all_groups(self) -> List[BaseProcessGroup]:
+        raise NotImplementedError
+
+
+class _FlattenDeviceMesh(DeviceMesh):
+    def __init__(self, managed_mesh: ManagedDeviceMesh) -> None:
+        self.managed_mesh = managed_mesh
+
+    def __getitem__(self, mesh_dim_names: Union[str, Tuple[str, ...]]) -> DeviceMesh:
+        raise NotImplementedError
+
+    def get_group(self, mesh_dim: Optional[Union[int, str]] = None) -> BaseProcessGroup:
+        raise NotImplementedError
+
+    def _flatten(self, mesh_dim_name: Optional[str]) -> "DeviceMesh":
+        raise NotImplementedError
+
+    def size(self, mesh_dim: Optional[int] = None) -> int:
+        assert mesh_dim is None
+        return self.managed_mesh.size()
+
+    @property
+    def ndim(self) -> int:
+        raise NotImplementedError
+
+    @property
+    def shape(self) -> Tuple[int, ...]:
+        raise NotImplementedError
+
+    def get_rank(self) -> int:
+        raise NotImplementedError
+
+    def get_local_rank(self, mesh_dim: Optional[Union[int, str]] = None) -> int:
+        assert mesh_dim is None
+        return self.managed_mesh.get_local_rank()
+
+    def get_all_groups(self) -> List[BaseProcessGroup]:
+        raise NotImplementedError
+
+
+def ft_init_device_mesh(
+    *,
+    device_type: str,
+    mesh_shape: Tuple[int, ...],
+    mesh_dim_names: Tuple[str, ...],
+    replicate_dim: int,
+    manager: "Manager",
+) -> "ManagedDeviceMesh":
+    # We need to mislead DeviceMesh into thinking that replicate_dim has only
+    # 1 rank.
+    _mesh_shape = list(mesh_shape)
+    _mesh_shape.pop(replicate_dim)
+    _mesh_dim_names = list(mesh_dim_names)
+    _mesh_dim_names.pop(replicate_dim)
+    mesh = init_device_mesh(
+        device_type,
+        mesh_shape=tuple(_mesh_shape),
+        mesh_dim_names=tuple(_mesh_dim_names),
+    )
+
+    if device_type == "cpu":
+        pg = ProcessGroupGloo()
+    elif device_type == "cuda":
+        pg = ProcessGroupNCCL()
+    else:
+        raise ValueError(f"Unsupported device_type: {device_type}")
+
+    manager._pg = pg
+    replicate_pg = ManagedProcessGroup(manager)
+    # We have to use MultiProcessTestCase, otherwise c10d will complain that
+    # the same backend has been registered.
+    replicate_pg.register(mesh_dim_names[replicate_dim])
+
+    return ManagedDeviceMesh(
+        mesh=mesh,
+        mesh_dim_names=mesh_dim_names,
+        replicate_pg=replicate_pg,
+        replicate_dim=replicate_dim,
+        parent=None,
+    )
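To make the mesh bookkeeping above concrete: ft_init_device_mesh strips the replicate dimension before calling init_device_mesh (the "mislead" comment), and ManagedDeviceMesh re-inserts it, sized by replicate_pg.size(), whenever shapes or dims are queried. Below is a torch-free sketch of that accounting; the helper names and the participant count of 3 are assumptions for illustration, mirroring the logic of ManagedDeviceMesh.shape and _real_mesh_dim.

    # Standalone sketch of ManagedDeviceMesh's index/shape bookkeeping.
    # `replicate_size` stands in for replicate_pg.size(), i.e. whatever
    # manager.num_participants() reports at runtime (3 is assumed here).
    from typing import List, Tuple

    def managed_shape(
        real_shape: Tuple[int, ...], replicate_dim: int, replicate_size: int
    ) -> Tuple[int, ...]:
        # Mirrors ManagedDeviceMesh.shape: re-insert the replicate dimension.
        ret: List[int] = list(real_shape)
        ret.insert(replicate_dim, replicate_size)
        return tuple(ret)

    def real_mesh_dim(mesh_dim: int, replicate_dim: int) -> int:
        # Mirrors ManagedDeviceMesh._real_mesh_dim: dims past the replicate
        # dimension shift down by one in the underlying DeviceMesh.
        return mesh_dim - 1 if mesh_dim > replicate_dim else mesh_dim

    # mesh_shape=(2, 2), replicate_dim=0 -> the real DeviceMesh is (2,).
    assert managed_shape((2,), replicate_dim=0, replicate_size=3) == (3, 2)
    assert real_mesh_dim(1, replicate_dim=0) == 0  # managed dim 1 -> real dim 0
    # get_local_rank(None) flattens (replicate, shard) coordinates as
    # other_dim_size * replicate_rank + other_rank, e.g. 2 * 1 + 0 == 2.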
