
Commit 8062693

Separate forward and backward compilation
ghstack-source-id: 056ba4b15716a5e5fca0481de1d9a02c08415a63
Pull Request resolved: #856
1 parent 130582c commit 8062693
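
With this change, the backward graph is no longer compiled eagerly during the first forward call: `create_aot_autograd_function` now returns a plain `return_fn` wrapper instead of `CompiledFunction.apply`, and `backward()` re-traces the joint graph and compiles the backward module on demand through a nested `aot_function` call. A minimal sketch of how the split shows up for a caller (the `functorch.compile.aot_function` import path and the printing "compiler" are assumptions for illustration, not part of this commit):

    import torch
    from functorch.compile import aot_function  # assumed public entry point

    def announcing_compiler(fx_module, example_inputs):
        # Hypothetical "compiler": report that it ran and return the traced graph unchanged.
        print("compiling a graph with", len(fx_module.graph.nodes), "nodes")
        return fx_module

    def fn(x):
        return torch.sin(x).sum()

    compiled_fn = aot_function(fn, announcing_compiler, announcing_compiler)
    x = torch.randn(4, requires_grad=True)
    loss = compiled_fn(x)   # fw_compiler runs here, on the first forward call
    loss.backward()         # bw_compiler now runs here, when backward is first needed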

File tree

4 files changed: +201 -71 lines changed


functorch/_src/aot_autograd.py (+84 -34)

@@ -1,14 +1,14 @@
 import torch
 import torch.nn as nn
-from torch import Tensor
+from torch import Tensor, is_grad_enabled
 from functorch import make_fx
 from torch.fx import immutable_collections
 import torch.utils._pytree as pytree
 import torch.utils.dlpack
 from torch.nn.utils import _stateless
 from functorch._C import CompileCache
 from .decompositions import register_decomposition
-from .partitioners import default_partition
+from .partitioners import default_partition, _get_saved_values, _extract_fwd_bwd_modules
 from .named_members_polyfill import _named_parameters, _named_buffers
 from typing import Callable, List, Dict, Any, Tuple, Optional
 from functools import wraps
@@ -54,7 +54,7 @@ def _dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]:
 
 def create_joint_forward_backward(fn):
     def joint_forward_backward(
-        primals: List[Any], tangents: List[Any]
+        primals: List[Any], cotangents: List[Any]
     ) -> Tuple[List[Any], List[Any]]:
         # Call the forward pass
         outs = fn(*primals)
@@ -68,21 +68,21 @@ def joint_forward_backward(
                 grad_primals.append(p)
 
         # Get the outputs that need gradients
-        assert len(tangents) == len(outs)
+        assert len(cotangents) == len(outs)
         needed_outs = []
-        needed_tangents = []
-        for out, tangent in zip(outs, tangents):
+        needed_cotangents = []
+        for out, cotangent in zip(outs, cotangents):
             if isinstance(out, Tensor) and out.requires_grad:
                 needed_outs.append(out)
-                needed_tangents.append(tangent)
+                needed_cotangents.append(cotangent)
         backward_out = []
         # Call the backwards pass
        if grad_primals:
             backward_out = torch.autograd.grad(
                 needed_outs,
                 grad_primals,
-                grad_outputs=needed_tangents,
-                allow_unused=True,
+                grad_outputs=needed_cotangents,
+                allow_unused=True
             )
         backward_out_iter = iter(backward_out)
         return outs, [
@@ -138,14 +138,18 @@ def create_aot_autograd_function(
     joint_forward_backward = create_joint_forward_backward(flat_fn)
 
     compiled_fw = None
-    compiled_bw = None
+    fw_module = None
+    bw_modules = []
     num_outs = None
+    saved_value_names = None
+    aot_decompositions = {**aot_autograd_decompositions, **decompositions}
 
     class CompiledFunction(torch.autograd.Function):
         @staticmethod
         @disable_torchdynamo
         def forward(ctx, *flat_tensor_args):
-            nonlocal compiled_fw, compiled_bw, num_outs
+            ctx.set_materialize_grads(False)
+            nonlocal compiled_fw, num_outs, fw_module, saved_value_names
             if compiled_fw is None:
                 with torch.set_grad_enabled(grad_state):
                     out = flat_fn(*flat_tensor_args)
@@ -159,34 +163,78 @@ def forward(ctx, *flat_tensor_args):
                     num_outs = 1
 
                 joint_inputs = (flat_tensor_args, out)
-                aot_decompositions = {**aot_autograd_decompositions, **decompositions}
+                # Need it because autograd.Function disables grad in forward
                 with torch.set_grad_enabled(grad_state):
                     fx_g = make_fx(joint_forward_backward, aot_decompositions)(
                         *joint_inputs
                     )
-                fw_module, bw_module = partition_fn(fx_g, joint_inputs)
-                # print(fw_module.code, bw_module.code)
-
+                # This means the forward and backward graphs are created based on the input fn
+                # However we need to take in grad_out for the saved intermediates as well.
+                fw_module, bw_module, saved_value_nodes = partition_fn(fx_g, joint_inputs)
+                saved_value_names = [node.name for node in saved_value_nodes]
                 compiled_fw = fw_compiler(fw_module, flat_tensor_args)
                 fw_outs = normalize_as_list(compiled_fw(*flat_tensor_args))
-
-                bw_args = fw_outs[num_outs:] + fw_outs[0:num_outs]
-                compiled_bw = bw_compiler(bw_module, bw_args)
             else:
                 fw_outs = normalize_as_list(compiled_fw(*flat_tensor_args))
-            ctx.save_for_backward(*fw_outs[num_outs:])
-            return tuple(fw_outs[0:num_outs])
+
+            ctx.num_intermediate = len(fw_outs[num_outs:])
+            ctx.num_inputs = len(flat_tensor_args)
+            to_be_saved = fw_outs[num_outs:] + list(flat_tensor_args) + fw_outs[0:num_outs]
+            ctx.save_for_backward(*to_be_saved)
+            return tuple(fw_outs)
 
         @staticmethod
         @disable_torchdynamo
-        def backward(ctx, *flat_args):
-            contiguous_args = [t.contiguous() for t in flat_args]
-            # contiguous_args = [t for t in flat_args]
-            out = normalize_as_list(compiled_bw(*ctx.saved_tensors, *contiguous_args))
-            return tuple(out)
-
-    return CompiledFunction
-
+        def backward(ctx, *flat_grad_outs):
+            nonlocal fw_module, bw_modules, saved_value_names
+            intermediates = ctx.saved_tensors[:ctx.num_intermediate]
+            inputs = ctx.saved_tensors[ctx.num_intermediate:ctx.num_intermediate+ctx.num_inputs]
+            is_grad_enabled = torch.is_grad_enabled()
+
+            if not is_grad_enabled:
+                input_flat_grad_outs = []
+                for grad in flat_grad_outs:
+                    if grad is not None:
+                        input_flat_grad_outs.append(grad)
+                with torch.set_grad_enabled(grad_state):
+                    fx_g_b = make_fx(joint_forward_backward, aot_decompositions)(inputs, input_flat_grad_outs)
+            else:
+                input_flat_grad_outs = flat_grad_outs
+                j_b = create_joint_forward_backward(fw_module)
+                with torch.set_grad_enabled(grad_state):
+                    fx_g_b = make_fx(j_b, aot_decompositions)(inputs, input_flat_grad_outs)
+
+            saved_value_nodes = _get_saved_values(fx_g_b, saved_value_names)
+            assert len(saved_value_nodes) <= len(saved_value_names)
+            fw_module_b, bw_module_b, saved_values_new = _extract_fwd_bwd_modules(fx_g_b, saved_value_nodes)
+            bw_module_fn = None
+            for elem in bw_modules:
+                if elem.code == bw_module_b.code:
+                    bw_module_fn = elem
+            if bw_module_fn is None:
+                bw_modules.append(bw_module_b)
+                bw_module_fn = bw_module_b
+
+            f = aot_function(bw_module_fn, bw_compiler, bw_compiler, partition_fn, aot_decompositions)
+
+            if len(saved_values_new) != len(saved_value_names):
+                new_intermediates = []
+                # Forward saves more intermediates than needed
+                assert len(saved_values_new) < len(saved_value_names)
+                j = 0
+                for node in saved_values_new:
+                    while node.name != saved_value_names[j]:
+                        j+=1
+                    new_intermediates.append(intermediates[j])
+                    j+=1
                intermediates = new_intermediates
+            out = f(*intermediates, *input_flat_grad_outs)
+            return tuple(normalize_as_list(out))
+
+    def return_fn(*args, **kwargs):
+        out = CompiledFunction.apply(*args, **kwargs)
+        return out[0:num_outs]
+    return return_fn
 
 class _CompileCache(CompileCache):
     pass
@@ -275,7 +323,7 @@ def rearrange(tensor_args, static_args, static_argnums):
     return args
 
 
-KNOWN_TYPES = [torch.Tensor, int, str, float, bool]
+KNOWN_TYPES = [torch.Tensor, int, str, float, bool, None]
 
 
 def aot_function(
@@ -411,7 +459,9 @@ def returned_function(*args, **kwargs):
             hasher_type,
             *flat_args_for_cache,
         )
-
+        # print("fn_id: ", fn_id)
+        # print("size: ", compile_cache.size())
+        # print("num_tensor_args: ", num_tensor_args)
         # Compile the function and save it in the cache
         if cached_res is None:
             # Save the args_spec for flat_tensor_args to unflatten while tracing
@@ -436,7 +486,7 @@ def flat_fn(*flat_tensor_args):
                 for i in flat_out:
                     is_known_type = False
                     for j in KNOWN_TYPES:
-                        if isinstance(i, j):
+                        if j is None or isinstance(i, j):
                             is_known_type = True
                             break
                     if not is_known_type:
@@ -458,7 +508,7 @@ def flat_fn(*flat_tensor_args):
                 partition_fn,
                 decompositions,
                 grad_state=torch.is_grad_enabled(),
-            ).apply
+            )
             cached_res = (compiled_fn, out_spec)
 
             # Save the compiled_fn in the cache
@@ -598,7 +648,7 @@ def aot_function_simplified(
         partition_fn,
         decompositions,
         grad_state=torch.is_grad_enabled(),
-    ).apply
+    )
 
     return compiled_fn
 
@@ -620,4 +670,4 @@ def forward(self, *args, **kwargs):
 
 
 compiled_function = aot_function
-compiled_module = aot_module
+compiled_module = aot_module
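
The new forward saves intermediates, inputs, and user-visible outputs in one flat list (`to_be_saved = fw_outs[num_outs:] + list(flat_tensor_args) + fw_outs[0:num_outs]`), and `backward()` recovers each group purely by position via `ctx.num_intermediate` and `ctx.num_inputs`. A tiny self-contained check of that layout convention (toy stand-ins for tensors; not code from the commit):

    num_outs, num_inputs = 1, 2
    flat_tensor_args = ["in0", "in1"]
    outs = ["out0"]                                  # user-visible outputs
    intermediates = ["saved0", "saved1", "saved2"]   # values the partitioner kept

    fw_outs = outs + intermediates                   # what the compiled forward returns
    to_be_saved = fw_outs[num_outs:] + list(flat_tensor_args) + fw_outs[0:num_outs]

    num_intermediate = len(fw_outs[num_outs:])
    # backward() slices the saved tensors back apart, as in the diff above:
    assert to_be_saved[:num_intermediate] == intermediates
    assert to_be_saved[num_intermediate:num_intermediate + num_inputs] == flat_tensor_args
    assert to_be_saved[num_intermediate + num_inputs:] == outs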

functorch/_src/partitioners.py (+17 -2)

@@ -108,8 +108,23 @@ def _extract_fwd_bwd_modules(joint_module: fx.GraphModule, saved_values):
 
     fwd_module = fx.GraphModule(joint_module, fwd_graph)
     bwd_module = fx.GraphModule(joint_module, bwd_graph)
-    return fwd_module, bwd_module
+    return fwd_module, bwd_module, saved_values
 
+def _get_saved_values(new_module: fx.GraphModule, saved_value_names):
+    saved_values = []
+    for node in new_module.graph.nodes:
+        if node.name in saved_value_names:
+            if 'tensor_meta' not in node.meta and node.op == 'call_function':
+                users = node.users
+                assert all(user.target == operator.getitem for user in users)
+                for user in users:
+                    saved_values.append(user)
+            else:
+                saved_values.append(node)
+
+    saved_values = list(saved_values)
+
+    return saved_values
 
 def default_partition(
     joint_module: fx.GraphModule, _joint_inputs
@@ -153,8 +168,8 @@ def default_partition(
                 saved_values.append(user)
         else:
            saved_values.append(node)
-    saved_values = list(set(saved_values))
 
+    saved_values = list(saved_values)
     return _extract_fwd_bwd_modules(joint_module, saved_values)
 
 
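
For reference, the backward path in aot_autograd.py composes these two helpers: `_get_saved_values` picks out the nodes of a freshly traced joint graph whose names match the values the compiled forward already saved, and `_extract_fwd_bwd_modules` then cuts the joint graph into forward/backward modules along those nodes. A minimal sketch of that composition (the wrapper `split_retraced_joint_graph` is hypothetical; the two helpers are the ones touched by this diff):

    from torch import fx
    from functorch._src.partitioners import _get_saved_values, _extract_fwd_bwd_modules

    def split_retraced_joint_graph(joint_module: fx.GraphModule, saved_value_names):
        # Find the nodes of the re-traced joint graph that correspond, by name,
        # to the values the compiled forward saved ...
        saved_value_nodes = _get_saved_values(joint_module, saved_value_names)
        # ... then split the joint graph into forward/backward modules along them.
        fw_module, bw_module, saved_values = _extract_fwd_bwd_modules(
            joint_module, saved_value_nodes
        )
        return fw_module, bw_module, saved_values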
