@@ -54,7 +54,7 @@ def _dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]:
 def create_joint_forward_backward(fn):
     def joint_forward_backward(
-        primals: List[Any], tangents: List[Any]
+        primals: List[Any], cotangents: List[Any]
     ) -> Tuple[List[Any], List[Any]]:
         # Call the forward pass
         outs = fn(*primals)
@@ -68,20 +68,20 @@ def joint_forward_backward(
                 grad_primals.append(p)
 
         # Get the outputs that need gradients
-        assert len(tangents) == len(outs)
+        assert len(cotangents) == len(outs)
         needed_outs = []
-        needed_tangents = []
-        for out, tangent in zip(outs, tangents):
+        needed_cotangents = []
+        for out, cotangent in zip(outs, cotangents):
             if isinstance(out, Tensor) and out.requires_grad:
                 needed_outs.append(out)
-                needed_tangents.append(tangent)
+                needed_cotangents.append(cotangent)
         backward_out = []
         # Call the backwards pass
         if grad_primals:
             backward_out = torch.autograd.grad(
                 needed_outs,
                 grad_primals,
-                grad_outputs=needed_tangents,
+                grad_outputs=needed_cotangents,
                 allow_unused=True,
             )
         backward_out_iter = iter(backward_out)
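
The rename from tangents to cotangents in this hunk matches what torch.autograd.grad actually consumes: grad_outputs are covectors paired with the forward outputs, i.e. the vector in a vector-Jacobian product. A minimal, self-contained illustration of that semantics (illustration only, not part of this diff):

import torch

x = torch.randn(3, requires_grad=True)
y = x * 2                        # forward output
cotangent = torch.ones_like(y)   # covector paired with y, passed as grad_outputs
(grad_x,) = torch.autograd.grad(y, x, grad_outputs=cotangent)
# grad_x == J^T @ cotangent == tensor([2., 2., 2.])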
@@ -140,12 +140,14 @@ def create_aot_autograd_function(
     compiled_fw = None
     compiled_bw = None
     num_outs = None
-
+    joint_inputs = None
+    fw_outs = None
+    aot_decompositions = {**aot_autograd_decompositions, **decompositions}
     class CompiledFunction(torch.autograd.Function):
         @staticmethod
         @disable_torchdynamo
         def forward(ctx, *flat_tensor_args):
-            nonlocal compiled_fw, compiled_bw, num_outs
+            nonlocal compiled_fw, num_outs, joint_inputs, fw_outs
             if compiled_fw is None:
                 with torch.set_grad_enabled(grad_state):
                     out = flat_fn(*flat_tensor_args)
@@ -159,29 +161,34 @@ def forward(ctx, *flat_tensor_args):
                     num_outs = 1
 
                 joint_inputs = (flat_tensor_args, out)
-                aot_decompositions = {**aot_autograd_decompositions, **decompositions}
+                # Need it because autograd.Function disables grad in forward
                 with torch.set_grad_enabled(grad_state):
                     fx_g = make_fx(joint_forward_backward, aot_decompositions)(
                         *joint_inputs
                     )
                 fw_module, bw_module = partition_fn(fx_g, joint_inputs)
-                # print(fw_module.code, bw_module.code)
 
                 compiled_fw = fw_compiler(fw_module, flat_tensor_args)
                 fw_outs = normalize_as_list(compiled_fw(*flat_tensor_args))
-
-                bw_args = fw_outs[num_outs:] + fw_outs[0:num_outs]
-                compiled_bw = bw_compiler(bw_module, bw_args)
+                if partition_fn is default_partition:
+                    nonlocal compiled_bw
+                    bw_args = fw_outs[num_outs:] + fw_outs[0:num_outs]
+                    compiled_bw = bw_compiler(bw_module, bw_args)
             else:
                 fw_outs = normalize_as_list(compiled_fw(*flat_tensor_args))
             ctx.save_for_backward(*fw_outs[num_outs:])
             return tuple(fw_outs[0:num_outs])
 
         @staticmethod
         @disable_torchdynamo
-        def backward(ctx, *flat_args):
-            contiguous_args = [t.contiguous() for t in flat_args]
-            # contiguous_args = [t for t in flat_args]
+        def backward(ctx, *flat_grad_outs):
+            nonlocal compiled_bw
+            contiguous_args = [t.contiguous() for t in flat_grad_outs]
+            if compiled_bw is None:
+                with torch.set_grad_enabled(grad_state):
+                    fx_g = make_fx(joint_forward_backward, aot_decompositions)(joint_inputs[0], contiguous_args)
+                fw_module, bw_module = partition_fn(fx_g, joint_inputs)
+                compiled_bw = bw_compiler(bw_module, fw_outs[num_outs:] + contiguous_args)
             out = normalize_as_list(compiled_bw(*ctx.saved_tensors, *contiguous_args))
             return tuple(out)
 
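
Taken together, these hunks hoist joint_inputs, fw_outs and the decomposition table out to the enclosing scope and, for any partitioner other than default_partition, defer compiling the backward module until the first backward call, caching it via nonlocal. A rough, self-contained sketch of that lazy-compile-and-cache pattern (illustration only; with_lazy_backward, fw_graph and bw_graph are made-up names, not functorch APIs):

import torch

def with_lazy_backward(fw_compiler, bw_compiler, fw_graph, bw_graph):
    # Compile the forward eagerly; compile the backward lazily on the first
    # backward call and cache it in a nonlocal, as the diff above does for
    # non-default partitioners.
    compiled_fw = fw_compiler(fw_graph)
    compiled_bw = None

    class LazyBackward(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return compiled_fw(x)

        @staticmethod
        def backward(ctx, grad_out):
            nonlocal compiled_bw
            if compiled_bw is None:          # first backward: compile now
                compiled_bw = bw_compiler(bw_graph)
            (x,) = ctx.saved_tensors
            return compiled_bw(x, grad_out.contiguous())

    return LazyBackward.apply

# Identity "compilers" and toy graphs for f(x) = 2 * x:
f = with_lazy_backward(lambda g: g, lambda g: g,
                       fw_graph=lambda x: x * 2,
                       bw_graph=lambda x, g: 2 * g)
x = torch.randn(3, requires_grad=True)
f(x).sum().backward()   # bw_compiler runs here, on the first backward pass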