Commit 436bae8

Removed malloc and used allocate_temp to allocate memory in native layer norm
1 parent 3eea1f1 commit 436bae8

File tree

1 file changed (+8 -8)


backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp

@@ -225,7 +225,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
   if (weight.has_value()) {
     weight_data = weight.value().mutable_data_ptr<float>();
   } else {
-    weight_data = (float*)malloc(num_elm * sizeof(float));
+    executorch::runtime::Result<void*> temp_mem_weight =
+        ctx.allocate_temp(num_elm * sizeof(float));
+    weight_data = (float*)(temp_mem_weight.get());
+
     for (int i = 0; i < num_elm; i++) {
       weight_data[i] = 1;
     }
@@ -234,7 +237,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
   if (bias.has_value()) {
     bias_data = bias.value().mutable_data_ptr<float>();
   } else {
-    bias_data = (float*)malloc(num_elm * sizeof(float));
+    executorch::runtime::Result<void*> temp_mem_bias =
+        ctx.allocate_temp(num_elm * sizeof(float));
+    bias_data = (float*)(temp_mem_bias.get());
+
     for (int i = 0; i < num_elm; i++) {
       bias_data[i] = 0;
     }
@@ -255,12 +261,6 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
       bias_data,
       (float)eps);

-  if (!bias.has_value()) {
-    free(bias_data);
-  }
-  if (!weight.has_value()) {
-    free(weight_data);
-  }
   } else {
     ET_KERNEL_CHECK(
         ctx,
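Note that the new path dereferences the Result<void*> via get() without first checking it. As a hedged sketch (not part of this commit), the same allocation could be guarded with the ET_KERNEL_CHECK macro already used elsewhere in this file; MemoryAllocationFailed and ret_val below are assumed stand-ins for the kernel's actual error code and return value.

    // Sketch only: guards the temp allocation before use. The ok() check,
    // the error code, and ret_val are illustrative, not from commit 436bae8.
    executorch::runtime::Result<void*> temp_mem_weight =
        ctx.allocate_temp(num_elm * sizeof(float));
    ET_KERNEL_CHECK(
        ctx, temp_mem_weight.ok(), MemoryAllocationFailed, ret_val);
    weight_data = (float*)(temp_mem_weight.get());

A related consequence of the switch: memory returned by allocate_temp is owned by the runtime's temp allocator and reclaimed after the kernel finishes, which is why the explicit free() calls in the last hunk could be deleted rather than replaced.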
