Skip to content

Commit e65807d

Browse files
authored
add ai edited test 100 3.27 (#78509)
* add ai edit test 3.27 * add ai edit test 3.27 * add ai edit test 3.27 2 * add ai edit test 3.27 2 * add ai edit test 3.27 2 * add ai edit test 3.27 3 * add ai edit test 3.27 4
1 parent 8dadc7d commit e65807d

File tree

84 files changed

+16949
-3
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

84 files changed

+16949
-3
lines changed

.coverage

-52 KB
Binary file not shown.

test/ai_edited_test/CMakeLists.txt

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
file(GLOB TEST_PY_FILES "${CMAKE_CURRENT_SOURCE_DIR}/test_*.py")
file(COPY ${TEST_PY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})

# Register the AI-edited tests only in coverage builds.
if(WITH_COVERAGE)
  foreach(TEST_OP ${TEST_OPS})
    py_test_modules(${TEST_OP} MODULES ${TEST_OP})
    # set_tests_properties(${TEST_OP} PROPERTIES LABELS "RUN_TYPE=NIGHTLY")
  endforeach()
endif()
Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
# Copyright (c) 2026 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
"""
16+
混合精度训练高级测试 / Advanced Mixed Precision Training Tests
17+
18+
测试目标 / Test Target:
19+
paddle AMP (Automatic Mixed Precision) 功能
20+
21+
覆盖的模块 / Covered Modules:
22+
- paddle.amp.auto_cast: 自动混合精度上下文
23+
- paddle.amp.GradScaler: 梯度缩放器
24+
- paddle.amp.decorate: AMP装饰器
25+
26+
作用 / Purpose:
27+
补充混合精度训练API的测试,提升覆盖率。
28+
"""
29+
30+
import unittest
31+
32+
import paddle
33+
from paddle import nn
34+
35+
paddle.disable_static()
36+
37+
38+
class TestAutocast(unittest.TestCase):
    """Tests for the paddle.amp.auto_cast context manager."""

    def test_autocast_basic(self):
        """A forward pass inside auto_cast yields a non-None result."""
        layer = nn.Linear(4, 2)
        inputs = paddle.randn([4, 4])
        with paddle.amp.auto_cast():
            result = layer(inputs)
        self.assertIsNotNone(result)

    def test_autocast_disable(self):
        """With enable=False the forward pass stays in float32."""
        layer = nn.Linear(4, 2)
        inputs = paddle.randn([4, 4])
        with paddle.amp.auto_cast(enable=False):
            result = layer(inputs)
        self.assertEqual(result.dtype, paddle.float32)

    def test_autocast_nested(self):
        """auto_cast contexts can be nested with enable toggled."""
        layer = nn.Linear(4, 2)
        inputs = paddle.randn([4, 4])
        with paddle.amp.auto_cast():
            outer = layer(inputs)
            with paddle.amp.auto_cast(enable=False):
                inner = layer(inputs)
        self.assertIsNotNone(outer)
        self.assertIsNotNone(inner)
67+
68+
69+
class TestGradScaler(unittest.TestCase):
    """Tests for paddle.amp.GradScaler."""

    def test_grad_scaler_basic(self):
        """A full scale/backward/step/update cycle runs end to end."""
        net = nn.Linear(4, 2)
        opt = paddle.optimizer.Adam(parameters=net.parameters())
        scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

        data = paddle.randn([4, 4])
        target = paddle.randn([4, 2])

        with paddle.amp.auto_cast():
            pred = net(data)
            loss = nn.functional.mse_loss(pred, target)

        # Scale the loss before backprop, then unscale-and-step.
        scaler.scale(loss).backward()
        scaler.step(opt)
        scaler.update()

    def test_grad_scaler_state(self):
        """state_dict() exposes the current loss scale under 'scale'."""
        scaler = paddle.amp.GradScaler(init_loss_scaling=512)
        snapshot = scaler.state_dict()
        self.assertIn('scale', snapshot)

    def test_grad_scaler_save_load(self):
        """load_state_dict() restores the saved loss scale."""
        source = paddle.amp.GradScaler(init_loss_scaling=1024)
        snapshot = source.state_dict()

        restored = paddle.amp.GradScaler(init_loss_scaling=512)
        restored.load_state_dict(snapshot)
        self.assertEqual(
            float(snapshot['scale']), float(restored.state_dict()['scale'])
        )
105+
106+
107+
class TestAMPDecorate(unittest.TestCase):
    """Tests for paddle.amp.decorate."""

    def test_decorate_model(self):
        """A decorated Linear model still produces output under auto_cast."""
        net = nn.Linear(4, 2)
        opt = paddle.optimizer.Adam(parameters=net.parameters())
        net, opt = paddle.amp.decorate(models=net, optimizers=opt, level='O1')
        data = paddle.randn([4, 4])
        with paddle.amp.auto_cast():
            result = net(data)
        self.assertIsNotNone(result)

    def test_decorate_level_o1(self):
        """O1-level decoration works for a small convolutional model."""
        net = nn.Sequential(
            nn.Conv2D(3, 8, 3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2D(1),
        )
        opt = paddle.optimizer.Adam(parameters=net.parameters())
        net, opt = paddle.amp.decorate(models=net, optimizers=opt, level='O1')
        data = paddle.randn([2, 3, 16, 16])
        with paddle.amp.auto_cast():
            result = net(data)
        self.assertIsNotNone(result)
135+
136+
137+
class TestMixedPrecisionTraining(unittest.TestCase):
    """End-to-end mixed-precision training smoke tests."""

    def test_full_amp_training_step(self):
        """One complete AMP training step: forward, scaled backward, step."""
        net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
        opt = paddle.optimizer.Adam(parameters=net.parameters())
        scaler = paddle.amp.GradScaler()

        data = paddle.randn([8, 4])
        target = paddle.randn([8, 2])

        with paddle.amp.auto_cast():
            pred = net(data)
            loss = nn.functional.mse_loss(pred, target)

        # Scale, backprop, optimizer step, then clear gradients.
        scaler.scale(loss).backward()
        scaler.step(opt)
        scaler.update()
        opt.clear_grad()

        self.assertIsNotNone(loss)
160+
161+
162+
if __name__ == '__main__':
    # Run all test cases in this module.
    unittest.main()

0 commit comments

Comments
 (0)