import unittest
from functools import partial

from absl.testing import parameterized
from megablocks.layers.arguments import Arguments
from megablocks.layers.glu import SparseGLU, GroupedGLU
from megablocks.layers import testing

import torch
import stk
import numpy as np

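# test_modules builds a dense reference GLU (testing.GLU) alongside the dMoE
# GLU under test (GroupedGLU or SparseGLU) with a single expert, then ties
# their weights so both modules compute the same function.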
def test_modules(
        hidden_size,
        ffn_hidden_size,
        grouped_mlp=False):
    init_method = partial(torch.nn.init.normal_, mean=0.0, std=0.1)
    args = Arguments(
        hidden_size=hidden_size,
        ffn_hidden_size=ffn_hidden_size,
        moe_num_experts=1,
        moe_top_k=1,
        init_method=init_method,
        memory_optimized_mlp=False,
        mlp_type='glu',
        grouped_mlp=grouped_mlp,
        fp16=False,
        bf16=True)

    glu = testing.GLU(args)
    dmoe_glu = GroupedGLU(args) if grouped_mlp else SparseGLU(args)

    dmoe_glu.cuda(torch.cuda.current_device()).to(torch.bfloat16)
    glu.cuda(torch.cuda.current_device()).to(torch.bfloat16)

    # The dMoE module stores w1/v1 transposed relative to the dense
    # reference, so transpose when copying the shared weights.
    with torch.no_grad():
        glu.w1.copy_(dmoe_glu.w1.T)
        glu.v1.copy_(dmoe_glu.v1.T)
        glu.w2.copy_(dmoe_glu.w2)

    return args, glu, dmoe_glu

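# (batch_size, sequence_length, hidden_size) tuples for the forward tests.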
_DENSE_TESTS = (
    (16, 1024, 512),
    (8, 2048, 512),
)

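# Each test builds matched dense/dMoE GLU modules and checks that their
# forward outputs agree within the tolerance used by testing.allclose.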
class GLUTest(parameterized.TestCase):

    @parameterized.parameters(*_DENSE_TESTS)
    def testGLU_ForwardGroupedMLP(self, bs, sl, hs):
        x = torch.randn(sl, bs, hs).to(torch.bfloat16).cuda()

        _, glu, dmoe_glu = test_modules(
            hidden_size=hs,
            ffn_hidden_size=hs * 2,
            grouped_mlp=True)

        expected_out = glu(x)
        # With a single expert, every token is routed to it.
        tokens_per_expert = torch.tensor([bs * sl]).cuda()
        out = dmoe_glu(x.view(bs * sl, hs), tokens_per_expert)
        out = out.view(sl, bs, hs)

        self.assertSequenceEqual(out.shape, x.shape)
        self.assertSequenceEqual(expected_out.shape, x.shape)
        self.assertTrue(testing.allclose(out, expected_out))

    @parameterized.parameters(*_DENSE_TESTS)
    def testGLU_ForwardSparseMLP(self, bs, sl, hs):
        x = torch.randn(sl, bs, hs).to(torch.bfloat16).cuda()

        _, glu, dmoe_glu = test_modules(
            hidden_size=hs,
            ffn_hidden_size=hs * 2,
            grouped_mlp=False)

        expected_out = glu(x)
        with torch.no_grad():
            # A sparsity of 0 should give a fully dense block mask, so the
            # sparse GLU output should match the dense reference.
            topo = stk.random.mask(bs * sl, hs * 2, 0, blocking=128).cuda()
        out = dmoe_glu(x.view(bs * sl, hs), topo)
        out = out.view(sl, bs, hs)

        self.assertSequenceEqual(out.shape, x.shape)
        self.assertSequenceEqual(expected_out.shape, x.shape)
        self.assertTrue(testing.allclose(out, expected_out))


if __name__ == '__main__':
    unittest.main()