|
9 | 9 | import numpy as np |
10 | 10 | import pytest |
11 | 11 | from mlir.dialects.memref import subview |
12 | | -from mlir.ir import MLIRError, Type, Value |
| 12 | +from mlir.ir import ( |
| 13 | + MLIRError, |
| 14 | + Type, |
| 15 | + UnrankedMemRefType, |
| 16 | + Value, |
| 17 | +) |
13 | 18 |
|
14 | 19 | from mlir.extras.ast.canonicalize import canonicalize |
15 | 20 | from mlir.extras.dialects import memref, arith |
|
21 | 26 | alloca_scope_return, |
22 | 27 | global_, |
23 | 28 | rank_reduce, |
| 29 | + reinterpret_cast, |
24 | 30 | S, |
25 | 31 | ) |
26 | 32 | from mlir.extras.dialects.scf import ( |
@@ -721,3 +727,122 @@ def test_dim(ctx: MLIRContext): |
721 | 727 |
|
722 | 728 | dims = mem_dynamic.dims() |
723 | 729 | assert isinstance(dims[1], Value) and isinstance(dims[1].owner.opview, memref.DimOp) |
| 730 | + |
| 731 | + |
def test_cast_ranked_memref_to_static_shape(ctx: MLIRContext):
    """Reinterpret a ranked static memref<2x3xf32> to a fully static [6, 1] shape.

    All offsets/sizes/strides are Python ints, so the result type carries no
    dynamic dimensions and no strided layout attribute.
    """
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), T.f32())
    reinterpret_cast(src, offsets=[0], sizes=[6, 1], strides=[1, 1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[ALLOC]] to offset: [0], sizes: [6, 1], strides: [1, 1] : memref<2x3xf32> to memref<6x1xf32>

    filecheck_with_comments(ctx.module)
| 741 | + |
def test_cast_ranked_memref_to_dynamic_shape(ctx: MLIRContext):
    """Reinterpret a ranked memref with SSA-value offsets/sizes/strides.

    Because every operand is a runtime `index` Value, the result type is fully
    dynamic: memref<?x?xf32, strided<[?, ?], offset: ?>>.
    """
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), T.f32())
    c0 = constant(0, index=True)
    c1 = constant(1, index=True)
    c6 = constant(6, index=True)
    reinterpret_cast(src, offsets=[c0], sizes=[c1, c6], strides=[c6, c1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[C0:.*]] = arith.constant 0 : index
    # CHECK: %[[C1:.*]] = arith.constant 1 : index
    # CHECK: %[[C6:.*]] = arith.constant 6 : index
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[ALLOC]] to offset: [%[[C0]]], sizes: [%[[C1]], %[[C6]]], strides: [%[[C6]], %[[C1]]] : memref<2x3xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>

    filecheck_with_comments(ctx.module)
| 757 | + |
def test_cast_unranked_memref_to_static_shape(ctx: MLIRContext):
    """Reinterpret an unranked memref<*xf32> to a fully static [6, 1] shape.

    The alloc is first erased to memref<*xf32> via memref.cast; static
    offsets/sizes/strides then produce a plain memref<6x1xf32> result.
    """
    f32 = T.f32()
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), f32)
    unranked = memref.CastOp(UnrankedMemRefType.get(f32, None), src).result
    reinterpret_cast(unranked, offsets=[0], sizes=[6, 1], strides=[1, 1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[CAST:.*]] = memref.cast %[[ALLOC]] : memref<2x3xf32> to memref<*xf32>
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[CAST]] to offset: [0], sizes: [6, 1], strides: [1, 1] : memref<*xf32> to memref<6x1xf32>

    filecheck_with_comments(ctx.module)
| 770 | + |
def test_cast_unranked_memref_to_dynamic_shape(ctx: MLIRContext):
    """Reinterpret an unranked memref<*xf32> with SSA-value operands.

    Combines the unranked-source path (memref.cast to memref<*xf32>) with fully
    dynamic offsets/sizes/strides, yielding
    memref<?x?xf32, strided<[?, ?], offset: ?>>.
    """
    f32 = T.f32()
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), f32)
    unranked = memref.CastOp(UnrankedMemRefType.get(f32, None), src).result
    c0 = constant(0, index=True)
    c1 = constant(1, index=True)
    c6 = constant(6, index=True)
    reinterpret_cast(unranked, offsets=[c0], sizes=[c1, c6], strides=[c6, c1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[CAST:.*]] = memref.cast %[[ALLOC]] : memref<2x3xf32> to memref<*xf32>
    # CHECK: %[[C0:.*]] = arith.constant 0 : index
    # CHECK: %[[C1:.*]] = arith.constant 1 : index
    # CHECK: %[[C6:.*]] = arith.constant 6 : index
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[CAST]] to offset: [%[[C0]]], sizes: [%[[C1]], %[[C6]]], strides: [%[[C6]], %[[C1]]] : memref<*xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>

    filecheck_with_comments(ctx.module)
| 789 | + |
def test_reinterpret_cast_mixed_sizes(ctx: MLIRContext):
    """Mix a static and a dynamic size in one reinterpret_cast.

    Static first dim, dynamic second dim; static offset and strides. The single
    dynamic size forces a `?` dimension and a strided layout in the result type.
    """
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), T.f32())
    c1 = constant(1, index=True)
    reinterpret_cast(src, offsets=[0], sizes=[6, c1], strides=[1, 1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[C1:.*]] = arith.constant 1 : index
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[ALLOC]] to offset: [0], sizes: [6, %[[C1]]], strides: [1, 1] : memref<2x3xf32> to memref<6x?xf32, strided<[1, 1]>>

    filecheck_with_comments(ctx.module)
| 802 | + |
def test_reinterpret_cast_mixed_strides(ctx: MLIRContext):
    """Mix a dynamic and a static stride in one reinterpret_cast.

    Static sizes and offset; dynamic first stride, static second stride. The
    dynamic stride shows up as `?` in the strided layout of the result type.
    """
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), T.f32())
    c6 = constant(6, index=True)
    reinterpret_cast(src, offsets=[0], sizes=[6, 1], strides=[c6, 1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[C6:.*]] = arith.constant 6 : index
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[ALLOC]] to offset: [0], sizes: [6, 1], strides: [%[[C6]], 1] : memref<2x3xf32> to memref<6x1xf32, strided<[?, 1]>>

    filecheck_with_comments(ctx.module)
| 815 | + |
def test_reinterpret_cast_mixed_offset(ctx: MLIRContext):
    """Use a dynamic offset with otherwise static sizes and strides.

    Only the offset is an SSA value, so the result layout is
    strided<[1, 1], offset: ?> while the shape stays static.
    """
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), T.f32())
    c0 = constant(0, index=True)
    reinterpret_cast(src, offsets=[c0], sizes=[6, 1], strides=[1, 1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[C0:.*]] = arith.constant 0 : index
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[ALLOC]] to offset: [%[[C0]]], sizes: [6, 1], strides: [1, 1] : memref<2x3xf32> to memref<6x1xf32, strided<[1, 1], offset: ?>>

    filecheck_with_comments(ctx.module)
| 828 | + |
def test_reinterpret_cast_nonzero_static_offset(ctx: MLIRContext):
    """Use a nonzero static offset.

    A static offset of 3 is folded into the result layout as
    strided<[1, 1], offset: 3> rather than a dynamic `?` offset.
    """
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), T.f32())
    reinterpret_cast(src, offsets=[3], sizes=[6, 1], strides=[1, 1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[ALLOC]] to offset: [3], sizes: [6, 1], strides: [1, 1] : memref<2x3xf32> to memref<6x1xf32, strided<[1, 1], offset: 3>>

    filecheck_with_comments(ctx.module)
| 838 | + |
def test_reinterpret_cast_nonzero_dynamic_offset(ctx: MLIRContext):
    """Use a nonzero offset supplied as an SSA value.

    Unlike the static-offset test, the constant 3 passed as a Value makes the
    result layout offset dynamic: strided<[1, 1], offset: ?>.
    """
    # NOTE: renamed from `input` — do not shadow the builtin.
    src = alloc((2, 3), T.f32())
    c3 = constant(3, index=True)
    reinterpret_cast(src, offsets=[c3], sizes=[6, 1], strides=[1, 1])

    # CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<2x3xf32>
    # CHECK: %[[C3:.*]] = arith.constant 3 : index
    # CHECK: %[[OUT:.*]] = memref.reinterpret_cast %[[ALLOC]] to offset: [%[[C3]]], sizes: [6, 1], strides: [1, 1] : memref<2x3xf32> to memref<6x1xf32, strided<[1, 1], offset: ?>>

    filecheck_with_comments(ctx.module)
0 commit comments