
Commit 257aaea

Update test
1 parent 6df8704 commit 257aaea

File tree

1 file changed (+116, -118 lines)

mlir/test/Target/LLVMIR/openmp-taskloop.mlir

Lines changed: 116 additions & 118 deletions
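For orientation only (not part of the commit): the rewritten CHECK lines below follow how `omp.taskloop` is lowered on the host through libomp's task-loop entry points. The C declarations here are a rough sketch of those entry points as they exist in LLVM's OpenMP runtime; the stand-in typedefs are assumptions made so the snippet is self-contained, and the exact upstream spellings live in kmp.h.

```c
#include <stddef.h>

/* Stand-in typedefs so this sketch compiles on its own; the real types
   (ident_t, kmp_task_t, kmp_routine_entry_t) are defined in libomp's kmp.h. */
typedef struct ident ident_t;        /* source-location descriptor, @1 in the IR */
typedef struct kmp_task kmp_task_t;  /* task descriptor returned by task_alloc   */
typedef int (*kmp_routine_entry_t)(int gtid, void *task); /* outlined body, e.g. @_QPtest..omp_par */

/* Allocates the task plus its trailing shareds/context block; the new CHECK
   lines pass sizeof_kmp_task_t = 40 and sizeof_shareds = 32. */
kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc, int gtid, int flags,
                                  size_t sizeof_kmp_task_t, size_t sizeof_shareds,
                                  kmp_routine_entry_t task_entry);

/* Splits the iteration space [*lb, *ub] with stride st into tasks. task_dup
   (here @omp_taskloop_dup) is invoked per generated task to duplicate
   firstprivate state, which is why it repeats the malloc-and-copy sequence. */
void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
                     unsigned long long *lb, unsigned long long *ub,
                     long long st, int nogroup, int sched,
                     unsigned long long grainsize, void *task_dup);

/* The lowering brackets the generated tasks in an implicit taskgroup. */
void __kmpc_taskgroup(ident_t *loc, int gtid);
void __kmpc_end_taskgroup(ident_t *loc, int gtid);
```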
@@ -32,122 +32,120 @@ llvm.func @_QPtest() {
 llvm.return
 }
 
-// CHECK: %struct.kmp_task_info = type { ptr, ptr, i32, ptr, ptr, i64, i64, i64 }
+// CHECK-LABEL: define void @_QPtest() {
+// CHECK: %[[STRUCTARG:.*]] = alloca { i64, i64, i64, ptr }, align 8
+// CHECK: %[[VAL_0:.*]] = alloca i32, i64 1, align 4
+// CHECK: %[[VAL_1:.*]] = alloca i32, i64 1, align 4
+// CHECK: store i32 20, ptr %[[VAL_1]], align 4
+// CHECK: br label %[[VAL_2:.*]]
+// CHECK: entry: ; preds = %[[VAL_3:.*]]
+// CHECK: br label %[[VAL_4:.*]]
+// CHECK: omp.private.init: ; preds = %[[VAL_2]]
+// CHECK: %[[VAL_5:.*]] = tail call ptr @malloc(i64 ptrtoint (ptr getelementptr ({ i32 }, ptr null, i32 1) to i64))
+// CHECK: %[[VAL_6:.*]] = getelementptr { i32 }, ptr %[[VAL_5]], i32 0, i32 0
+// CHECK: br label %[[VAL_7:.*]]
+// CHECK: omp.private.copy: ; preds = %[[VAL_4]]
+// CHECK: br label %[[VAL_8:.*]]
+// CHECK: omp.private.copy1: ; preds = %[[VAL_7]]
+// CHECK: %[[VAL_9:.*]] = load i32, ptr %[[VAL_1]], align 4
+// CHECK: store i32 %[[VAL_9]], ptr %[[VAL_6]], align 4
+// CHECK: br label %[[VAL_10:.*]]
+// CHECK: omp.taskloop.start: ; preds = %[[VAL_8]]
+// CHECK: br label %[[VAL_11:.*]]
+// CHECK: codeRepl: ; preds = %[[VAL_10]]
+// CHECK: %[[VAL_12:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[STRUCTARG]], i32 0, i32 0
+// CHECK: store i64 1, ptr %[[VAL_12]], align 4
+// CHECK: %[[VAL_13:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[STRUCTARG]], i32 0, i32 1
+// CHECK: store i64 5, ptr %[[VAL_13]], align 4
+// CHECK: %[[VAL_14:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[STRUCTARG]], i32 0, i32 2
+// CHECK: store i64 1, ptr %[[VAL_14]], align 4
+// CHECK: %[[VAL_15:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[STRUCTARG]], i32 0, i32 3
+// CHECK: store ptr %[[VAL_5]], ptr %[[VAL_15]], align 8
+// CHECK: %[[VAL_16:.*]] = call i32 @__kmpc_global_thread_num(ptr @1)
+// CHECK: call void @__kmpc_taskgroup(ptr @1, i32 %[[VAL_16]])
+// CHECK: %[[VAL_17:.*]] = call ptr @__kmpc_omp_task_alloc(ptr @1, i32 %[[VAL_16]], i32 1, i64 40, i64 32, ptr @_QPtest..omp_par)
+// CHECK: %[[VAL_18:.*]] = load ptr, ptr %[[VAL_17]], align 8
+// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 %[[VAL_18]], ptr align 1 %[[STRUCTARG]], i64 32, i1 false)
+// CHECK: %[[VAL_19:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_18]], i32 0, i32 0
+// CHECK: %[[VAL_20:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_18]], i32 0, i32 1
+// CHECK: %[[VAL_21:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_18]], i32 0, i32 2
+// CHECK: %[[VAL_22:.*]] = load i64, ptr %[[VAL_21]], align 4
+// CHECK: call void @__kmpc_taskloop(ptr @1, i32 %[[VAL_16]], ptr %[[VAL_17]], i32 1, ptr %[[VAL_19]], ptr %[[VAL_20]], i64 %[[VAL_22]], i32 1, i32 0, i64 0, ptr @omp_taskloop_dup)
+// CHECK: call void @__kmpc_end_taskgroup(ptr @1, i32 %[[VAL_16]])
+// CHECK: br label %[[VAL_23:.*]]
+// CHECK: taskloop.exit: ; preds = %[[VAL_11]]
+// CHECK: ret void
+
+// CHECK-LABEL: define internal void @_QPtest..omp_par(
+// CHECK: taskloop.alloca:
+// CHECK: %[[VAL_24:.*]] = load ptr, ptr %[[VAL_25:.*]], align 8
+// CHECK: %[[VAL_26:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_24]], i32 0, i32 0
+// CHECK: %[[VAL_27:.*]] = load i64, ptr %[[VAL_26]], align 4
+// CHECK: %[[VAL_28:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_24]], i32 0, i32 1
+// CHECK: %[[VAL_29:.*]] = load i64, ptr %[[VAL_28]], align 4
+// CHECK: %[[VAL_30:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_24]], i32 0, i32 2
+// CHECK: %[[VAL_31:.*]] = load i64, ptr %[[VAL_30]], align 4
+// CHECK: %[[VAL_32:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_24]], i32 0, i32 3
+// CHECK: %[[VAL_33:.*]] = load ptr, ptr %[[VAL_32]], align 8, !align !1
+// CHECK: %[[VAL_34:.*]] = alloca i32, align 4
+// CHECK: br label %[[VAL_35:.*]]
+// CHECK: taskloop.body: ; preds = %[[VAL_36:.*]]
+// CHECK: %[[VAL_37:.*]] = getelementptr { i32 }, ptr %[[VAL_33]], i32 0, i32 0
+// CHECK: br label %[[VAL_38:.*]]
+// CHECK: omp.taskloop.region: ; preds = %[[VAL_35]]
+// CHECK: br label %[[VAL_39:.*]]
+// CHECK: omp_loop.preheader: ; preds = %[[VAL_38]]
+// CHECK: %[[VAL_40:.*]] = sub i64 %[[VAL_29]], %[[VAL_27]]
+// CHECK: %[[VAL_41:.*]] = sdiv i64 %[[VAL_40]], 1
+// CHECK: %[[VAL_42:.*]] = add i64 %[[VAL_41]], 1
+// CHECK: %[[VAL_43:.*]] = trunc i64 %[[VAL_42]] to i32
+// CHECK: %[[VAL_44:.*]] = trunc i64 %[[VAL_27]] to i32
+// CHECK: br label %[[VAL_45:.*]]
+// CHECK: omp_loop.header: ; preds = %[[VAL_46:.*]], %[[VAL_39]]
+// CHECK: %[[VAL_47:.*]] = phi i32 [ 0, %[[VAL_39]] ], [ %[[VAL_48:.*]], %[[VAL_46]] ]
+// CHECK: br label %[[VAL_49:.*]]
+// CHECK: omp_loop.cond: ; preds = %[[VAL_45]]
+// CHECK: %[[VAL_50:.*]] = icmp ult i32 %[[VAL_47]], %[[VAL_43]]
+// CHECK: br i1 %[[VAL_50]], label %[[VAL_51:.*]], label %[[VAL_52:.*]]
+// CHECK: omp_loop.exit: ; preds = %[[VAL_49]]
+// CHECK: br label %[[VAL_53:.*]]
+// CHECK: omp_loop.after: ; preds = %[[VAL_52]]
+// CHECK: br label %[[VAL_54:.*]]
+// CHECK: omp.region.cont: ; preds = %[[VAL_53]]
+// CHECK: tail call void @free(ptr %[[VAL_33]])
+// CHECK: br label %[[VAL_55:.*]]
+// CHECK: omp_loop.body: ; preds = %[[VAL_49]]
+// CHECK: %[[VAL_56:.*]] = mul i32 %[[VAL_47]], 1
+// CHECK: %[[VAL_57:.*]] = add i32 %[[VAL_56]], %[[VAL_44]]
+// CHECK: br label %[[VAL_58:.*]]
+// CHECK: omp.loop_nest.region: ; preds = %[[VAL_51]]
+// CHECK: store i32 %[[VAL_57]], ptr %[[VAL_34]], align 4
+// CHECK: %[[VAL_59:.*]] = load i32, ptr %[[VAL_37]], align 4
+// CHECK: %[[VAL_60:.*]] = add i32 %[[VAL_59]], 1
+// CHECK: store i32 %[[VAL_60]], ptr %[[VAL_37]], align 4
+// CHECK: br label %[[VAL_61:.*]]
+// CHECK: omp.region.cont2: ; preds = %[[VAL_58]]
+// CHECK: br label %[[VAL_46]]
+// CHECK: omp_loop.inc: ; preds = %[[VAL_61]]
+// CHECK: %[[VAL_48]] = add nuw i32 %[[VAL_47]], 1
+// CHECK: br label %[[VAL_45]]
+// CHECK: taskloop.exit.exitStub: ; preds = %[[VAL_54]]
+// CHECK: ret void
+
+// CHECK-LABEL: define internal void @omp_taskloop_dup(
+// CHECK: entry:
+// CHECK: %[[VAL_62:.*]] = getelementptr { %[[VAL_63:.*]], { i64, i64, i64, ptr } }, ptr %[[VAL_64:.*]], i32 0, i32 1
+// CHECK: %[[VAL_65:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_62]], i32 0, i32 3
+// CHECK: %[[VAL_66:.*]] = getelementptr { %[[VAL_63]], { i64, i64, i64, ptr } }, ptr %[[VAL_67:.*]], i32 0, i32 1
+// CHECK: %[[VAL_68:.*]] = getelementptr { i64, i64, i64, ptr }, ptr %[[VAL_66]], i32 0, i32 3
+// CHECK: %[[VAL_69:.*]] = load ptr, ptr %[[VAL_68]], align 8
+// CHECK: %[[VAL_70:.*]] = tail call ptr @malloc(i64 ptrtoint (ptr getelementptr ({ i32 }, ptr null, i32 1) to i64))
+// CHECK: store ptr %[[VAL_70]], ptr %[[VAL_65]], align 8
+// CHECK: %[[VAL_71:.*]] = getelementptr { i32 }, ptr %[[VAL_69]], i32 0, i32 0
+// CHECK: %[[VAL_72:.*]] = getelementptr { i32 }, ptr %[[VAL_70]], i32 0, i32 0
+// CHECK: br label %[[VAL_73:.*]]
+// CHECK: omp.private.copy: ; preds = %[[VAL_74:.*]]
+// CHECK: %[[VAL_75:.*]] = load i32, ptr %[[VAL_71]], align 4
+// CHECK: store i32 %[[VAL_75]], ptr %[[VAL_72]], align 4
+// CHECK: ret void
 
-// CHECK-LABEL: define void @_QPtest() {
-// CHECK: %[[STRUCTARG:.*]] = alloca { ptr }, align 8
-// CHECK: %[[VAL1:.*]] = alloca i32, i64 1, align 4
-// CHECK: %[[VAL_X:.*]] = alloca i32, i64 1, align 4
-// CHECK: store i32 20, ptr %[[VAL_X]], align 4
-// CHECK: br label %entry
-
-// CHECK: entry:
-// CHECK: br label %omp.private.init
-
-// CHECK: omp.private.init: ; preds = %entry
-// CHECK: %[[OMP_TASK_CONTEXT_PTR:.*]] = tail call ptr @malloc(i64 ptrtoint (ptr getelementptr ({ i32 }, ptr null, i32 1) to i64))
-// CHECK: %[[PRIV_GEP:.*]] = getelementptr { i32 }, ptr %[[OMP_TASK_CONTEXT_PTR]], i32 0, i32 0
-// CHECK: br label %omp.private.copy
-
-// CHECK: omp.private.copy:
-// CHECK: br label %omp.private.copy1
-
-// CHECK: omp.private.copy1:
-// CHECK: %[[LOAD_X:.*]] = load i32, ptr %[[VAL_X]], align 4
-// CHECK: store i32 %[[LOAD_X]], ptr %[[PRIV_GEP]], align 4
-// CHECK: br label %omp.taskloop.start
-
-// CHECK: omp.taskloop.start:
-// CHECK: br label %codeRepl
-
-// CHECK: codeRepl:
-// CHECK: %[[GEP_OMP_TASK_CONTEXT_PTR:.*]] = getelementptr { ptr }, ptr %[[STRUCTARG]], i32 0, i32 0
-// CHECK: store ptr %[[OMP_TASK_CONTEXT_PTR]], ptr %[[GEP_OMP_TASK_CONTEXT_PTR]], align 8
-// CHECK: %[[GTID:.*]] = call i32 @__kmpc_global_thread_num(ptr @1)
-// CHECK: call void @__kmpc_taskgroup(ptr @1, i32 %[[GTID]])
-// CHECK: %[[TASK_PTR:.*]] = call ptr @__kmpc_omp_task_alloc(ptr @1, i32 %[[GTID]], i32 1, i64 64, i64 8, ptr @_QPtest..omp_par)
-// CHECK: %[[LB_GEP:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR]], i32 0, i32 5
-// CHECK: store i64 1, ptr %[[LB_GEP]], align 4
-// CHECK: %[[UB_GEP:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR]], i32 0, i32 6
-// CHECK: store i64 5, ptr %[[UB_GEP]], align 4
-// CHECK: %[[STEP_GEP:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR]], i32 0, i32 7
-// CHECK: store i64 1, ptr %[[STEP_GEP]], align 4
-// CHECK: %[[LOAD_STEP:.*]] = load i64, ptr %[[STEP_GEP]], align 4
-// CHECK: %10 = load ptr, ptr %[[TASK_PTR]], align 8
-// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 %10, ptr align 1 %[[STRUCTARG]], i64 8, i1 false)
-// CHECK: call void @__kmpc_taskloop(ptr @1, i32 %[[GTID]], ptr %[[TASK_PTR]], i32 1, ptr %[[LB_GEP]], ptr %[[UB_GEP]], i64 %[[LOAD_STEP]], i32 1, i32 0, i64 0, ptr null)
-// CHECK: call void @__kmpc_end_taskgroup(ptr @1, i32 %[[GTID]])
-// CHECK: br label %taskloop.exit
-
-// CHECK: taskloop.exit:
-// CHECK: tail call void @free(ptr %[[OMP_TASK_CONTEXT_PTR]])
-// CHECK: ret void
-// CHECK: }
-
-// CHECK-LABEL: define internal void @_QPtest..omp_par
-// CHECK-SAME: i32 %[[GLOBAL_TID:.*]], ptr %[[TASK_PTR1:.*]]) {
-// CHECK: taskloop.alloca:
-// CHECK: %[[LOAD_TASK_PTR:.*]] = load ptr, ptr %[[TASK_PTR1]], align 8
-// CHECK: %[[GEP_LB:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR1]], i32 0, i32 5
-// CHECK: %[[LOAD_LB64:.*]] = load i64, ptr %[[GEP_LB]], align 4
-// CHECK: %[[LB:.*]] = trunc i64 %[[LOAD_LB64]] to i32
-// CHECK: %[[GEP_UB:.*]] = getelementptr inbounds nuw %struct.kmp_task_info, ptr %[[TASK_PTR1]], i32 0, i32 6
-// CHECK: %[[LOAD_UB64:.*]] = load i64, ptr %[[GEP_UB]], align 4
-// CHECK: %[[UB:.*]] = trunc i64 %[[LOAD_UB64]] to i32
-// CHECK: %[[GEP_OMP_TASK_CONTEXT_PTR:.*]] = getelementptr { ptr }, ptr %[[LOAD_TASK_PTR]], i32 0, i32 0
-// CHECK: %[[LOADGEP_OMP_TASK_CONTEXT_PTR:.*]] = load ptr, ptr %[[GEP_OMP_TASK_CONTEXT_PTR]], align 8, !align !1
-// CHECK: %[[OMP_PRIVATE_ALLOC:.*]] = alloca i32, align 4
-// CHECK: br label %taskloop.body
-
-// CHECK: taskloop.body:
-// CHECK: %[[LOAD_X:.*]] = getelementptr { i32 }, ptr %[[LOADGEP_OMP_TASK_CONTEXT_PTR]], i32 0, i32 0
-// CHECK: br label %omp.taskloop.region
-
-// CHECK: omp.taskloop.region:
-// CHECK: br label %omp_loop.preheader
-
-// CHECK: omp_loop.preheader:
-// CHECK: %[[VAL2:.*]] = sub i32 %[[UB]], %[[LB]]
-// CHECK: %[[TRIP_CNT:.*]] = add i32 %[[VAL2]], 1
-// CHECK: br label %omp_loop.header
-
-// CHECK: omp_loop.header:
-// CHECK: %[[OMP_LOOP_IV:.*]] = phi i32 [ 0, %omp_loop.preheader ], [ %omp_loop.next, %omp_loop.inc ]
-// CHECK: br label %omp_loop.cond
-
-// CHECK: omp_loop.cond:
-// CHECK: %[[OMP_LOOP_CMP:.*]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[TRIP_CNT]]
-// CHECK: br i1 %[[OMP_LOOP_CMP]], label %omp_loop.body, label %omp_loop.exit
-
-// CHECK: omp_loop.exit:
-// CHECK: br label %omp_loop.after
-
-// CHECK: omp_loop.after:
-// CHECK: br label %omp.region.cont
-
-// CHECK: omp.region.cont:
-// CHECK: %[[IS_ALLOCATED:.*]] = icmp ne ptr %[[LOADGEP_OMP_TASK_CONTEXT_PTR]], null
-// CHECK: br label %taskloop.exit.exitStub
-
-// CHECK: omp_loop.body:
-// CHECK: %[[VAL3:.*]] = mul i32 %[[OMP_LOOP_IV]], 1
-// CHECK: %[[VAL5:.*]] = add i32 %[[VAL3]], %[[LB]]
-// CHECK: br label %omp.loop_nest.region
-
-// CHECK: omp.loop_nest.region:
-// CHECK: store i32 %[[VAL5]], ptr %[[OMP_PRIVATE_ALLOC]], align 4
-// CHECK: %[[VAL6:.*]] = load i32, ptr %[[LOAD_X]], align 4
-// CHECK: %[[RES:.*]] = add i32 %[[VAL6]], 1
-// CHECK: store i32 %[[RES]], ptr %[[LOAD_X]], align 4
-// CHECK: br label %omp.region.cont2
-
-// CHECK: omp.region.cont2:
-// CHECK: br label %omp_loop.inc
-
-// CHECK: omp_loop.inc:
-// CHECK: %omp_loop.next = add nuw i32 %[[OMP_LOOP_IV]], 1
-// CHECK: br label %omp_loop.header
-
-// CHECK: taskloop.exit.exitStub:
-// CHECK: ret void
-// CHECK: }