; xref: /llvm-project/llvm/test/CodeGen/Thumb2/LowOverheadLoops/arm_cmplx_dot_prod_f32.ll (revision e0ed0333f0fed2e73f805afd58b61176a87aa3ad)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s

; Complex dot product over interleaved (real, imag) f32 pairs, built on the
; MVE VCMLA intrinsics (rotations #0 and #90 accumulate real*real-imag*imag
; and real*imag+imag*real respectively). The IR has three regions:
;   - while.body:   main loop for numSamples > 7, unrolled to process two
;                   <4 x float> vectors per iteration (hardware low-overhead
;                   loop expected: le / dlstp in the CHECK lines),
;   - while.body9:  predicated tail (vctp32 + masked loads) for the 1-7
;                   leftover complex samples after the main loop,
;   - do.body:      tail-predicated do-while loop for small inputs (<8).
; The final reduction adds lanes 0+2 (real) and 1+3 (imag) and stores them
; to %realResult / %imagResult.
define void @arm_cmplx_dot_prod_f32(ptr %pSrcA, ptr %pSrcB, i32 %numSamples, ptr nocapture %realResult, ptr nocapture %imagResult) {
; CHECK-LABEL: arm_cmplx_dot_prod_f32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    ldr.w r12, [sp, #32]
; CHECK-NEXT:    cmp r2, #8
; CHECK-NEXT:    blo .LBB0_6
; CHECK-NEXT:  @ %bb.1: @ %while.body.preheader
; CHECK-NEXT:    lsrs r4, r2, #2
; CHECK-NEXT:    mov.w lr, #2
; CHECK-NEXT:    cmp r4, #2
; CHECK-NEXT:    vldrw.u32 q2, [r1], #32
; CHECK-NEXT:    vldrw.u32 q1, [r0], #32
; CHECK-NEXT:    it lt
; CHECK-NEXT:    lsrlt.w lr, r2, #2
; CHECK-NEXT:    rsb r4, lr, r2, lsr #2
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    add.w lr, r4, #1
; CHECK-NEXT:  .LBB0_2: @ %while.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #0
; CHECK-NEXT:    vldrw.u32 q3, [r1, #-16]
; CHECK-NEXT:    vldrw.u32 q4, [r0, #-16]
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #90
; CHECK-NEXT:    vldrw.u32 q1, [r0], #32
; CHECK-NEXT:    vldrw.u32 q2, [r1], #32
; CHECK-NEXT:    vcmla.f32 q0, q4, q3, #0
; CHECK-NEXT:    vcmla.f32 q0, q4, q3, #90
; CHECK-NEXT:    le lr, .LBB0_2
; CHECK-NEXT:  @ %bb.3: @ %while.end
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #0
; CHECK-NEXT:    movs r4, #6
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #90
; CHECK-NEXT:    vldrw.u32 q1, [r1, #-16]
; CHECK-NEXT:    vldrw.u32 q2, [r0, #-16]
; CHECK-NEXT:    ands.w r2, r4, r2, lsl #1
; CHECK-NEXT:    vcmla.f32 q0, q2, q1, #0
; CHECK-NEXT:    vcmla.f32 q0, q2, q1, #90
; CHECK-NEXT:    beq .LBB0_8
; CHECK-NEXT:  @ %bb.4: @ %while.body9
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    cmp r2, #4
; CHECK-NEXT:    vpstttt
; CHECK-NEXT:    vldrwt.u32 q1, [r1]
; CHECK-NEXT:    vldrwt.u32 q2, [r0]
; CHECK-NEXT:    vcmlat.f32 q0, q2, q1, #0
; CHECK-NEXT:    vcmlat.f32 q0, q2, q1, #90
; CHECK-NEXT:    bls .LBB0_8
; CHECK-NEXT:  @ %bb.5: @ %while.body9.1
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    vpstttt
; CHECK-NEXT:    vldrwt.u32 q1, [r1, #16]
; CHECK-NEXT:    vldrwt.u32 q2, [r0, #16]
; CHECK-NEXT:    vcmlat.f32 q0, q2, q1, #0
; CHECK-NEXT:    vcmlat.f32 q0, q2, q1, #90
; CHECK-NEXT:    b .LBB0_8
; CHECK-NEXT:  .LBB0_6: @ %if.else
; CHECK-NEXT:    lsls r4, r2, #1
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    dlstp.32 lr, r4
; CHECK-NEXT:  .LBB0_7: @ %do.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
; CHECK-NEXT:    vldrw.u32 q2, [r0], #16
; CHECK-NEXT:    vcmla.f32 q0, q2, q1, #0
; CHECK-NEXT:    vcmla.f32 q0, q2, q1, #90
; CHECK-NEXT:    letp lr, .LBB0_7
; CHECK-NEXT:  .LBB0_8: @ %if.end
; CHECK-NEXT:    vadd.f32 s0, s0, s2
; CHECK-NEXT:    vadd.f32 s2, s1, s3
; CHECK-NEXT:    vstr s0, [r3]
; CHECK-NEXT:    vstr s2, [r12]
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %cmp = icmp ugt i32 %numSamples, 7
  br i1 %cmp, label %while.body.preheader, label %if.else

while.body.preheader:                             ; preds = %entry
  %vecSrcA.0.in108 = bitcast ptr %pSrcA to ptr
  %vecSrcA.0109 = load <4 x float>, ptr %vecSrcA.0.in108, align 4
  %vecSrcB.0.in106 = bitcast ptr %pSrcB to ptr
  %vecSrcB.0107 = load <4 x float>, ptr %vecSrcB.0.in106, align 4
  %pSrcB.addr.0105 = getelementptr inbounds float, ptr %pSrcB, i32 4
  %pSrcA.addr.0104 = getelementptr inbounds float, ptr %pSrcA, i32 4
  %shr = lshr i32 %numSamples, 2
  br label %while.body

while.body:                                       ; preds = %while.body.preheader, %while.body
  %vecSrcA.0118 = phi <4 x float> [ %vecSrcA.0, %while.body ], [ %vecSrcA.0109, %while.body.preheader ]
  %vecSrcB.0117 = phi <4 x float> [ %vecSrcB.0, %while.body ], [ %vecSrcB.0107, %while.body.preheader ]
  %pSrcB.addr.0116 = phi ptr [ %pSrcB.addr.0, %while.body ], [ %pSrcB.addr.0105, %while.body.preheader ]
  %pSrcA.addr.0115 = phi ptr [ %pSrcA.addr.0, %while.body ], [ %pSrcA.addr.0104, %while.body.preheader ]
  %vec_acc.0114 = phi <4 x float> [ %7, %while.body ], [ zeroinitializer, %while.body.preheader ]
  %vecSrcB.0.in.in113 = phi ptr [ %add.ptr4, %while.body ], [ %pSrcB, %while.body.preheader ]
  %vecSrcA.0.in.in112 = phi ptr [ %add.ptr3, %while.body ], [ %pSrcA, %while.body.preheader ]
  %blkCnt.0.in111 = phi i32 [ %blkCnt.0, %while.body ], [ %shr, %while.body.preheader ]
  %blkCnt.0 = add nsw i32 %blkCnt.0.in111, -1
  %0 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> %vec_acc.0114, <4 x float> %vecSrcA.0118, <4 x float> %vecSrcB.0117)
  %1 = bitcast ptr %pSrcA.addr.0115 to ptr
  %2 = load <4 x float>, ptr %1, align 4
  %add.ptr3 = getelementptr inbounds float, ptr %vecSrcA.0.in.in112, i32 8
  %3 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> %0, <4 x float> %vecSrcA.0118, <4 x float> %vecSrcB.0117)
  %4 = bitcast ptr %pSrcB.addr.0116 to ptr
  %5 = load <4 x float>, ptr %4, align 4
  %add.ptr4 = getelementptr inbounds float, ptr %vecSrcB.0.in.in113, i32 8
  %6 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> %3, <4 x float> %2, <4 x float> %5)
  %7 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> %6, <4 x float> %2, <4 x float> %5)
  %pSrcA.addr.0 = getelementptr inbounds float, ptr %vecSrcA.0.in.in112, i32 12
  %pSrcB.addr.0 = getelementptr inbounds float, ptr %vecSrcB.0.in.in113, i32 12
  %vecSrcB.0.in = bitcast ptr %add.ptr4 to ptr
  %vecSrcB.0 = load <4 x float>, ptr %vecSrcB.0.in, align 4
  %vecSrcA.0.in = bitcast ptr %add.ptr3 to ptr
  %vecSrcA.0 = load <4 x float>, ptr %vecSrcA.0.in, align 4
  %cmp2 = icmp sgt i32 %blkCnt.0.in111, 2
  br i1 %cmp2, label %while.body, label %while.end

while.end:                                        ; preds = %while.body
  %8 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> %7, <4 x float> %vecSrcA.0, <4 x float> %vecSrcB.0)
  %9 = bitcast ptr %pSrcA.addr.0 to ptr
  %10 = load <4 x float>, ptr %9, align 4
  %11 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> %8, <4 x float> %vecSrcA.0, <4 x float> %vecSrcB.0)
  %12 = bitcast ptr %pSrcB.addr.0 to ptr
  %13 = load <4 x float>, ptr %12, align 4
  %14 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> %11, <4 x float> %10, <4 x float> %13)
  %15 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> %14, <4 x float> %10, <4 x float> %13)
  %and = shl i32 %numSamples, 1
  %mul = and i32 %and, 6
  %cmp8123.not = icmp eq i32 %mul, 0
  br i1 %cmp8123.not, label %if.end, label %while.body9

while.body9:                                      ; preds = %while.end
  %16 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %mul)
  %add.ptr10 = getelementptr inbounds float, ptr %vecSrcA.0.in.in112, i32 16
  %add.ptr11 = getelementptr inbounds float, ptr %vecSrcB.0.in.in113, i32 16
  %17 = bitcast ptr %add.ptr10 to ptr
  %18 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr nonnull %17, i32 4, <4 x i1> %16, <4 x float> zeroinitializer)
  %19 = bitcast ptr %add.ptr11 to ptr
  %20 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr nonnull %19, i32 4, <4 x i1> %16, <4 x float> zeroinitializer)
  %21 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 0, <4 x float> %15, <4 x float> %18, <4 x float> %20, <4 x i1> %16)
  %22 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 1, <4 x float> %21, <4 x float> %18, <4 x float> %20, <4 x i1> %16)
  %cmp8 = icmp ugt i32 %mul, 4
  br i1 %cmp8, label %while.body9.1, label %if.end

if.else:                                          ; preds = %entry
  %mul14 = shl nuw nsw i32 %numSamples, 1
  br label %do.body

do.body:                                          ; preds = %do.body, %if.else
  %blkCnt.2 = phi i32 [ %mul14, %if.else ], [ %sub18, %do.body ]
  %vec_acc.2 = phi <4 x float> [ zeroinitializer, %if.else ], [ %29, %do.body ]
  %pSrcB.addr.2 = phi ptr [ %pSrcB, %if.else ], [ %add.ptr17, %do.body ]
  %pSrcA.addr.2 = phi ptr [ %pSrcA, %if.else ], [ %add.ptr16, %do.body ]
  %23 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.2)
  %24 = bitcast ptr %pSrcA.addr.2 to ptr
  %25 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %24, i32 4, <4 x i1> %23, <4 x float> zeroinitializer)
  %26 = bitcast ptr %pSrcB.addr.2 to ptr
  %27 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %26, i32 4, <4 x i1> %23, <4 x float> zeroinitializer)
  %28 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 0, <4 x float> %vec_acc.2, <4 x float> %25, <4 x float> %27, <4 x i1> %23)
  %29 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 1, <4 x float> %28, <4 x float> %25, <4 x float> %27, <4 x i1> %23)
  %add.ptr16 = getelementptr inbounds float, ptr %pSrcA.addr.2, i32 4
  %add.ptr17 = getelementptr inbounds float, ptr %pSrcB.addr.2, i32 4
  %sub18 = add nsw i32 %blkCnt.2, -4
  %cmp19 = icmp sgt i32 %blkCnt.2, 4
  br i1 %cmp19, label %do.body, label %if.end

if.end:                                           ; preds = %do.body, %while.body9, %while.body9.1, %while.end
  %vec_acc.3 = phi <4 x float> [ %15, %while.end ], [ %22, %while.body9 ], [ %40, %while.body9.1 ], [ %29, %do.body ]
  %30 = extractelement <4 x float> %vec_acc.3, i32 0
  %31 = extractelement <4 x float> %vec_acc.3, i32 2
  %add = fadd fast float %30, %31
  %32 = extractelement <4 x float> %vec_acc.3, i32 1
  %33 = extractelement <4 x float> %vec_acc.3, i32 3
  %add20 = fadd fast float %32, %33
  store float %add, ptr %realResult, align 4
  store float %add20, ptr %imagResult, align 4
  ret void

while.body9.1:                                    ; preds = %while.body9
  %sub12 = add nsw i32 %mul, -4
  %34 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %sub12)
  %add.ptr10.1 = getelementptr inbounds float, ptr %vecSrcA.0.in.in112, i32 20
  %add.ptr11.1 = getelementptr inbounds float, ptr %vecSrcB.0.in.in113, i32 20
  %35 = bitcast ptr %add.ptr10.1 to ptr
  %36 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr nonnull %35, i32 4, <4 x i1> %34, <4 x float> zeroinitializer)
  %37 = bitcast ptr %add.ptr11.1 to ptr
  %38 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr nonnull %37, i32 4, <4 x i1> %34, <4 x float> zeroinitializer)
  %39 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 0, <4 x float> %22, <4 x float> %36, <4 x float> %38, <4 x i1> %34)
  %40 = tail call fast <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 1, <4 x float> %39, <4 x float> %36, <4 x float> %38, <4 x i1> %34)
  br label %if.end
}
; Declarations for the MVE VCMLA / VCTP intrinsics and the masked load used
; by the function above.
declare <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32, <4 x float>, <4 x float>, <4 x float>) #1
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #1
declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>) #2
declare <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32, <4 x float>, <4 x float>, <4 x float>, <4 x i1>) #1