; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+mve.fp -o - | FileCheck %s

; Codegen test for fast-math complex multiplication written as a pair of
; deinterleaving shufflevectors (even lanes = real, odd lanes = imaginary)
; followed by fmul/fadd/fsub, re-interleaved at the end. With +mve.fp the
; backend is expected to match this pattern and emit a VCMUL/VCMLA pair for
; vectors that are a multiple of the 128-bit MVE register width (<8 x half>
; and wider, as the CHECK lines below show); narrower vectors fall back to
; scalar/shuffle sequences.

target triple = "thumbv8.1m.main-none-none-eabi"

; Expected to not transform
; <2 x half> is a single complex element (32 bits) — the CHECK lines show it
; stays as scalar half-precision mul/fma/fnms plus lane inserts, with no
; vcmul/vcmla emitted.
define arm_aapcs_vfpcc <2 x half> @complex_mul_v2f16(<2 x half> %a, <2 x half> %b) {
; CHECK-LABEL: complex_mul_v2f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovx.f16 s2, s4
; CHECK-NEXT:    vmovx.f16 s8, s0
; CHECK-NEXT:    vmul.f16 s6, s2, s0
; CHECK-NEXT:    vfma.f16 s6, s4, s8
; CHECK-NEXT:    vmul.f16 s8, s8, s2
; CHECK-NEXT:    vfnms.f16 s8, s4, s0
; CHECK-NEXT:    vins.f16 s8, s6
; CHECK-NEXT:    vmov q0, q2
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <2 x half> %a, <2 x half> poison, <1 x i32> <i32 0>
  %a.imag = shufflevector <2 x half> %a, <2 x half> poison, <1 x i32> <i32 1>
  %b.real = shufflevector <2 x half> %b, <2 x half> poison, <1 x i32> <i32 0>
  %b.imag = shufflevector <2 x half> %b, <2 x half> poison, <1 x i32> <i32 1>
  ; imag(result) = b.imag*a.real + b.real*a.imag
  %0 = fmul fast <1 x half> %b.imag, %a.real
  %1 = fmul fast <1 x half> %b.real, %a.imag
  %2 = fadd fast <1 x half> %1, %0
  ; real(result) = b.real*a.real - a.imag*b.imag
  %3 = fmul fast <1 x half> %b.real, %a.real
  %4 = fmul fast <1 x half> %a.imag, %b.imag
  %5 = fsub fast <1 x half> %3, %4
  %interleaved.vec = shufflevector <1 x half> %5, <1 x half> %2, <2 x i32> <i32 0, i32 1>
  ret <2 x half> %interleaved.vec
}

; Expected to not transform
; <4 x half> (64 bits) is still narrower than a full MVE q-register; the
; CHECK lines show explicit deinterleave (vmovx/vins), plain vector
; mul/fma/neg, and a re-interleave — no vcmul/vcmla.
define arm_aapcs_vfpcc <4 x half> @complex_mul_v4f16(<4 x half> %a, <4 x half> %b) {
; CHECK-LABEL: complex_mul_v4f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmovx.f16 s8, s0
; CHECK-NEXT:    vmovx.f16 s2, s1
; CHECK-NEXT:    vins.f16 s8, s2
; CHECK-NEXT:    vmovx.f16 s12, s4
; CHECK-NEXT:    vmovx.f16 s2, s5
; CHECK-NEXT:    vins.f16 s0, s1
; CHECK-NEXT:    vins.f16 s12, s2
; CHECK-NEXT:    vins.f16 s4, s5
; CHECK-NEXT:    vmul.f16 q4, q3, q0
; CHECK-NEXT:    vfma.f16 q4, q1, q2
; CHECK-NEXT:    vmul.f16 q2, q2, q3
; CHECK-NEXT:    vneg.f16 q2, q2
; CHECK-NEXT:    vfma.f16 q2, q1, q0
; CHECK-NEXT:    vmovx.f16 s0, s16
; CHECK-NEXT:    vmovx.f16 s9, s8
; CHECK-NEXT:    vins.f16 s8, s16
; CHECK-NEXT:    vins.f16 s9, s0
; CHECK-NEXT:    vmov q0, q2
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <4 x half> %a, <4 x half> poison, <2 x i32> <i32 0, i32 2>
  %a.imag = shufflevector <4 x half> %a, <4 x half> poison, <2 x i32> <i32 1, i32 3>
  %b.real = shufflevector <4 x half> %b, <4 x half> poison, <2 x i32> <i32 0, i32 2>
  %b.imag = shufflevector <4 x half> %b, <4 x half> poison, <2 x i32> <i32 1, i32 3>
  %0 = fmul fast <2 x half> %b.imag, %a.real
  %1 = fmul fast <2 x half> %b.real, %a.imag
  %2 = fadd fast <2 x half> %1, %0
  %3 = fmul fast <2 x half> %b.real, %a.real
  %4 = fmul fast <2 x half> %a.imag, %b.imag
  %5 = fsub fast <2 x half> %3, %4
  %interleaved.vec = shufflevector <2 x half> %5, <2 x half> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x half> %interleaved.vec
}

; Expected to transform
; <8 x half> exactly fills one 128-bit q-register: the whole pattern becomes
; a single vcmul (#0) + vcmla (#90) pair.
define arm_aapcs_vfpcc <8 x half> @complex_mul_v8f16(<8 x half> %a, <8 x half> %b) {
; CHECK-LABEL: complex_mul_v8f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmul.f16 q2, q0, q1, #0
; CHECK-NEXT:    vcmla.f16 q2, q0, q1, #90
; CHECK-NEXT:    vmov q0, q2
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %a.imag = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %b.real = shufflevector <8 x half> %b, <8 x half> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %b.imag = shufflevector <8 x half> %b, <8 x half> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %0 = fmul fast <4 x half> %b.imag, %a.real
  %1 = fmul fast <4 x half> %b.real, %a.imag
  %2 = fadd fast <4 x half> %1, %0
  %3 = fmul fast <4 x half> %b.real, %a.real
  %4 = fmul fast <4 x half> %a.imag, %b.imag
  %5 = fsub fast <4 x half> %3, %4
  %interleaved.vec = shufflevector <4 x half> %5, <4 x half> %2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x half> %interleaved.vec
}

; Expected to transform
; Two q-registers' worth of data: the pattern is split into two independent
; vcmul/vcmla pairs, one per 128-bit half.
define arm_aapcs_vfpcc <16 x half> @complex_mul_v16f16(<16 x half> %a, <16 x half> %b) {
; CHECK-LABEL: complex_mul_v16f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vcmul.f16 q4, q0, q2, #0
; CHECK-NEXT:    vcmla.f16 q4, q0, q2, #90
; CHECK-NEXT:    vcmul.f16 q2, q1, q3, #0
; CHECK-NEXT:    vcmla.f16 q2, q1, q3, #90
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vmov q1, q2
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <16 x half> %a, <16 x half> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %a.imag = shufflevector <16 x half> %a, <16 x half> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %b.real = shufflevector <16 x half> %b, <16 x half> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %b.imag = shufflevector <16 x half> %b, <16 x half> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %0 = fmul fast <8 x half> %b.imag, %a.real
  %1 = fmul fast <8 x half> %b.real, %a.imag
  %2 = fadd fast <8 x half> %1, %0
  %3 = fmul fast <8 x half> %b.real, %a.real
  %4 = fmul fast <8 x half> %a.imag, %b.imag
  %5 = fsub fast <8 x half> %3, %4
  %interleaved.vec = shufflevector <8 x half> %5, <8 x half> %2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  ret <16 x half> %interleaved.vec
}

; Expected to transform
; Four q-registers per operand; %b no longer fits in registers, so its four
; quarters are loaded from the stack (vldrw.u32 at sp+#48/#64/#80/#96), each
; feeding its own vcmul/vcmla pair.
define arm_aapcs_vfpcc <32 x half> @complex_mul_v32f16(<32 x half> %a, <32 x half> %b) {
; CHECK-LABEL: complex_mul_v32f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    add r0, sp, #48
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vldrw.u32 q5, [r0]
; CHECK-NEXT:    add r0, sp, #64
; CHECK-NEXT:    vcmul.f16 q0, q0, q5, #0
; CHECK-NEXT:    vcmla.f16 q0, q4, q5, #90
; CHECK-NEXT:    vldrw.u32 q5, [r0]
; CHECK-NEXT:    add r0, sp, #80
; CHECK-NEXT:    vcmul.f16 q4, q1, q5, #0
; CHECK-NEXT:    vcmla.f16 q4, q1, q5, #90
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    add r0, sp, #96
; CHECK-NEXT:    vcmul.f16 q5, q2, q1, #0
; CHECK-NEXT:    vcmla.f16 q5, q2, q1, #90
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vmov q2, q5
; CHECK-NEXT:    vcmul.f16 q6, q3, q1, #0
; CHECK-NEXT:    vcmla.f16 q6, q3, q1, #90
; CHECK-NEXT:    vmov q1, q4
; CHECK-NEXT:    vmov q3, q6
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <32 x half> %a, <32 x half> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %a.imag = shufflevector <32 x half> %a, <32 x half> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %b.real = shufflevector <32 x half> %b, <32 x half> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %b.imag = shufflevector <32 x half> %b, <32 x half> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %0 = fmul fast <16 x half> %b.imag, %a.real
  %1 = fmul fast <16 x half> %b.real, %a.imag
  %2 = fadd fast <16 x half> %1, %0
  %3 = fmul fast <16 x half> %b.real, %a.real
  %4 = fmul fast <16 x half> %a.imag, %b.imag
  %5 = fsub fast <16 x half> %3, %4
  %interleaved.vec = shufflevector <16 x half> %5, <16 x half> %2, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x half> %interleaved.vec
}