; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+mve -o - | FileCheck %s

target triple = "thumbv8.1m.main-none-none-eabi"


; Expected to not transform
define arm_aapcs_vfpcc <2 x i8> @complex_add_v2i8(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: complex_add_v2i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vmov r2, s4
; CHECK-NEXT:    add r0, r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    subs r1, r2, r1
; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <2 x i8> %a, <2 x i8> zeroinitializer, <1 x i32> <i32 0>
  %a.imag = shufflevector <2 x i8> %a, <2 x i8> zeroinitializer, <1 x i32> <i32 1>
  %b.real = shufflevector <2 x i8> %b, <2 x i8> zeroinitializer, <1 x i32> <i32 0>
  %b.imag = shufflevector <2 x i8> %b, <2 x i8> zeroinitializer, <1 x i32> <i32 1>
  %0 = sub <1 x i8> %b.real, %a.imag
  %1 = add <1 x i8> %b.imag, %a.real
  %interleaved.vec = shufflevector <1 x i8> %0, <1 x i8> %1, <2 x i32> <i32 0, i32 1>
  ret <2 x i8> %interleaved.vec
}

; Expected to not transform
define arm_aapcs_vfpcc <4 x i8> @complex_add_v4i8(<4 x i8> %a, <4 x i8> %b) {
; CHECK-LABEL: complex_add_v4i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vrev64.32 q2, q0
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vmov r0, s10
; CHECK-NEXT:    vrev64.32 q3, q1
; CHECK-NEXT:    vmov r2, s4
; CHECK-NEXT:    subs r0, r1, r0
; CHECK-NEXT:    vmov r1, s8
; CHECK-NEXT:    subs r1, r2, r1
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
; CHECK-NEXT:    vmov r0, s14
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    add r0, r1
; CHECK-NEXT:    vmov r1, s12
; CHECK-NEXT:    add r1, r2
; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
; CHECK-NEXT:    vmov q0, q2
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <2 x i32> <i32 0, i32 2>
  %a.imag = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <2 x i32> <i32 1, i32 3>
  %b.real = shufflevector <4 x i8> %b, <4 x i8> zeroinitializer, <2 x i32> <i32 0, i32 2>
  %b.imag = shufflevector <4 x i8> %b, <4 x i8> zeroinitializer, <2 x i32> <i32 1, i32 3>
  %0 = sub <2 x i8> %b.real, %a.imag
  %1 = add <2 x i8> %b.imag, %a.real
  %interleaved.vec = shufflevector <2 x i8> %0, <2 x i8> %1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i8> %interleaved.vec
}

; Expected to transform
define arm_aapcs_vfpcc <8 x i8> @complex_add_v8i8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: complex_add_v8i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vrev32.16 q2, q1
; CHECK-NEXT:    vadd.i32 q2, q2, q0
; CHECK-NEXT:    vrev32.16 q0, q0
; CHECK-NEXT:    vsub.i32 q0, q1, q0
; CHECK-NEXT:    vmovnt.i32 q0, q2
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %a.imag = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %b.real = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %b.imag = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %0 = sub <4 x i8> %b.real, %a.imag
  %1 = add <4 x i8> %b.imag, %a.real
  %interleaved.vec = shufflevector <4 x i8> %0, <4 x i8> %1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x i8> %interleaved.vec
}

; Expected to transform
define arm_aapcs_vfpcc <16 x i8> @complex_add_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: complex_add_v16i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcadd.i8 q0, q1, q0, #90
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %a.imag = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %b.real = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %b.imag = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %0 = sub <8 x i8> %b.real, %a.imag
  %1 = add <8 x i8> %b.imag, %a.real
  %interleaved.vec = shufflevector <8 x i8> %0, <8 x i8> %1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  ret <16 x i8> %interleaved.vec
}

; Expected to transform
define arm_aapcs_vfpcc <32 x i8> @complex_add_v32i8(<32 x i8> %a, <32 x i8> %b) {
; CHECK-LABEL: complex_add_v32i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcadd.i8 q0, q2, q0, #90
; CHECK-NEXT:    vcadd.i8 q1, q3, q1, #90
; CHECK-NEXT:    bx lr
entry:
  %a.real = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %a.imag = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %b.real = shufflevector <32 x i8> %b, <32 x i8> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %b.imag = shufflevector <32 x i8> %b, <32 x i8> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %0 = sub <16 x i8> %b.real, %a.imag
  %1 = add <16 x i8> %b.imag, %a.real
  %interleaved.vec = shufflevector <16 x i8> %0, <16 x i8> %1, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x i8> %interleaved.vec
}