; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; ANDV

define i1 @andv_nxv32i1(<vscale x 32 x i1> %a) {
; CHECK-LABEL: andv_nxv32i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    nots p0.b, p2/z, p0.b
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.and.nxv32i1(<vscale x 32 x i1> %a)
  ret i1 %res
}

define i1 @andv_nxv64i1(<vscale x 64 x i1> %a) {
; CHECK-LABEL: andv_nxv64i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and p1.b, p1/z, p1.b, p3.b
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p2.b
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    ptrue p1.b
; CHECK-NEXT:    nots p0.b, p1/z, p0.b
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.and.nxv64i1(<vscale x 64 x i1> %a)
  ret i1 %res
}

; ORV

define i1 @orv_nxv32i1(<vscale x 32 x i1> %a) {
; CHECK-LABEL: orv_nxv32i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT:    ptest p0, p0.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.or.nxv32i1(<vscale x 32 x i1> %a)
  ret i1 %res
}

; XORV

define i1 @xorv_nxv32i1(<vscale x 32 x i1> %a) {
; CHECK-LABEL: xorv_nxv32i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    cntp x8, p2, p0.b
; CHECK-NEXT:    and w0, w8, #0x1
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.xor.nxv32i1(<vscale x 32 x i1> %a)
  ret i1 %res
}

; SMAXV

define i1 @smaxv_nxv32i1(<vscale x 32 x i1> %a) {
; CHECK-LABEL: smaxv_nxv32i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    nots p0.b, p2/z, p0.b
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.smax.nxv32i1(<vscale x 32 x i1> %a)
  ret i1 %res
}

; SMINV

define i1 @sminv_nxv32i1(<vscale x 32 x i1> %a) {
; CHECK-LABEL: sminv_nxv32i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT:    ptest p0, p0.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.smin.nxv32i1(<vscale x 32 x i1> %a)
  ret i1 %res
}

; UMAXV

define i1 @umaxv_nxv32i1(<vscale x 32 x i1> %a) {
; CHECK-LABEL: umaxv_nxv32i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
; CHECK-NEXT:    ptest p0, p0.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.umax.nxv32i1(<vscale x 32 x i1> %a)
  ret i1 %res
}

; UMINV

define i1 @uminv_nxv32i1(<vscale x 32 x i1> %a) {
; CHECK-LABEL: uminv_nxv32i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    nots p0.b, p2/z, p0.b
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %res = call i1 @llvm.vector.reduce.umin.nxv32i1(<vscale x 32 x i1> %a)
  ret i1 %res
}

declare i1 @llvm.vector.reduce.and.nxv32i1(<vscale x 32 x i1>)
declare i1 @llvm.vector.reduce.and.nxv64i1(<vscale x 64 x i1>)

declare i1 @llvm.vector.reduce.or.nxv32i1(<vscale x 32 x i1>)

declare i1 @llvm.vector.reduce.xor.nxv32i1(<vscale x 32 x i1>)

declare i1 @llvm.vector.reduce.smax.nxv32i1(<vscale x 32 x i1>)

declare i1 @llvm.vector.reduce.smin.nxv32i1(<vscale x 32 x i1>)

declare i1 @llvm.vector.reduce.umax.nxv32i1(<vscale x 32 x i1>)

declare i1 @llvm.vector.reduce.umin.nxv32i1(<vscale x 32 x i1>)