; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; Predicated (merging) logical operations: AND, ORR, EOR, BIC.
;

define <vscale x 16 x i8> @and_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: and_pred_i8:
; CHECK: and z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @and_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: and_pred_i16:
; CHECK: and z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @and_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: and_pred_i32:
; CHECK: and z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @and_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: and_pred_i64:
; CHECK: and z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @or_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: or_pred_i8:
; CHECK: orr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @or_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: or_pred_i16:
; CHECK: orr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @or_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: or_pred_i32:
; CHECK: orr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @or_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: or_pred_i64:
; CHECK: orr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @xor_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: xor_pred_i8:
; CHECK: eor z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @xor_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: xor_pred_i16:
; CHECK: eor z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @xor_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: xor_pred_i32:
; CHECK: eor z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @xor_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: xor_pred_i64:
; CHECK: eor z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @bic_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: bic_pred_i8:
; CHECK: bic z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @bic_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: bic_pred_i16:
; CHECK: bic z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @bic_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: bic_pred_i32:
; CHECK: bic z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @bic_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: bic_pred_i64:
; CHECK: bic z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.bic.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.bic.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.bic.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)