; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; AND
;

define <vscale x 16 x i8> @and_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: and_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.b, z0.b, #0x7
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 16 x i1> insertelement (<vscale x 16 x i1> undef, i1 true, i32 0), <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
  %b = shufflevector <vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 7, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @and_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: and_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.h, z0.h, #0xf0
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 8 x i1> insertelement (<vscale x 8 x i1> undef, i1 true, i32 0), <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %b = shufflevector <vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 240, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @and_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: and_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.s, z0.s, #0xffff00
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %b = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 16776960, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @and_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: and_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, #0xfffc000000000000
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %b = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 18445618173802708992, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; BIC
;

define <vscale x 16 x i8> @bic_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: bic_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.b, z0.b, #0x1
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 16 x i1> insertelement (<vscale x 16 x i1> undef, i1 true, i32 0), <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
  %b = shufflevector <vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 254, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @bic_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: bic_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.h, z0.h, #0x1
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 8 x i1> insertelement (<vscale x 8 x i1> undef, i1 true, i32 0), <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %b = shufflevector <vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 65534, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @bic_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: bic_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.s, z0.s, #0xff0000ff
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %b = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 16776960, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @bic_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: bic_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, #0x3ffffffffffff
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %b = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 18445618173802708992, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; EOR
;

define <vscale x 16 x i8> @eor_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: eor_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.b, z0.b, #0xf
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 16 x i1> insertelement (<vscale x 16 x i1> undef, i1 true, i32 0), <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
  %b = shufflevector <vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 15, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @eor_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: eor_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.h, z0.h, #0xfc07
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 8 x i1> insertelement (<vscale x 8 x i1> undef, i1 true, i32 0), <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %b = shufflevector <vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 64519, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @eor_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: eor_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.s, z0.s, #0xffff00
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %b = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 16776960, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @eor_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: eor_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.d, z0.d, #0x1000000000000
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %b = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 281474976710656, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; ORR
;

define <vscale x 16 x i8> @orr_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: orr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.b, z0.b, #0x6
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 16 x i1> insertelement (<vscale x 16 x i1> undef, i1 true, i32 0), <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
  %b = shufflevector <vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 6, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @orr_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: orr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.h, z0.h, #0x8001
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 8 x i1> insertelement (<vscale x 8 x i1> undef, i1 true, i32 0), <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %b = shufflevector <vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 32769, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @orr_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: orr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.s, z0.s, #0xffff
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %b = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 65535, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @orr_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: orr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.d, z0.d, #0x7ffc000000000000
; CHECK-NEXT:    ret
  %pg = shufflevector <vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %b = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 9222246136947933184, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

; As orr_i32 but where pg is i8 based and thus compatible for i32.
define <vscale x 4 x i32> @orr_i32_ptrue_all_b(<vscale x 4 x i32> %a) {
; CHECK-LABEL: orr_i32_ptrue_all_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.s, z0.s, #0xffff
; CHECK-NEXT:    ret
  %pg.b = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %pg.s = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.b)
  %b = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 65535)
  %out = tail call <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1> %pg.s,
                                                                      <vscale x 4 x i32> %a,
                                                                      <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

; As orr_i32 but where pg is i16 based and thus compatible for i32.
define <vscale x 4 x i32> @orr_i32_ptrue_all_h(<vscale x 4 x i32> %a) {
; CHECK-LABEL: orr_i32_ptrue_all_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.s, z0.s, #0xffff
; CHECK-NEXT:    ret
  %pg.h = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %pg.b = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %pg.h)
  %pg.s = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.b)
  %b = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 65535)
  %out = tail call <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1> %pg.s,
                                                                      <vscale x 4 x i32> %a,
                                                                      <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

; As orr_i32 but where pg is i64 based, which is not compatible for i32 and
; thus inactive lanes are important and the immediate form cannot be used.
define <vscale x 4 x i32> @orr_i32_ptrue_all_d(<vscale x 4 x i32> %a) {
; CHECK-LABEL: orr_i32_ptrue_all_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.s, #65535 // =0xffff
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    orr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %pg.d = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.b = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg.d)
  %pg.s = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.b)
  %b = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 65535)
  %out = tail call <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1> %pg.s,
                                                                    <vscale x 4 x i32> %a,
                                                                    <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.and.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.bic.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.bic.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.bic.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)

; NOTE(fix): the overloaded parameter of convert.to.svbool.nxv4i1 must be
; <vscale x 4 x i1> to match the ".nxv4i1" mangling suffix; the previous
; <vscale x 16 x i1> signature is rejected by the IR verifier.
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)