; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 -mattr=+sve2p1 < %s | FileCheck %s

; == WHILEGE ==

define <vscale x 16 x i1> @whilege_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilege_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilege.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilege_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilege_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilege.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilege_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilege_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilege.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilege_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilege_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilege.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; == WHILEGT ==

define <vscale x 16 x i1> @whilegt_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilegt_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilegt_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilegt_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilegt_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilegt_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilegt_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilegt_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; == WHILEHI ==

define <vscale x 16 x i1> @whilehi_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehi_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilehi_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehi_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilehi_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehi_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilehi_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehi_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; == WHILEHS ==

define <vscale x 16 x i1> @whilehs_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehs_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilehs_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehs_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilehs_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehs_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilehs_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehs_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; == WHILELE ==

define <vscale x 16 x i1> @whilele_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilele_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilele.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilele_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilele_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilele.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilele_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilele_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilele.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilele_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilele_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilele.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; == WHILELO ==

define <vscale x 16 x i1> @whilelo_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelo_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilelo_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelo_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilelo_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelo_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilelo_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelo_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; == WHILELS ==

define <vscale x 16 x i1> @whilels_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilels_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilels.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilels_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilels_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilels.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilels_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilels_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilels.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilels_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilels_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilels.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; == WHILELT ==

define <vscale x 16 x i1> @whilelt_x2_nxv16i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelt_x2_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt { p0.b, p1.b }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv16i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilelt_x2_nxv8i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelt_x2_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv8i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilelt_x2_nxv4i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelt_x2_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv4i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilelt_x2_nxv2i1(i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelt_x2_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    // kill: def $p0 killed $p0 killed $p0_p1
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv2i1(i64 %m, i64 %n)
  %res = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  ret <vscale x 2 x i1> %res
}


; Test that we get good code quality when using while in combination with other intrinsics

define <vscale x 32 x i1> @codegen_whilege_b16_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilege_b16_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilege { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilege.x2.nxv8i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}

define <vscale x 32 x i1> @codegen_whilegt_b32_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilegt_b32_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilegt { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv4i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}

define <vscale x 32 x i1> @codegen_whilehi_b64_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilehi_b64_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilehi { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv2i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}

define <vscale x 32 x i1> @codegen_whilehs_b16_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilehs_b16_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilehs { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv8i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}

define <vscale x 32 x i1> @codegen_whilele_b32_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilele_b32_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilele { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilele.x2.nxv4i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}

define <vscale x 32 x i1> @codegen_whilelo_b64_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilelo_b64_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilelo { p0.d, p1.d }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv2i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 2 x i1>, <vscale x 2 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}

define <vscale x 32 x i1> @codegen_whilels_b16_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilels_b16_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilels { p0.h, p1.h }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilels.x2.nxv8i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 8 x i1>, <vscale x 8 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}

define <vscale x 32 x i1> @codegen_whilelt_b32_x2(i64 noundef %op1, i64 noundef %op2) nounwind {
; CHECK-LABEL: codegen_whilelt_b32_x2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilelt { p0.s, p1.s }, x0, x1
; CHECK-NEXT:    ret
entry:
  %0 = tail call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv4i1(i64 %op1, i64 %op2)
  %1 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 0
  %2 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  %3 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> poison, <vscale x 16 x i1> %2, i64 0)
  %4 = extractvalue { <vscale x 4 x i1>, <vscale x 4 x i1> } %0, 1
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %4)
  %6 = tail call <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1> %3, <vscale x 16 x i1> %5, i64 16)
  ret <vscale x 32 x i1> %6
}


; == Test that we use predicate registers starting at a multiple of 2 ==

define <vscale x 16 x i1> @whilege_x2_nxv16i1_reg_off(<vscale x 16 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilege_x2_nxv16i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege { p2.b, p3.b }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilege.x2.nxv16i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  %res = and <vscale x 16 x i1> %part1, %p0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilegt_x2_nxv8i1_reg_off(<vscale x 8 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilegt_x2_nxv8i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt { p2.h, p3.h }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv8i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  %res = and <vscale x 8 x i1> %part1, %p0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilehi_x2_nxv4i1_reg_off(<vscale x 4 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehi_x2_nxv4i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi { p2.s, p3.s }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv4i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  %res = and <vscale x 4 x i1> %part1, %p0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilehs_x2_nxv2i1_reg_off(<vscale x 2 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilehs_x2_nxv2i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs { p2.d, p3.d }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv2i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  %res = and <vscale x 2 x i1> %part1, %p0
  ret <vscale x 2 x i1> %res
}

define <vscale x 16 x i1> @whilele_x2_nxv16i1_reg_off(<vscale x 16 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilele_x2_nxv16i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele { p2.b, p3.b }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilele.x2.nxv16i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 16 x i1>, <vscale x 16 x i1>} %pp, 0
  %res = and <vscale x 16 x i1> %part1, %p0
  ret <vscale x 16 x i1> %res
}

define <vscale x 8 x i1> @whilelo_x2_nxv8i1_reg_off(<vscale x 8 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelo_x2_nxv8i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo { p2.h, p3.h }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv8i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 8 x i1>, <vscale x 8 x i1>} %pp, 0
  %res = and <vscale x 8 x i1> %part1, %p0
  ret <vscale x 8 x i1> %res
}

define <vscale x 4 x i1> @whilels_x2_nxv4i1_reg_off(<vscale x 4 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilels_x2_nxv4i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels { p2.s, p3.s }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilels.x2.nxv4i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 4 x i1>, <vscale x 4 x i1>} %pp, 0
  %res = and <vscale x 4 x i1> %part1, %p0
  ret <vscale x 4 x i1> %res
}

define <vscale x 2 x i1> @whilelt_x2_nxv2i1_reg_off(<vscale x 2 x i1> %p0, i64 %m, i64 %n) nounwind {
; CHECK-LABEL: whilelt_x2_nxv2i1_reg_off:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt { p2.d, p3.d }, x0, x1
; CHECK-NEXT:    and p0.b, p2/z, p2.b, p0.b
; CHECK-NEXT:    ret
  %pp = call { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv2i1(i64 %m, i64 %n)
  %part1 = extractvalue {<vscale x 2 x i1>, <vscale x 2 x i1>} %pp, 0
  %res = and <vscale x 2 x i1> %part1, %p0
  ret <vscale x 2 x i1> %res
}

; == WHILEGE ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilege.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilege.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilege.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilege.x2.nxv2i1(i64, i64)

; == WHILEGT ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilegt.x2.nxv2i1(i64, i64)

; == WHILEHI ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehi.x2.nxv2i1(i64, i64)

; == WHILEHS ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilehs.x2.nxv2i1(i64, i64)

; == WHILELE ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilele.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilele.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilele.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilele.x2.nxv2i1(i64, i64)

; == WHILELO ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelo.x2.nxv2i1(i64, i64)

; == WHILELS ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilels.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilels.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilels.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilels.x2.nxv2i1(i64, i64)

; == WHILELT ==
declare { <vscale x 16 x i1>, <vscale x 16 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv16i1(i64, i64)
declare { <vscale x 8 x i1>, <vscale x 8 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv8i1(i64, i64)
declare { <vscale x 4 x i1>, <vscale x 4 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv4i1(i64, i64)
declare { <vscale x 2 x i1>, <vscale x 2 x i1> } @llvm.aarch64.sve.whilelt.x2.nxv2i1(i64, i64)

; == SVBOOL CONVERSION ==
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)

; == VECTOR INSERTS ==
declare <vscale x 32 x i1> @llvm.vector.insert.nxv32i1.nxv16i1(<vscale x 32 x i1>, <vscale x 16 x i1>, i64 immarg)