; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32NOM
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32M

define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i8> %v, i32 0
  ret i8 %r
}

define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i8> %v, i32 2
  ret i8 %r
}

define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i8> %v, i32 %idx
  ret i8 %r
}

define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i8> %v, i32 0
  ret i8 %r
}

define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i8> %v, i32 2
  ret i8 %r
}

define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i8> %v, i32 %idx
  ret i8 %r
}

define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i8> %v, i32 0
  ret i8 %r
}

define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i8> %v, i32 2
  ret i8 %r
}

define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i8> %v, i32 %idx
  ret i8 %r
}

define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
; CHECK-LABEL: extractelt_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i8> %v, i32 0
  ret i8 %r
}

define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
; CHECK-LABEL: extractelt_nxv8i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i8> %v, i32 2
  ret i8 %r
}

define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i8> %v, i32 %idx
  ret i8 %r
}

define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i8> %v, i32 0
  ret i8 %r
}

define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i8> %v, i32 2
  ret i8 %r
}

define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i8> %v, i32 %idx
  ret i8 %r
}

define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i8> %v, i32 0
  ret i8 %r
}

define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i8> %v, i32 2
  ret i8 %r
}

define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv32i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i8> %v, i32 %idx
  ret i8 %r
}

define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 64 x i8> %v, i32 0
  ret i8 %r
}

define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 64 x i8> %v, i32 2
  ret i8 %r
}

define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv64i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 64 x i8> %v, i32 %idx
  ret i8 %r
}

define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i16> %v, i32 0
  ret i16 %r
}

define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i16> %v, i32 2
  ret i16 %r
}

define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i16> %v, i32 %idx
  ret i16 %r
}

define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i16> %v, i32 0
  ret i16 %r
}

define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i16> %v, i32 2
  ret i16 %r
}

define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i16> %v, i32 %idx
  ret i16 %r
}

define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
; CHECK-LABEL: extractelt_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i16> %v, i32 0
  ret i16 %r
}

define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
; CHECK-LABEL: extractelt_nxv4i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i16> %v, i32 2
  ret i16 %r
}

define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i16> %v, i32 %idx
  ret i16 %r
}

define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i16> %v, i32 0
  ret i16 %r
}

define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i16> %v, i32 2
  ret i16 %r
}

define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i16> %v, i32 %idx
  ret i16 %r
}

define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i16> %v, i32 0
  ret i16 %r
}

define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i16> %v, i32 2
  ret i16 %r
}

define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i16> %v, i32 %idx
  ret i16 %r
}

define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i16> %v, i32 0
  ret i16 %r
}

define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i16> %v, i32 2
  ret i16 %r
}

define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv32i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i16> %v, i32 %idx
  ret i16 %r
}

define i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i32> %v, i32 0
  ret i32 %r
}

define i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i32> %v, i32 2
  ret i32 %r
}

define i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i32> %v, i32 %idx
  ret i32 %r
}

define i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
; CHECK-LABEL: extractelt_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i32> %v, i32 0
  ret i32 %r
}

define i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
; CHECK-LABEL: extractelt_nxv2i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i32> %v, i32 2
  ret i32 %r
}

define i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i32> %v, i32 %idx
  ret i32 %r
}

define i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i32> %v, i32 0
  ret i32 %r
}

define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i32> %v, i32 2
  ret i32 %r
}

define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i32> %v, i32 %idx
  ret i32 %r
}

define i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i32> %v, i32 0
  ret i32 %r
}

define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i32> %v, i32 2
  ret i32 %r
}

define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i32> %v, i32 %idx
  ret i32 %r
}

define i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i32> %v, i32 0
  ret i32 %r
}

define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i32> %v, i32 2
  ret i32 %r
}

define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i32> %v, i32 %idx
  ret i32 %r
}

define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
; CHECK-LABEL: extractelt_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vsrl.vx v9, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i64> %v, i32 0
  ret i64 %r
}

define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
; CHECK-LABEL: extractelt_nxv1i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsrl.vx v9, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i64> %v, i32 2
  ret i64 %r
}

define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    vsrl.vx v8, v8, a1
; CHECK-NEXT:    vmv.x.s a1, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x i64> %v, i32 %idx
  ret i64 %r
}

define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
; CHECK-LABEL: extractelt_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vsrl.vx v10, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i64> %v, i32 0
  ret i64 %r
}

define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
; CHECK-LABEL: extractelt_nxv2i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsrl.vx v10, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i64> %v, i32 2
  ret i64 %r
}

define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    vsrl.vx v8, v8, a1
; CHECK-NEXT:    vmv.x.s a1, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x i64> %v, i32 %idx
  ret i64 %r
}

define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
; CHECK-NEXT:    vsrl.vx v12, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i64> %v, i32 0
  ret i64 %r
}

define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsrl.vx v12, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i64> %v, i32 2
  ret i64 %r
}

define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    vsrl.vx v8, v8, a1
; CHECK-NEXT:    vmv.x.s a1, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x i64> %v, i32 %idx
  ret i64 %r
}

define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vsrl.vx v16, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i64> %v, i32 0
  ret i64 %r
}

define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsrl.vx v16, v8, a0
; CHECK-NEXT:    vmv.x.s a1, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i64> %v, i32 2
  ret i64 %r
}

define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    vsrl.vx v8, v8, a1
; CHECK-NEXT:    vmv.x.s a1, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x i64> %v, i32 %idx
  ret i64 %r
}

define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
; CHECK-LABEL: extractelt_add_nxv4i32_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    addi a0, a0, 3
; CHECK-NEXT:    ret
  %bo = add <vscale x 4 x i32> %x, splat (i32 3)
  %ext = extractelement <vscale x 4 x i32> %bo, i32 2
  ret i32 %ext
}

define i32 @extractelt_sub_nxv4i32_splat(<vscale x 4 x i32> %x) {
; CHECK-LABEL: extractelt_sub_nxv4i32_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 1
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    li a1, 3
; CHECK-NEXT:    sub a0, a1, a0
; CHECK-NEXT:    ret
  %bo = sub <vscale x 4 x i32> splat (i32 3), %x
  %ext = extractelement <vscale x 4 x i32> %bo, i32 1
  ret i32 %ext
}

define i32 @extractelt_mul_nxv4i32_splat(<vscale x 4 x i32> %x) {
; RV32NOM-LABEL: extractelt_mul_nxv4i32_splat:
; RV32NOM:       # %bb.0:
; RV32NOM-NEXT:    li a0, 3
; RV32NOM-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32NOM-NEXT:    vmul.vx v8, v8, a0
; RV32NOM-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32NOM-NEXT:    vslidedown.vi v8, v8, 3
; RV32NOM-NEXT:    vmv.x.s a0, v8
; RV32NOM-NEXT:    ret
;
; RV32M-LABEL: extractelt_mul_nxv4i32_splat:
; RV32M:       # %bb.0:
; RV32M-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32M-NEXT:    vslidedown.vi v8, v8, 3
; RV32M-NEXT:    vmv.x.s a0, v8
; RV32M-NEXT:    slli a1, a0, 1
; RV32M-NEXT:    add a0, a1, a0
; RV32M-NEXT:    ret
  %bo = mul <vscale x 4 x i32> %x, splat (i32 3)
  %ext = extractelement <vscale x 4 x i32> %bo, i32 3
  ret i32 %ext
}

define i32 @extractelt_sdiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
; RV32NOM-LABEL: extractelt_sdiv_nxv4i32_splat:
; RV32NOM:       # %bb.0:
; RV32NOM-NEXT:    lui a0, 349525
; RV32NOM-NEXT:    addi a0, a0, 1366
; RV32NOM-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32NOM-NEXT:    vmulh.vx v8, v8, a0
; RV32NOM-NEXT:    vsrl.vi v10, v8, 31
; RV32NOM-NEXT:    vadd.vv v8, v8, v10
; RV32NOM-NEXT:    vmv.x.s a0, v8
; RV32NOM-NEXT:    ret
;
; RV32M-LABEL: extractelt_sdiv_nxv4i32_splat:
; RV32M:       # %bb.0:
; RV32M-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32M-NEXT:    vmv.x.s a0, v8
; RV32M-NEXT:    lui a1, 349525
; RV32M-NEXT:    addi a1, a1, 1366
; RV32M-NEXT:    mulh a0, a0, a1
; RV32M-NEXT:    srli a1, a0, 31
; RV32M-NEXT:    add a0, a0, a1
; RV32M-NEXT:    ret
  %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
  %ext = extractelement <vscale x 4 x i32> %bo, i32 0
  ret i32 %ext
}

; NOTE(review): this function previously used `sdiv` (copy/paste from
; extractelt_sdiv_nxv4i32_splat above), so the udiv lowering was never
; exercised. The IR now uses `udiv`; the CHECK lines below were updated by
; hand to the expected unsigned magic-number sequence (mulhu by 0xAAAAAAAB,
; i.e. lui 699051 / addi -1365, then a shift right by 1) and should be
; confirmed by regenerating with utils/update_llc_test_checks.py.
define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
; RV32NOM-LABEL: extractelt_udiv_nxv4i32_splat:
; RV32NOM:       # %bb.0:
; RV32NOM-NEXT:    lui a0, 699051
; RV32NOM-NEXT:    addi a0, a0, -1365
; RV32NOM-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32NOM-NEXT:    vmulhu.vx v8, v8, a0
; RV32NOM-NEXT:    vsrl.vi v8, v8, 1
; RV32NOM-NEXT:    vmv.x.s a0, v8
; RV32NOM-NEXT:    ret
;
; RV32M-LABEL: extractelt_udiv_nxv4i32_splat:
; RV32M:       # %bb.0:
; RV32M-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32M-NEXT:    vmv.x.s a0, v8
; RV32M-NEXT:    lui a1, 699051
; RV32M-NEXT:    addi a1, a1, -1365
; RV32M-NEXT:    mulhu a0, a0, a1
; RV32M-NEXT:    srli a0, a0, 1
; RV32M-NEXT:    ret
  %bo = udiv <vscale x 4 x i32> %x, splat (i32 3)
  %ext = extractelement <vscale x 4 x i32> %bo, i32 0
  ret i32 %ext
}

define i32 @extractelt_nxv32i32_0(<vscale x 32 x i32> %v) {
; CHECK-LABEL: extractelt_nxv32i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i32> %v, i32 0
  ret i32 %r
}

; A negative index is poison; lowering goes through the stack (spill both
; register groups, then load through a computed address).
define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
; CHECK-LABEL: extractelt_nxv32i32_neg1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -80
; CHECK-NEXT:    .cfi_def_cfa_offset 80
; CHECK-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; CHECK-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -4
; CHECK-NEXT:    .cfi_offset s0, -8
; CHECK-NEXT:    addi s0, sp, 80
; CHECK-NEXT:    .cfi_def_cfa s0, 0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    andi sp, sp, -64
; CHECK-NEXT:    addi a0, sp, 64
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    slli a2, a1, 3
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    add a2, a0, a2
; CHECK-NEXT:    vs8r.v v16, (a2)
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    lw a0, -4(a0)
; CHECK-NEXT:    addi sp, s0, -80
; CHECK-NEXT:    .cfi_def_cfa sp, 80
; CHECK-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; CHECK-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    .cfi_restore s0
; CHECK-NEXT:    addi sp, sp, 80
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i32> %v, i32 -1
  ret i32 %r
}

define i32 @extractelt_nxv32i32_imm(<vscale x 32 x i32> %v) {
; CHECK-LABEL: extractelt_nxv32i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i32> %v, i32 2
  ret i32 %r
}

define i32 @extractelt_nxv32i32_idx(<vscale x 32 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv32i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a2, a1, 2
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    bltu a0, a2, .LBB74_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:  .LBB74_2:
; CHECK-NEXT:    addi sp, sp, -80
; CHECK-NEXT:    .cfi_def_cfa_offset 80
; CHECK-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; CHECK-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -4
; CHECK-NEXT:    .cfi_offset s0, -8
; CHECK-NEXT:    addi s0, sp, 80
; CHECK-NEXT:    .cfi_def_cfa s0, 0
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 4
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    andi sp, sp, -64
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    addi a2, sp, 64
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a0, a2, a0
; CHECK-NEXT:    vs8r.v v8, (a2)
; CHECK-NEXT:    add a1, a2, a1
; CHECK-NEXT:    vs8r.v v16, (a1)
; CHECK-NEXT:    lw a0, 0(a0)
; CHECK-NEXT:    addi sp, s0, -80
; CHECK-NEXT:    .cfi_def_cfa sp, 80
; CHECK-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; CHECK-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    .cfi_restore s0
; CHECK-NEXT:    addi sp, sp, 80
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 32 x i32> %v, i32 %idx
  ret i32 %r
}

define i64 @extractelt_nxv16i64_0(<vscale x 16 x i64> %v) {
; CHECK-LABEL: extractelt_nxv16i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    vslidedown.vi v8, v8, 1
; CHECK-NEXT:    vmv.x.s a1, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x i64> %v, i32 0
  ret i64 %r
}