; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,CHECK-128-65536
; RUN: llc -mtriple=riscv32 -riscv-v-vector-bits-max=512 \
; RUN:   -mattr=+v,+zvl512b -verify-machineinstrs \
; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,CHECK-512
; RUN: llc -mtriple=riscv32 -riscv-v-vector-bits-max=64 \
; RUN:   -mattr=+zve64x,+zvl64b -verify-machineinstrs \
; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,CHECK-64

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  i32)

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    vsetivli a2, 3, e64, m1, ta, ma
; CHECK-128-65536-NEXT:    slli a2, a2, 1
; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli zero, 6, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 3)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    vsetivli a2, 8, e64, m1, ta, ma
; CHECK-128-65536-NEXT:    slli a2, a2, 1
; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 8)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    vsetivli a2, 9, e64, m1, ta, ma
; CHECK-128-65536-NEXT:    slli a2, a2, 1
; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli a2, 9, e64, m1, ta, ma
; CHECK-512-NEXT:    slli a2, a2, 1
; CHECK-512-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 9)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    vsetivli a2, 15, e64, m1, ta, ma
; CHECK-128-65536-NEXT:    slli a2, a2, 1
; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli a2, 15, e64, m1, ta, ma
; CHECK-512-NEXT:    slli a2, a2, 1
; CHECK-512-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 15)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    vsetivli a2, 16, e64, m1, ta, ma
; CHECK-128-65536-NEXT:    slli a2, a2, 1
; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 16)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    li a2, 2047
; CHECK-128-65536-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; CHECK-128-65536-NEXT:    slli a2, a2, 1
; CHECK-128-65536-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 2047)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
; CHECK-128-65536:       # %bb.0: # %entry
; CHECK-128-65536-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-128-65536-NEXT:    ret
;
; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
; CHECK-512:       # %bb.0: # %entry
; CHECK-512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-512-NEXT:    ret
;
; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
; CHECK-64:       # %bb.0: # %entry
; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
; CHECK-64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 2048)

  ret <vscale x 1 x i64> %a
}