; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-enable-vl-optimizer \
; RUN:   -verify-machineinstrs -debug-only=riscv-vl-optimizer -o - 2>&1 %s | FileCheck %s

; REQUIRES: asserts

; GitHub Issue #123862 provided a case where the riscv-vl-optimizer pass was
; very slow. It was found that the case benefited greatly from aborting when
; CommonVL == VLOp. Adding the case from the issue would show up as a
; long-running test rather than a test failure, so we would likely have a hard
; time telling whether it had regressed. Instead, we check for the debug
; output that was responsible for speeding it up.

; Both vadds already use the same immediate AVL (4), so the computed CommonVL
; equals the VL operand and the optimizer aborts early.
define <vscale x 4 x i32> @same_vl_imm(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  ; CHECK: User VL is: 4
  ; CHECK: Abort due to CommonVL == VLOp, no point in reducing.
  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 4)
  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, i64 4)
  ret <vscale x 4 x i32> %w
}

; Same as above, but with the AVL held in a register.
define <vscale x 4 x i32> @same_vl_reg(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 %vl) {
  ; CHECK: User VL is: %3:gprnox0
  ; CHECK: Abort due to CommonVL == VLOp, no point in reducing.
  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 %vl)
  %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, i64 %vl)
  ret <vscale x 4 x i32> %w
}