; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+xtheadmemidx -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+xtheadmemidx -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix RV64
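; Reduced regression test: per the file name, this input used to crash llc
; while folding a scalar load into a vector instruction. With +xtheadmemidx
; the scalar loads select to T-Head indexed loads (th.lbib/th.lrb), which is
; presumably the load form the fold tripped over. The autogenerated CHECK
; lines below only pin the current lowering on rv32 and rv64.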
define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-LABEL: test:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi a3, a2, 1
; RV32-NEXT:    th.lbib a4, (a1), -1, 0
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v8, a4
; RV32-NEXT:    vmv.s.x v9, zero
; RV32-NEXT:    vsetvli zero, a3, e8, mf2, tu, ma
; RV32-NEXT:    vslideup.vx v8, v9, a2
; RV32-NEXT:    addi a2, a0, 1
; RV32-NEXT:  .LBB0_1: # %for.body
; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
; RV32-NEXT:    th.lrb a0, a1, a0, 0
; RV32-NEXT:    vsetivli zero, 1, e8, m1, tu, ma
; RV32-NEXT:    vmv1r.v v9, v8
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vmseq.vi v9, v9, 0
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    andi a3, a0, 255
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    bnez a3, .LBB0_1
; RV32-NEXT:  # %bb.2: # %if.then381
; RV32-NEXT:    li a0, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: test:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a3, a2, 1
; RV64-NEXT:    th.lbib a4, (a1), -1, 0
; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT:    vmv.v.x v8, a4
; RV64-NEXT:    vmv.s.x v9, zero
; RV64-NEXT:    vsetvli zero, a3, e8, mf2, tu, ma
; RV64-NEXT:    vslideup.vx v8, v9, a2
; RV64-NEXT:    addi a2, a0, 1
; RV64-NEXT:  .LBB0_1: # %for.body
; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
; RV64-NEXT:    sext.w a0, a0
; RV64-NEXT:    th.lrb a0, a1, a0, 0
; RV64-NEXT:    vsetivli zero, 1, e8, m1, tu, ma
; RV64-NEXT:    vmv1r.v v9, v8
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT:    vmseq.vi v9, v9, 0
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    andi a3, a0, 255
; RV64-NEXT:    mv a0, a2
; RV64-NEXT:    bnez a3, .LBB0_1
; RV64-NEXT:  # %bb.2: # %if.then381
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %size.actual = phi i32 [%size, %entry], [%size.inc, %for.body]
  %add.ptr1 = getelementptr i8, ptr %add.ptr, i32 -1
  %add.ptr2 = getelementptr i8, ptr %add.ptr1, i32 %size.actual
  %0 = load i8, ptr %add.ptr1, align 1
  %1 = load i8, ptr %add.ptr2, align 1
  %2 = insertelement <8 x i8> poison, i8 %0, i64 0
  %3 = insertelement <8 x i8> %2, i8 0, i64 %const
  %4 = insertelement <8 x i8> %3, i8 %1, i64 0
  %5 = icmp ult <8 x i8> %4, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %6 = bitcast <8 x i1> %5 to i8
  %7 = zext i8 %6 to i32
  %cond = icmp eq i32 %7, 0
  %size.inc = add i32 %size, 1
  br i1 %cond, label %if.then381, label %for.body

if.then381:                                       ; preds = %for.body
  ret i32 0
}