; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 -lsr-preferred-addressing-mode=preindexed %s -o - | FileCheck %s

; In LSR, for constant offsets and steps, we can generate pre-inc
; accesses by making the offset equal to the step and generating a
; reuse formula. However, there are cases where the step results in an
; illegal addressing mode.
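;
; As a rough illustration (not part of the checked output), a pre-inc
; access folds the pointer step into the memory operation as an AArch64
; pre-indexed store, e.g.:
;   str wzr, [x9, #16]!   // store to x9+16 and write x9+16 back to x9
; instead of a separate "add x9, x9, #16" followed by "str wzr, [x9]".
; The pre-index immediate is a signed 9-bit field, so only steps in
; [-256, 255] should be encodable this way.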

; In this test we set the preferred addressing mode to preindexed in
; order to exercise a scenario where the step results in an illegal
; addressing mode, so LSR should not generate a reuse formula.
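;
; Concretely, the inner array of %"Type" below is [338 x i8], so the
; loop step is 338 bytes. Assuming the signed 9-bit pre-index immediate
; range sketched above, a step of #338 cannot be folded into a
; pre-indexed store, which is why the loop below keeps the separate
; "add x9, x9, #338".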

; This test was created in order to reproduce a bug observed during a
; bootstrap build on an AArch64 machine, where the preferred addressing
; mode is preindexed.

%"Type" = type <{[166 x [338 x i8]]}>

define void @test_lsr_pre_inc_offset_check(ptr %p) {
; CHECK-LABEL: test_lsr_pre_inc_offset_check:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #165
; CHECK-NEXT:    add x9, x0, #339
; CHECK-NEXT:    mov w10, #2
; CHECK-NEXT:  .LBB0_1: // %main
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    str wzr, [x9]
; CHECK-NEXT:    subs x8, x8, #1
; CHECK-NEXT:    strb w10, [x9, #1]
; CHECK-NEXT:    add x9, x9, #338
; CHECK-NEXT:    b.ne .LBB0_1
; CHECK-NEXT:  // %bb.2: // %exit
; CHECK-NEXT:    ret
entry:
  br label %main
exit:
  ret void
if.then:
  %arrayidx.i = getelementptr inbounds %"Type", ptr %p, i64 0, i32 0, i64 %indvars, i64 1
  store i32 0, ptr %arrayidx.i, align 1
  br label %if.end
if.end:
  %arrayidx.p = getelementptr inbounds %"Type", ptr %p, i64 0, i32 0, i64 %indvars, i64 2
  store i8 2, ptr %arrayidx.p, align 1
  %indvars.iv.next = add nuw nsw i64 %indvars, 1
  %add.i = add nuw i8 %begin, 1
  %cmp.i.not = icmp eq i64 %indvars.iv.next, 166
  br i1 %cmp.i.not, label %exit, label %main
main:
  %begin = phi i8 [ 1, %entry ], [ %add.i, %if.end ]
  %indvars = phi i64 [ 1, %entry ], [ %indvars.iv.next, %if.end ]
  br label %if.then
}