# RUN: llc -mtriple=aarch64 -o - %s \
# RUN: -run-pass register-coalescer | FileCheck %s

# In this test case, the 32-bit copy implements a 32-to-64-bit zero extension
# and relies on the upper 32 bits being zeroed.
# Coalescing it into the result of the 64-bit sign-extending load meant the
# upper 32 bits were incorrectly set when the loaded byte was negative.
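#
# A plausible C-level reproducer (a hypothetical reconstruction from the MIR
# below, not taken from the original bug report) would look like:
#
#   char c = -1;
#
#   long long bug_e(int i32) {
#     int t = c;               /* sign-extending byte load */
#     c = (char)i32;           /* byte store back into the global */
#     return (unsigned int)t; /* 32-to-64-bit zero extension: 0xffffffff */
#   }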

--- |
  @c = local_unnamed_addr global i8 -1, align 4

  define i64 @bug_e(i32 %i32) local_unnamed_addr {
    ret i64 0
  }
...
---
name:            bug_e
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $w0

    %1:gpr32 = COPY $w0
    %2:gpr64common = ADRP target-flags(aarch64-page) @c
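    ; LDRSBXui is a sign-extending byte load into a 64-bit register: with
    ; @c == -1, all 64 bits of %3 are set, including bits 32-63.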
    %3:gpr64 = LDRSBXui %2, target-flags(aarch64-pageoff, aarch64-nc) @c :: (dereferenceable load (s8) from @c, align 4)
    %0:gpr32 = COPY %3.sub_32
  ; CHECK: {{.*}}.sub_32:gpr64 = COPY {{.*}}.sub_32
    STRBBui %1, %2, target-flags(aarch64-pageoff, aarch64-nc) @c :: (store (s8) into @c, align 4)
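    ; SUBREG_TO_REG with immediate 0 asserts that bits 32-63 of %8 are zero,
    ; so the coalescer must not merge %0 into the sign-extended %3; it has to
    ; keep a real sub-register copy (matched by the FileCheck line above).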
    %8:gpr64all = SUBREG_TO_REG 0, %0, %subreg.sub_32
    $x0 = COPY %8
  ; CHECK: $x0 = COPY
    RET_ReallyLR implicit $x0
...