xref: /llvm-project/llvm/test/CodeGen/AArch64/arm64-srl-and.ll (revision 5ddce70ef0e5a641d7fea95e31fc5e2439cb98cb)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -O3 < %s | FileCheck %s

; This used to miscompile:
; The 16-bit -1 should not become 32-bit -1 (sub w8, w8, #1).
@g = global i16 0, align 4

; Regression test: computes zext(x^50 - 1) & (x^50 != 0) for the i16 load of @g.
; The add of i16 -1 must be materialized as a 32-bit add of #65535 (so the
; lsr #16 sees correct high bits), not narrowed to "sub w8, w8, #1".
define i32 @srl_and()  {
; CHECK-LABEL: srl_and:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    adrp x8, :got:g
; CHECK-NEXT:    mov w9, #50
; CHECK-NEXT:    ldr x8, [x8, :got_lo12:g]
; CHECK-NEXT:    ldrh w8, [x8]
; CHECK-NEXT:    eor w8, w8, w9
; CHECK-NEXT:    mov w9, #65535
; CHECK-NEXT:    add w8, w8, w9
; CHECK-NEXT:    and w0, w8, w8, lsr #16
; CHECK-NEXT:    ret
entry:
  %0 = load i16, ptr @g, align 4
  %1 = xor i16 %0, 50
  %tobool = icmp ne i16 %1, 0
  %lor.ext = zext i1 %tobool to i32
  %sub = add i16 %1, -1

  %srl = zext i16 %sub to i32
  %and = and i32 %srl, %lor.ext

  ret i32 %and
}