; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s | FileCheck %s

; Check that (shr (shl X, 56), 48) is not mistakenly turned into
; a (shr X, -8) that gets subsequently "optimized away" as undef
; PR4254

; After fixing PR24373, the shift pair
;   shlq $56, %rdi
;   sarq $48, %rdi
; folds into
;   movsbq %dil, %rax
;   shlq $8, %rax
; which is better for x86.
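; Bit-level sketch of why the fold is valid (illustrative, with 0xXY standing
; for the low byte of %b):
;   shl i64 %b, 56     -> 0xXY00000000000000
;   ashr i64 %shl, 48  -> sext(i8 0xXY) << 8
; i.e. sign-extend the low byte of %b and shift it left by 8, which is exactly
; the movsbq + shlq $8 pair checked below. The 'or %shr, 1' sets bit 0, which
; is known to be zero after the shift, so it lowers to incq.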

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"

define i64 @foo(i64 %b) nounwind readnone {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movsbq %dil, %rax
; CHECK-NEXT:    shlq $8, %rax
; CHECK-NEXT:    incq %rax
; CHECK-NEXT:    retq
entry:
  %shl = shl i64 %b, 56
  %shr = ashr i64 %shl, 48
  %add5 = or i64 %shr, 1
  ret i64 %add5
}