; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes="default<O3>" -S            < %s | FileCheck %s --check-prefix=SSE
; RUN: opt -passes="default<O3>" -S -mattr=avx < %s | FileCheck %s --check-prefix=AVX

; This test is based on https://github.com/llvm/llvm-project/issues/50778
; It's the unoptimized IR passed through -passes=mem2reg to remove obvious noise.
; This should show cooperation between instcombine, unrolling, inlining,
; and SLP to create the target-optimal vector math+logic ops.
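
; A roughly equivalent C reproducer, reconstructed from the IR below as an
; illustrative sketch (not copied verbatim from the issue):
;
;   unsigned long long foo(unsigned long long x) {
;     return (1048575ULL - x) << 44;
;   }
;   void bar(unsigned long long *a) {
;     for (int i = 0; i < 8; i++)
;       a[i] = foo(a[i]);
;   }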

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
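
; The expected scalar fold: 1048575 == (1 << 20) - 1, the low 20 bits of
; (1048575 - x) equal those of ~x, and the shift by 44 discards everything
; else, so (1048575 - x) << 44 == (~x) << 44 -- the xor+shl in the checks.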
define noundef i64 @foo(i64 noundef %0) {
; SSE-LABEL: @foo(
; SSE-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP0:%.*]], -1
; SSE-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 44
; SSE-NEXT:    ret i64 [[TMP3]]
;
; AVX-LABEL: @foo(
; AVX-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP0:%.*]], -1
; AVX-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 44
; AVX-NEXT:    ret i64 [[TMP3]]
;
  %2 = sub i64 1048575, %0
  %3 = shl i64 %2, 44
  ret i64 %3
}
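
; In @bar, @foo is inlined and the 8-iteration loop is fully unrolled, leaving
; SLP to vectorize the not+shl chain: four <2 x i64> blocks with baseline SSE
; versus two <4 x i64> blocks with -mattr=avx.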
define void @bar(ptr noundef %0) {
; SSE-LABEL: @bar(
; SSE-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[TMP0:%.*]], align 8
; SSE-NEXT:    [[TMP3:%.*]] = xor <2 x i64> [[TMP2]], splat (i64 -1)
; SSE-NEXT:    [[TMP4:%.*]] = shl <2 x i64> [[TMP3]], splat (i64 44)
; SSE-NEXT:    store <2 x i64> [[TMP4]], ptr [[TMP0]], align 8
; SSE-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
; SSE-NEXT:    [[TMP6:%.*]] = load <2 x i64>, ptr [[TMP5]], align 8
; SSE-NEXT:    [[TMP7:%.*]] = xor <2 x i64> [[TMP6]], splat (i64 -1)
; SSE-NEXT:    [[TMP8:%.*]] = shl <2 x i64> [[TMP7]], splat (i64 44)
; SSE-NEXT:    store <2 x i64> [[TMP8]], ptr [[TMP5]], align 8
; SSE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 32
; SSE-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[TMP9]], align 8
; SSE-NEXT:    [[TMP11:%.*]] = xor <2 x i64> [[TMP10]], splat (i64 -1)
; SSE-NEXT:    [[TMP12:%.*]] = shl <2 x i64> [[TMP11]], splat (i64 44)
; SSE-NEXT:    store <2 x i64> [[TMP12]], ptr [[TMP9]], align 8
; SSE-NEXT:    [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 48
; SSE-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[TMP13]], align 8
; SSE-NEXT:    [[TMP15:%.*]] = xor <2 x i64> [[TMP14]], splat (i64 -1)
; SSE-NEXT:    [[TMP16:%.*]] = shl <2 x i64> [[TMP15]], splat (i64 44)
; SSE-NEXT:    store <2 x i64> [[TMP16]], ptr [[TMP13]], align 8
; SSE-NEXT:    ret void
;
; AVX-LABEL: @bar(
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr [[TMP0:%.*]], align 8
; AVX-NEXT:    [[TMP3:%.*]] = xor <4 x i64> [[TMP2]], splat (i64 -1)
; AVX-NEXT:    [[TMP4:%.*]] = shl <4 x i64> [[TMP3]], splat (i64 44)
; AVX-NEXT:    store <4 x i64> [[TMP4]], ptr [[TMP0]], align 8
; AVX-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 32
; AVX-NEXT:    [[TMP6:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8
; AVX-NEXT:    [[TMP7:%.*]] = xor <4 x i64> [[TMP6]], splat (i64 -1)
; AVX-NEXT:    [[TMP8:%.*]] = shl <4 x i64> [[TMP7]], splat (i64 44)
; AVX-NEXT:    store <4 x i64> [[TMP8]], ptr [[TMP5]], align 8
; AVX-NEXT:    ret void
;
  br label %2

2:                                                ; preds = %12, %1
  %.0 = phi i32 [ 0, %1 ], [ %13, %12 ]
  %3 = icmp slt i32 %.0, 8
  br i1 %3, label %5, label %4

4:                                                ; preds = %2
  br label %14

5:                                                ; preds = %2
  %6 = sext i32 %.0 to i64
  %7 = getelementptr inbounds i64, ptr %0, i64 %6
  %8 = load i64, ptr %7, align 8
  %9 = call noundef i64 @foo(i64 noundef %8)
  %10 = sext i32 %.0 to i64
  %11 = getelementptr inbounds i64, ptr %0, i64 %10
  store i64 %9, ptr %11, align 8
  br label %12

12:                                               ; preds = %5
  %13 = add nsw i32 %.0, 1
  br label %2

14:                                               ; preds = %4
  ret void
}