; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s --check-prefixes=SSE-LINUX
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7 | FileCheck %s --check-prefixes=SSE-MSVC
; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7-avx | FileCheck %s --check-prefixes=AVX-LINUX
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7-avx | FileCheck %s --check-prefixes=AVX-MSVC

; llc should share a single constant-pool entry between this integer vector
; and this floating-point vector, since they have the same bit encoding.
; FIXME: AVX duplicates the broadcast instead of sharing one constant load.
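; (1073741824 is 0x40000000, which is also the IEEE-754 encoding of
; float 2.0, so both splats below denote the same 16-byte constant.)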

define void @share_v4i32_v4f32(ptr %p, ptr %q, i1 %t) nounwind {
; SSE-LINUX-LABEL: share_v4i32_v4f32:
; SSE-LINUX:       # %bb.0: # %entry
; SSE-LINUX-NEXT:    movaps {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
; SSE-LINUX-NEXT:    .p2align 4
; SSE-LINUX-NEXT:  .LBB0_1: # %loop
; SSE-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE-LINUX-NEXT:    movaps %xmm0, (%rdi)
; SSE-LINUX-NEXT:    movaps %xmm0, (%rsi)
; SSE-LINUX-NEXT:    testb $1, %dl
; SSE-LINUX-NEXT:    jne .LBB0_1
; SSE-LINUX-NEXT:  # %bb.2: # %ret
; SSE-LINUX-NEXT:    retq
;
; SSE-MSVC-LABEL: share_v4i32_v4f32:
; SSE-MSVC:       # %bb.0: # %entry
; SSE-MSVC-NEXT:    movaps {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
; SSE-MSVC-NEXT:    .p2align 4
; SSE-MSVC-NEXT:  .LBB0_1: # %loop
; SSE-MSVC-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE-MSVC-NEXT:    movaps %xmm0, (%rcx)
; SSE-MSVC-NEXT:    movaps %xmm0, (%rdx)
; SSE-MSVC-NEXT:    testb $1, %r8b
; SSE-MSVC-NEXT:    jne .LBB0_1
; SSE-MSVC-NEXT:  # %bb.2: # %ret
; SSE-MSVC-NEXT:    retq
;
; AVX-LINUX-LABEL: share_v4i32_v4f32:
; AVX-LINUX:       # %bb.0: # %entry
; AVX-LINUX-NEXT:    vbroadcastss {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
; AVX-LINUX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741824,1073741824,1073741824,1073741824]
; AVX-LINUX-NEXT:    .p2align 4
; AVX-LINUX-NEXT:  .LBB0_1: # %loop
; AVX-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX-LINUX-NEXT:    vmovaps %xmm0, (%rdi)
; AVX-LINUX-NEXT:    vmovaps %xmm1, (%rsi)
; AVX-LINUX-NEXT:    testb $1, %dl
; AVX-LINUX-NEXT:    jne .LBB0_1
; AVX-LINUX-NEXT:  # %bb.2: # %ret
; AVX-LINUX-NEXT:    retq
;
; AVX-MSVC-LABEL: share_v4i32_v4f32:
; AVX-MSVC:       # %bb.0: # %entry
; AVX-MSVC-NEXT:    vbroadcastss {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
; AVX-MSVC-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741824,1073741824,1073741824,1073741824]
; AVX-MSVC-NEXT:    .p2align 4
; AVX-MSVC-NEXT:  .LBB0_1: # %loop
; AVX-MSVC-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX-MSVC-NEXT:    vmovaps %xmm0, (%rcx)
; AVX-MSVC-NEXT:    vmovaps %xmm1, (%rdx)
; AVX-MSVC-NEXT:    testb $1, %r8b
; AVX-MSVC-NEXT:    jne .LBB0_1
; AVX-MSVC-NEXT:  # %bb.2: # %ret
; AVX-MSVC-NEXT:    retq
entry:
  br label %loop
loop:
  store <4 x i32><i32 1073741824, i32 1073741824, i32 1073741824, i32 1073741824>, ptr %p
  store <4 x float><float 2.0, float 2.0, float 2.0, float 2.0>, ptr %q
  br i1 %t, label %loop, label %ret
ret:
  ret void
}
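
; Both vectors below are built from repeating -1/0 i64 lanes; the stores are
; volatile so they cannot be combined. The expectation is that each distinct
; value is materialized once (e.g. all-zeros via xorps) and reused across the
; stores rather than reloaded from the constant pool for every store.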
define void @store_repeated_constants(ptr %lo, ptr %hi) {
; SSE-LINUX-LABEL: store_repeated_constants:
; SSE-LINUX:       # %bb.0:
; SSE-LINUX-NEXT:    xorps %xmm0, %xmm0
; SSE-LINUX-NEXT:    movaps %xmm0, 48(%rdi)
; SSE-LINUX-NEXT:    movsd {{.*#+}} xmm1 = [18446744073709551615,0]
; SSE-LINUX-NEXT:    movaps %xmm1, 32(%rdi)
; SSE-LINUX-NEXT:    movaps %xmm1, 16(%rdi)
; SSE-LINUX-NEXT:    movaps %xmm1, (%rdi)
; SSE-LINUX-NEXT:    movaps %xmm0, 32(%rsi)
; SSE-LINUX-NEXT:    movaps %xmm0, 48(%rsi)
; SSE-LINUX-NEXT:    movaps %xmm1, (%rsi)
; SSE-LINUX-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
; SSE-LINUX-NEXT:    movaps %xmm0, 16(%rsi)
; SSE-LINUX-NEXT:    retq
;
; SSE-MSVC-LABEL: store_repeated_constants:
; SSE-MSVC:       # %bb.0:
; SSE-MSVC-NEXT:    xorps %xmm0, %xmm0
; SSE-MSVC-NEXT:    movaps %xmm0, 48(%rcx)
; SSE-MSVC-NEXT:    movsd {{.*#+}} xmm1 = [18446744073709551615,0]
; SSE-MSVC-NEXT:    movaps %xmm1, 32(%rcx)
; SSE-MSVC-NEXT:    movaps %xmm1, 16(%rcx)
; SSE-MSVC-NEXT:    movaps %xmm1, (%rcx)
; SSE-MSVC-NEXT:    movaps %xmm0, 32(%rdx)
; SSE-MSVC-NEXT:    movaps %xmm0, 48(%rdx)
; SSE-MSVC-NEXT:    movaps %xmm1, (%rdx)
; SSE-MSVC-NEXT:    movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
; SSE-MSVC-NEXT:    movaps %xmm0, 16(%rdx)
; SSE-MSVC-NEXT:    retq
;
; AVX-LINUX-LABEL: store_repeated_constants:
; AVX-LINUX:       # %bb.0:
; AVX-LINUX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [18446744073709551615,0,18446744073709551615,0]
; AVX-LINUX-NEXT:    # ymm0 = mem[0,1,0,1]
; AVX-LINUX-NEXT:    vmovaps %ymm0, (%rdi)
; AVX-LINUX-NEXT:    vmovaps {{.*#+}} ymm0 = [18446744073709551615,0,0,18446744073709551615]
; AVX-LINUX-NEXT:    vmovaps %xmm0, %xmm1
; AVX-LINUX-NEXT:    vmovaps %ymm1, 32(%rdi)
; AVX-LINUX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-LINUX-NEXT:    vmovaps %ymm1, 32(%rsi)
; AVX-LINUX-NEXT:    vmovaps %ymm0, (%rsi)
; AVX-LINUX-NEXT:    vzeroupper
; AVX-LINUX-NEXT:    retq
;
; AVX-MSVC-LABEL: store_repeated_constants:
; AVX-MSVC:       # %bb.0:
; AVX-MSVC-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [18446744073709551615,0,18446744073709551615,0]
; AVX-MSVC-NEXT:    # ymm0 = mem[0,1,0,1]
; AVX-MSVC-NEXT:    vmovaps %ymm0, (%rcx)
; AVX-MSVC-NEXT:    vmovaps {{.*#+}} ymm0 = [18446744073709551615,0,0,18446744073709551615]
; AVX-MSVC-NEXT:    vmovaps %xmm0, %xmm1
; AVX-MSVC-NEXT:    vmovaps %ymm1, 32(%rcx)
; AVX-MSVC-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-MSVC-NEXT:    vmovaps %ymm1, 32(%rdx)
; AVX-MSVC-NEXT:    vmovaps %ymm0, (%rdx)
; AVX-MSVC-NEXT:    vzeroupper
; AVX-MSVC-NEXT:    retq
  store volatile <8 x i64> <i64 -1, i64 0, i64 -1, i64 0, i64 -1, i64 0, i64 0, i64 0>, ptr %lo, align 64
  store volatile <8 x i64> <i64 -1, i64 0, i64 0, i64 -1, i64 0, i64 0, i64 0, i64 0>, ptr %hi, align 64
  ret void
}