; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-slow-unaligned-mem-16 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+slow-unaligned-mem-16 | FileCheck %s --check-prefix=SLOW

; Verify that the DAGCombiner creates unaligned 16-byte loads and stores if
; and only if those accesses are fast on the target: the FAST run disables
; slow-unaligned-mem-16, while the SLOW run enables it.

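; The two adjacent i64 zero-stores below cover bytes 0-15 of %ptr. With fast
; unaligned 16-byte accesses they merge into a single movups of a zeroed xmm
; register; with slow ones they must stay as two scalar movq stores.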
define void @merge_const_vec_store(ptr %ptr) {
; FAST-LABEL: merge_const_vec_store:
; FAST:       # %bb.0:
; FAST-NEXT:    xorps %xmm0, %xmm0
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_const_vec_store:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movq $0, (%rdi)
; SLOW-NEXT:    movq $0, 8(%rdi)
; SLOW-NEXT:    retq

  %idx1 = getelementptr i64, ptr %ptr, i64 1

  store i64 0, ptr %ptr, align 8
  store i64 0, ptr %idx1, align 8
  ret void
}

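; Two scalar double stores of adjacent vector elements to adjacent memory
; slots: the FAST target can use a single unaligned movups, while the SLOW
; target splits the store into a movlps/movhps pair.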
define void @merge_vec_element_store(<4 x double> %v, ptr %ptr) {
; FAST-LABEL: merge_vec_element_store:
; FAST:       # %bb.0:
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_element_store:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movlps %xmm0, (%rdi)
; SLOW-NEXT:    movhps %xmm0, 8(%rdi)
; SLOW-NEXT:    retq

  %vecext0 = extractelement <4 x double> %v, i32 0
  %vecext1 = extractelement <4 x double> %v, i32 1

  %idx1 = getelementptr double, ptr %ptr, i64 1

  store double %vecext0, ptr %ptr, align 8
  store double %vecext1, ptr %idx1, align 8
  ret void
}

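; Both the paired i64 loads and the paired i64 stores are only 4-byte
; aligned; on the FAST target they still merge into unaligned 16-byte movups
; accesses, while the SLOW target keeps four scalar movq instructions.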
define void @merge_vec_load_and_stores(ptr %ptr) {
; FAST-LABEL: merge_vec_load_and_stores:
; FAST:       # %bb.0:
; FAST-NEXT:    movups (%rdi), %xmm0
; FAST-NEXT:    movups %xmm0, 40(%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_load_and_stores:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movq (%rdi), %rax
; SLOW-NEXT:    movq 8(%rdi), %rcx
; SLOW-NEXT:    movq %rax, 40(%rdi)
; SLOW-NEXT:    movq %rcx, 48(%rdi)
; SLOW-NEXT:    retq

  %idx1 = getelementptr i64, ptr %ptr, i64 1

  %ld0 = load i64, ptr %ptr, align 4
  %ld1 = load i64, ptr %idx1, align 4

  %idx4 = getelementptr i64, ptr %ptr, i64 5
  %idx5 = getelementptr i64, ptr %ptr, i64 6

  store i64 %ld0, ptr %idx4, align 4
  store i64 %ld1, ptr %idx5, align 4
  ret void
}