; xref: /llvm-project/llvm/test/CodeGen/X86/volatile-memstores-nooverlapping-load-stores.ll (revision f0dd12ec5c0169ba5b4363b62d59511181cf954a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s


declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #1

; Non-volatile 7-byte memcpy between noalias pointers: codegen may merge the
; copy into two overlapping 4-byte loads/stores (offsets 0 and 3) instead of
; disjoint 4+2+1-byte accesses.
define dso_local void @copy_7_bytes(ptr noalias nocapture, ptr noalias nocapture readonly) nounwind #0 {
; CHECK-LABEL: copy_7_bytes:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl (%rsi), %eax
; CHECK-NEXT:    movl 3(%rsi), %ecx
; CHECK-NEXT:    movl %ecx, 3(%rdi)
; CHECK-NEXT:    movl %eax, (%rdi)
; CHECK-NEXT:    retq
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %0, ptr align 1 %1, i64 7, i1 false)
  ret void
}
; Volatile 7-byte memcpy: the expected lowering uses disjoint 1+2+4-byte
; accesses rather than overlapping loads/stores.
define dso_local void @copy_7_bytes_volatile(ptr noalias nocapture, ptr noalias nocapture readonly) nounwind #0 {
; CHECK-LABEL: copy_7_bytes_volatile:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzbl 6(%rsi), %eax
; CHECK-NEXT:    movb %al, 6(%rdi)
; CHECK-NEXT:    movzwl 4(%rsi), %eax
; CHECK-NEXT:    movw %ax, 4(%rdi)
; CHECK-NEXT:    movl (%rsi), %eax
; CHECK-NEXT:    movl %eax, (%rdi)
; CHECK-NEXT:    retq
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 %0, ptr align 1 %1, i64 7, i1 true)
  ret void
}


declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1 immarg) #1

; 7-byte memmove (pointers may alias): the expected lowering performs all
; three loads (4+2+1 bytes) before any store, with disjoint accesses.
define dso_local void @move_7_bytes(ptr nocapture, ptr nocapture readonly) nounwind #0 {
; CHECK-LABEL: move_7_bytes:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl (%rsi), %eax
; CHECK-NEXT:    movzwl 4(%rsi), %ecx
; CHECK-NEXT:    movzbl 6(%rsi), %edx
; CHECK-NEXT:    movb %dl, 6(%rdi)
; CHECK-NEXT:    movw %cx, 4(%rdi)
; CHECK-NEXT:    movl %eax, (%rdi)
; CHECK-NEXT:    retq
  tail call void @llvm.memmove.p0.p0.i64(ptr align 1 %0, ptr align 1 %1, i64 7, i1 false)
  ret void
}
; Volatile 7-byte memmove: same loads-before-stores, disjoint 4+2+1 lowering
; as the non-volatile case above.
define dso_local void @move_7_bytes_volatile(ptr nocapture, ptr nocapture readonly) nounwind #0 {
; CHECK-LABEL: move_7_bytes_volatile:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl (%rsi), %eax
; CHECK-NEXT:    movzwl 4(%rsi), %ecx
; CHECK-NEXT:    movzbl 6(%rsi), %edx
; CHECK-NEXT:    movb %dl, 6(%rdi)
; CHECK-NEXT:    movw %cx, 4(%rdi)
; CHECK-NEXT:    movl %eax, (%rdi)
; CHECK-NEXT:    retq
  tail call void @llvm.memmove.p0.p0.i64(ptr align 1 %0, ptr align 1 %1, i64 7, i1 true)
  ret void
}


declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8 , i64, i1 immarg) #1

; Non-volatile 7-byte memset of 0x01: codegen may use two overlapping 4-byte
; stores of 0x01010101 (offsets 0 and 3).
define dso_local void @set_7_bytes(ptr noalias nocapture) nounwind #0 {
; CHECK-LABEL: set_7_bytes:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $16843009, 3(%rdi) # imm = 0x1010101
; CHECK-NEXT:    movl $16843009, (%rdi) # imm = 0x1010101
; CHECK-NEXT:    retq
  tail call void @llvm.memset.p0.i64(ptr align 1 %0, i8 1, i64 7, i1 false)
  ret void
}
; Volatile 7-byte memset: the expected lowering uses disjoint 1+2+4-byte
; stores rather than overlapping ones.
define dso_local void @set_7_bytes_volatile(ptr noalias nocapture) nounwind #0 {
; CHECK-LABEL: set_7_bytes_volatile:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movb $1, 6(%rdi)
; CHECK-NEXT:    movw $257, 4(%rdi) # imm = 0x101
; CHECK-NEXT:    movl $16843009, (%rdi) # imm = 0x1010101
; CHECK-NEXT:    retq
  tail call void @llvm.memset.p0.i64(ptr align 1 %0, i8 1, i64 7, i1 true)
  ret void
}

; NOTE(review): group #0 includes 'noreturn' although every function above
; ends in 'ret void'; it is irrelevant to these codegen assertions but looks
; unintentional -- confirm against the upstream test.
attributes #0 = { noreturn nounwind uwtable "target-cpu"="x86-64" }
attributes #1 = { argmemonly nounwind }
