xref: /llvm-project/llvm/test/CodeGen/X86/insert.ll (revision f44079db22036d0ade2cf3d2e5a24bde5d378efd)
1*f44079dbSDavid Li; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
2*f44079dbSDavid Li; RUN: llc < %s -mtriple=i386-unknown-unknown | FileCheck %s --check-prefixes=X86
3*f44079dbSDavid Li; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
4*f44079dbSDavid Li
; sub8: insert a loaded i8 into the low byte of an i64.
; IR pattern: (%res & -256) | zext(load i8) -- the mask clears exactly
; bits [7:0], so the OR is a pure low-byte insertion.
; Expected lowering (per the autogenerated checks below): a single byte
; move into the low subregister (movb into %al) with no explicit
; and/or masking instructions, on both i386 and x86-64.
5*f44079dbSDavid Lidefine i64 @sub8(i64 noundef %res, ptr %byte) {
6*f44079dbSDavid Li; X86-LABEL: sub8:
7*f44079dbSDavid Li; X86:       # %bb.0: # %entry
8*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
9*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
10*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
11*f44079dbSDavid Li; X86-NEXT:    movb (%ecx), %al
12*f44079dbSDavid Li; X86-NEXT:    retl
13*f44079dbSDavid Li;
14*f44079dbSDavid Li; X64-LABEL: sub8:
15*f44079dbSDavid Li; X64:       # %bb.0: # %entry
16*f44079dbSDavid Li; X64-NEXT:    movq %rdi, %rax
17*f44079dbSDavid Li; X64-NEXT:    movb (%rsi), %al
18*f44079dbSDavid Li; X64-NEXT:    retq
19*f44079dbSDavid Lientry:
  ; Clear bits [7:0] of %res, then OR in the zero-extended loaded byte.
20*f44079dbSDavid Li  %and = and i64 %res, -256
21*f44079dbSDavid Li  %d = load i8, ptr %byte, align 1
22*f44079dbSDavid Li  %conv2 = zext i8 %d to i64
23*f44079dbSDavid Li  %or = or i64 %and, %conv2
24*f44079dbSDavid Li  ret i64 %or
25*f44079dbSDavid Li}
26*f44079dbSDavid Li
; sub16: insert a loaded i16 into the low 16 bits of an i64.
; IR pattern: (%res & -65536) | zext(load i16) -- the mask clears exactly
; bits [15:0], so the OR is a pure low-word insertion.
; Expected lowering (per the autogenerated checks below):
;  - x86-64: a single word move into the low subregister (movw into %ax).
;  - i386: the i64 is split across %eax/%edx, so the low half is rebuilt
;    with shll $16 + orl while the high half passes through in %edx.
27*f44079dbSDavid Lidefine i64 @sub16(i64 noundef %res, ptr %byte) {
28*f44079dbSDavid Li; X86-LABEL: sub16:
29*f44079dbSDavid Li; X86:       # %bb.0: # %entry
30*f44079dbSDavid Li; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
31*f44079dbSDavid Li; X86-NEXT:    shll $16, %ecx
32*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
33*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
34*f44079dbSDavid Li; X86-NEXT:    movzwl (%eax), %eax
35*f44079dbSDavid Li; X86-NEXT:    orl %ecx, %eax
36*f44079dbSDavid Li; X86-NEXT:    retl
37*f44079dbSDavid Li;
38*f44079dbSDavid Li; X64-LABEL: sub16:
39*f44079dbSDavid Li; X64:       # %bb.0: # %entry
40*f44079dbSDavid Li; X64-NEXT:    movq %rdi, %rax
41*f44079dbSDavid Li; X64-NEXT:    movw (%rsi), %ax
42*f44079dbSDavid Li; X64-NEXT:    retq
43*f44079dbSDavid Lientry:
  ; Clear bits [15:0] of %res, then OR in the zero-extended loaded word.
  ; Note: the load is align 1, so the lowering may not assume alignment.
44*f44079dbSDavid Li  %and = and i64 %res, -65536
45*f44079dbSDavid Li  %d = load i16, ptr %byte, align 1
46*f44079dbSDavid Li  %conv2 = zext i16 %d to i64
47*f44079dbSDavid Li  %or = or i64 %and, %conv2
48*f44079dbSDavid Li  ret i64 %or
49*f44079dbSDavid Li}
50*f44079dbSDavid Li
; sub8_32: i32 variant of sub8 -- insert a loaded i8 into the low byte
; of an i32 via (%res & -256) | zext(load i8).
; Expected lowering (per the autogenerated checks below): a single movb
; into the low subregister (%al) on both targets; since the value fits
; in 32 bits, i386 needs no %edx half and no masking instructions.
51*f44079dbSDavid Lidefine i32 @sub8_32(i32 noundef %res, ptr %byte) {
52*f44079dbSDavid Li; X86-LABEL: sub8_32:
53*f44079dbSDavid Li; X86:       # %bb.0: # %entry
54*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
55*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
56*f44079dbSDavid Li; X86-NEXT:    movb (%ecx), %al
57*f44079dbSDavid Li; X86-NEXT:    retl
58*f44079dbSDavid Li;
59*f44079dbSDavid Li; X64-LABEL: sub8_32:
60*f44079dbSDavid Li; X64:       # %bb.0: # %entry
61*f44079dbSDavid Li; X64-NEXT:    movl %edi, %eax
62*f44079dbSDavid Li; X64-NEXT:    movb (%rsi), %al
63*f44079dbSDavid Li; X64-NEXT:    retq
64*f44079dbSDavid Lientry:
  ; Clear bits [7:0] of %res, then OR in the zero-extended loaded byte.
65*f44079dbSDavid Li  %and = and i32 %res, -256
66*f44079dbSDavid Li  %d = load i8, ptr %byte, align 1
67*f44079dbSDavid Li  %conv2 = zext i8 %d to i32
68*f44079dbSDavid Li  %or = or i32 %and, %conv2
69*f44079dbSDavid Li  ret i32 %or
70*f44079dbSDavid Li}
71*f44079dbSDavid Li
; sub16_32: i32 variant of sub16 -- insert a loaded i16 into the low
; 16 bits of an i32 via (%res & -65536) | zext(load i16).
; Expected lowering (per the autogenerated checks below):
;  - x86-64: a single word move into the low subregister (movw into %ax).
;  - i386: the incoming high word is rebuilt with shll $16 + orl against
;    the zero-extended loaded word.
72*f44079dbSDavid Lidefine i32 @sub16_32(i32 noundef %res, ptr %byte) {
73*f44079dbSDavid Li; X86-LABEL: sub16_32:
74*f44079dbSDavid Li; X86:       # %bb.0: # %entry
75*f44079dbSDavid Li; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
76*f44079dbSDavid Li; X86-NEXT:    shll $16, %ecx
77*f44079dbSDavid Li; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
78*f44079dbSDavid Li; X86-NEXT:    movzwl (%eax), %eax
79*f44079dbSDavid Li; X86-NEXT:    orl %ecx, %eax
80*f44079dbSDavid Li; X86-NEXT:    retl
81*f44079dbSDavid Li;
82*f44079dbSDavid Li; X64-LABEL: sub16_32:
83*f44079dbSDavid Li; X64:       # %bb.0: # %entry
84*f44079dbSDavid Li; X64-NEXT:    movl %edi, %eax
85*f44079dbSDavid Li; X64-NEXT:    movw (%rsi), %ax
86*f44079dbSDavid Li; X64-NEXT:    retq
87*f44079dbSDavid Lientry:
  ; Clear bits [15:0] of %res, then OR in the zero-extended loaded word.
  ; Note: the load is align 1, so the lowering may not assume alignment.
88*f44079dbSDavid Li  %and = and i32 %res, -65536
89*f44079dbSDavid Li  %d = load i16, ptr %byte, align 1
90*f44079dbSDavid Li  %conv2 = zext i16 %d to i32
91*f44079dbSDavid Li  %or = or i32 %and, %conv2
92*f44079dbSDavid Li  ret i32 %or
93*f44079dbSDavid Li}
94