; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64

declare i64 @llvm.fshl.i64(i64, i64, i64) nounwind readnone
declare i64 @llvm.fshr.i64(i64, i64, i64) nounwind readnone

define i64 @hoist_fshl_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshl_from_or:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rcx, %rsi
; X64-NEXT:    orq %rdx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shldq %cl, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
  %res = or i64 %fshl.0, %fshl.1
  ret i64 %res
}

define i64 @hoist_fshl_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshl_from_and:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    andq %rcx, %rsi
; X64-NEXT:    andq %rdx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shldq %cl, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
  %res = and i64 %fshl.0, %fshl.1
  ret i64 %res
}

define i64 @hoist_fshl_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshl_from_xor:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    xorq %rcx, %rsi
; X64-NEXT:    xorq %rdx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shldq %cl, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
  %res = xor i64 %fshl.0, %fshl.1
  ret i64 %res
}

define i64 @fshl_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: fshl_or_with_different_shift_value:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdx, %rax
; X64-NEXT:    shldq $12, %rsi, %rdi
; X64-NEXT:    shldq $13, %rcx, %rax
; X64-NEXT:    orq %rdi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 12)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 13)
  %res = or i64 %fshl.0, %fshl.1
  ret i64 %res
}

define i64 @hoist_fshl_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: hoist_fshl_from_or_const_shift:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rcx, %rsi
; X64-NEXT:    orq %rdx, %rax
; X64-NEXT:    shldq $15, %rsi, %rax
; X64-NEXT:    retq
  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 15)
  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 15)
  %res = or i64 %fshl.0, %fshl.1
  ret i64 %res
}

define i64 @hoist_fshr_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshr_from_or:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    orq %rdx, %rdi
; X64-NEXT:    orq %rcx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shrdq %cl, %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
  %res = or i64 %fshr.0, %fshr.1
  ret i64 %res
}

define i64 @hoist_fshr_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshr_from_and:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    andq %rdx, %rdi
; X64-NEXT:    andq %rcx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shrdq %cl, %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
  %res = and i64 %fshr.0, %fshr.1
  ret i64 %res
}

define i64 @hoist_fshr_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
; X64-LABEL: hoist_fshr_from_xor:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    xorq %rdx, %rdi
; X64-NEXT:    xorq %rcx, %rax
; X64-NEXT:    movl %r8d, %ecx
; X64-NEXT:    shrdq %cl, %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
  %res = xor i64 %fshr.0, %fshr.1
  ret i64 %res
}

define i64 @fshr_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: fshr_or_with_different_shift_value:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdx, %rax
; X64-NEXT:    shldq $52, %rsi, %rdi
; X64-NEXT:    shldq $51, %rcx, %rax
; X64-NEXT:    orq %rdi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 12)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 13)
  %res = or i64 %fshr.0, %fshr.1
  ret i64 %res
}

define i64 @hoist_fshr_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; X64-LABEL: hoist_fshr_from_or_const_shift:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    orq %rcx, %rsi
; X64-NEXT:    orl %edx, %eax
; X64-NEXT:    shldq $49, %rsi, %rax
; X64-NEXT:    retq
  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 15)
  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 15)
  %res = or i64 %fshr.0, %fshr.1
  ret i64 %res
}