dnl  AMD64 mpn_lshiftc -- mpn left shift with complement, optimised for Atom.

dnl  Contributed to the GNU project by Torbjorn Granlund.

dnl  Copyright 2011, 2012 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')

C	     cycles/limb
C AMD K8,K9	 ?
C AMD K10	 ?
C Intel P4	 ?
C Intel core2	 ?
C Intel NHM	 ?
C Intel SBR	 ?
C Intel atom	 5
C VIA nano	 ?

C TODO
C  * Consider using 4-way unrolling.  We reach 4.5 c/l, but the code is 2.5
C    times larger.

C INPUT PARAMETERS
define(`rp',	`%rdi')
define(`up',	`%rsi')
define(`n',	`%rdx')
define(`cnt',	`%rcx')

ABI_SUPPORT(DOS64)
ABI_SUPPORT(STD64)

ASM_START()
	TEXT
	ALIGN(16)
PROLOGUE(mpn_lshiftc)
	FUNC_ENTRY(4)
	lea	-8(up,n,8), up
	lea	-8(rp,n,8), rp
	shr	R32(n)
	mov	(up), %rax
	jnc	L(evn)

	mov	%rax, %r11
	shl	R8(%rcx), %r11
	neg	R8(%rcx)
	shr	R8(%rcx), %rax
	test	n, n
	jnz	L(gt1)
	not	%r11
	mov	%r11, (rp)
	FUNC_EXIT()
	ret

L(gt1):	mov	-8(up), %r8
	mov	%r8, %r10
	shr	R8(%rcx), %r8
	jmp	L(lo1)

L(evn):	mov	%rax, %r10
	neg	R8(%rcx)
	shr	R8(%rcx), %rax
	mov	-8(up), %r9
	mov	%r9, %r11
	shr	R8(%rcx), %r9
	neg	R8(%rcx)
	lea	8(rp), rp
	lea	-8(up), up
	jmp	L(lo0)

C	ALIGN(16)
L(top):	shl	R8(%rcx), %r10
	or	%r10, %r9
	shl	R8(%rcx), %r11
	not	%r9
	neg	R8(%rcx)
	mov	-8(up), %r8
	lea	-16(rp), rp
	mov	%r8, %r10
	shr	R8(%rcx), %r8
	mov	%r9, 8(rp)
L(lo1):	or	%r11, %r8
	mov	-16(up), %r9
	mov	%r9, %r11
	shr	R8(%rcx), %r9
	lea	-16(up), up
	neg	R8(%rcx)
	not	%r8
	mov	%r8, (rp)
L(lo0):	dec	n
	jg	L(top)

L(end):	shl	R8(%rcx), %r10
	or	%r10, %r9
	not	%r9
	shl	R8(%rcx), %r11
	not	%r11
	mov	%r9, -8(rp)
	mov	%r11, -16(rp)
	FUNC_EXIT()
	ret
EPILOGUE()
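
C A minimal portable-C sketch of what this routine computes, assuming
C 64-bit limbs and 1 <= cnt <= 63; the names below (ref_lshiftc, limb_t,
C tnc) are illustrative and are not GMP's generic code.  The operation
C shifts {up,n} left by cnt bits, stores the one's complement of the
C result at {rp,n}, and returns the bits shifted out of the top limb
C (not complemented), matching the value left in %rax above.
C
C	typedef unsigned long long limb_t;	/* assumes a 64-bit limb */
C
C	limb_t
C	ref_lshiftc (limb_t *rp, const limb_t *up, long n, unsigned cnt)
C	{
C	  unsigned tnc = 64 - cnt;
C	  limb_t retval, high, low;
C
C	  up += n;			/* work from the most significant end down */
C	  rp += n;
C	  low = *--up;
C	  retval = low >> tnc;		/* bits shifted out of the top limb */
C	  high = low << cnt;
C	  while (--n != 0)
C	    {
C	      low = *--up;
C	      *--rp = ~(high | (low >> tnc));
C	      high = low << cnt;
C	    }
C	  *--rp = ~high;		/* least significant limb gets zeros shifted in */
C	  return retval;
C	}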