/*
 * Implement fast Fletcher4 with AVX2 instructions. (x86_64)
 *
 * Use the 256-bit AVX2 SIMD instructions and registers to compute
 * Fletcher4 in four incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 *
 * Copyright (C) 2015 Intel Corporation.
 *
 * Authors:
 *	James Guilford <james.guilford@intel.com>
 *	Jinshan Xiong <jinshan.xiong@intel.com>
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
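
/*
 * A sketch of the stream arithmetic: ymm0..ymm3, saved in
 * ctx->avx[0..3], hold the lane-wise running sums a, b, c and d of
 * the scalar recurrence
 *
 *	a += f[i]; b += a; c += b; d += c;
 *
 * where lane j only sees the input words f[4t + j].  Each lane thus
 * counts a given word with a lane-dependent weight, which
 * fletcher_4_avx2_fini() undoes with fixed integer coefficients.
 * For example, with n input words the scalar B = sum((n - i) * f[i])
 * decomposes as
 *
 *	B = 4 * (b0 + b1 + b2 + b3) - (a1 + 2 * a2 + 3 * a3)
 *
 * which is the expression computed for B in the fini routine below.
 */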

#if defined(HAVE_AVX) && defined(HAVE_AVX2)

#include <sys/spa_checksum.h>
#include <sys/simd.h>
#include <sys/strings.h>
#include <zfs_fletcher.h>

static void
fletcher_4_avx2_init(fletcher_4_ctx_t *ctx)
{
	bzero(ctx->avx, 4 * sizeof (zfs_fletcher_avx_t));
}

static void
fletcher_4_avx2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
	uint64_t A, B, C, D;

	A = ctx->avx[0].v[0] + ctx->avx[0].v[1] +
	    ctx->avx[0].v[2] + ctx->avx[0].v[3];
	B = 0 - ctx->avx[0].v[1] - 2 * ctx->avx[0].v[2] - 3 * ctx->avx[0].v[3] +
	    4 * ctx->avx[1].v[0] + 4 * ctx->avx[1].v[1] + 4 * ctx->avx[1].v[2] +
	    4 * ctx->avx[1].v[3];

	C = ctx->avx[0].v[2] + 3 * ctx->avx[0].v[3] - 6 * ctx->avx[1].v[0] -
	    10 * ctx->avx[1].v[1] - 14 * ctx->avx[1].v[2] -
	    18 * ctx->avx[1].v[3] + 16 * ctx->avx[2].v[0] +
	    16 * ctx->avx[2].v[1] + 16 * ctx->avx[2].v[2] +
	    16 * ctx->avx[2].v[3];

	D = 0 - ctx->avx[0].v[3] + 4 * ctx->avx[1].v[0] +
	    10 * ctx->avx[1].v[1] + 20 * ctx->avx[1].v[2] +
	    34 * ctx->avx[1].v[3] - 48 * ctx->avx[2].v[0] -
	    64 * ctx->avx[2].v[1] - 80 * ctx->avx[2].v[2] -
	    96 * ctx->avx[2].v[3] + 64 * ctx->avx[3].v[0] +
	    64 * ctx->avx[3].v[1] + 64 * ctx->avx[3].v[2] +
	    64 * ctx->avx[3].v[3];

	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

#define	FLETCHER_4_AVX2_RESTORE_CTX(ctx)				\
{									\
	asm volatile("vmovdqu %0, %%ymm0" :: "m" ((ctx)->avx[0]));	\
	asm volatile("vmovdqu %0, %%ymm1" :: "m" ((ctx)->avx[1]));	\
	asm volatile("vmovdqu %0, %%ymm2" :: "m" ((ctx)->avx[2]));	\
	asm volatile("vmovdqu %0, %%ymm3" :: "m" ((ctx)->avx[3]));	\
}

#define	FLETCHER_4_AVX2_SAVE_CTX(ctx)					\
{									\
	asm volatile("vmovdqu %%ymm0, %0" : "=m" ((ctx)->avx[0]));	\
	asm volatile("vmovdqu %%ymm1, %0" : "=m" ((ctx)->avx[1]));	\
	asm volatile("vmovdqu %%ymm2, %0" : "=m" ((ctx)->avx[2]));	\
	asm volatile("vmovdqu %%ymm3, %0" : "=m" ((ctx)->avx[3]));	\
}

static void
fletcher_4_avx2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
	const uint64_t *ip = buf;
	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

	kfpu_begin();

	FLETCHER_4_AVX2_RESTORE_CTX(ctx);

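	/*
	 * Each pass consumes 16 bytes: vpmovzxdq zero-extends four
	 * 32-bit input words into the four 64-bit lanes of %ymm4, and
	 * the chained vpaddq instructions advance the per-lane running
	 * sums a += f; b += a; c += b; d += c held in ymm0..ymm3.
	 */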
	for (; ip < ipend; ip += 2) {
		asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
		asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
		asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
		asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
		asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
	}

	FLETCHER_4_AVX2_SAVE_CTX(ctx);
	asm volatile("vzeroupper");

	kfpu_end();
}

static void
fletcher_4_avx2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
	/*
	 * vpshufb control mask: byte-reverse each zero-extended 32-bit
	 * word in place (source bytes 3..0 to positions 0..3); the 0xFF
	 * control bytes keep the upper half of each 64-bit lane zeroed.
	 */
	static const zfs_fletcher_avx_t mask = {
		.v = { 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
		    0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B }
	};
	const uint64_t *ip = buf;
	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

	kfpu_begin();

	FLETCHER_4_AVX2_RESTORE_CTX(ctx);

	asm volatile("vmovdqu %0, %%ymm5" :: "m" (mask));

	for (; ip < ipend; ip += 2) {
		asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
		asm volatile("vpshufb %ymm5, %ymm4, %ymm4");

		asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
		asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
		asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
		asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
	}

	FLETCHER_4_AVX2_SAVE_CTX(ctx);
	asm volatile("vzeroupper");

	kfpu_end();
}

static boolean_t
fletcher_4_avx2_valid(void)
{
	return (kfpu_allowed() && zfs_avx_available() && zfs_avx2_available());
}

const fletcher_4_ops_t fletcher_4_avx2_ops = {
	.init_native = fletcher_4_avx2_init,
	.fini_native = fletcher_4_avx2_fini,
	.compute_native = fletcher_4_avx2_native,
	.init_byteswap = fletcher_4_avx2_init,
	.fini_byteswap = fletcher_4_avx2_fini,
	.compute_byteswap = fletcher_4_avx2_byteswap,
	.valid = fletcher_4_avx2_valid,
	.name = "avx2"
};

#endif /* defined(HAVE_AVX) && defined(HAVE_AVX2) */