/*
 * Implement fast Fletcher4 with SSE2/SSSE3 instructions (x86).
 *
 * Use the 128-bit SSE2/SSSE3 SIMD instructions and registers to compute
 * Fletcher4 in two incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 * This implementation is a derivative of the AVX SIMD implementation by
 * James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c).
 *
 * Copyright (C) 2016 Tyler J. Stachecki.
 *
 * Authors:
 *	Tyler J. Stachecki <stachecki.tyler@gmail.com>
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(HAVE_SSE2)

#include <sys/simd.h>
#include <sys/spa_checksum.h>
#include <sys/string.h>
#include <sys/byteorder.h>
#include <zfs_fletcher.h>

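/*
 * For reference only (an illustrative sketch, not used by the SIMD
 * paths below, and the helper name is ours): each 64-bit lane of the
 * vector code implements the standard scalar Fletcher4 recurrence for
 * its half of the 32-bit input words, with wrap-around modulo 2^64.
 */
static inline void
fletcher_4_scalar_step(uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d,
    uint32_t w)
{
        *a += w;        /* first-order running sum of the input words */
        *b += *a;       /* second-order sum */
        *c += *b;       /* third-order sum */
        *d += *c;       /* fourth-order sum */
}
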
static void
fletcher_4_sse2_init(fletcher_4_ctx_t *ctx)
{
        memset(ctx->sse, 0, 4 * sizeof (zfs_fletcher_sse_t));
}

static void
fletcher_4_sse2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
        uint64_t A, B, C, D;

        /*
         * The mixing matrix for checksum calculation is:
         * a = a0 + a1
         * b = 2b0 + 2b1 - a1
         * c = 4c0 - b0 + 4c1 - 3b1
         * d = 8d0 - 4c0 + 8d1 - 8c1 + b1
         *
         * c and d are multiplied by 4 and 8, respectively,
         * before spilling the vectors out to memory.
         */
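        /*
         * In outline: lane 0 of each accumulator has only ever seen the
         * even-indexed 32-bit words and lane 1 the odd-indexed words, so
         * expanding the scalar recurrences over the interleaved sequence
         * produces the integer coefficients above, which reconstruct the
         * cross terms missing from each lane.
         */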
        A = ctx->sse[0].v[0] + ctx->sse[0].v[1];
        B = 2 * ctx->sse[1].v[0] + 2 * ctx->sse[1].v[1] - ctx->sse[0].v[1];
        C = 4 * ctx->sse[2].v[0] - ctx->sse[1].v[0] + 4 * ctx->sse[2].v[1] -
            3 * ctx->sse[1].v[1];
        D = 8 * ctx->sse[3].v[0] - 4 * ctx->sse[2].v[0] + 8 * ctx->sse[3].v[1] -
            8 * ctx->sse[2].v[1] + ctx->sse[1].v[1];

        ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}
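
/*
 * The four two-lane accumulators live in %xmm0-%xmm3 while a compute
 * loop runs. These helpers move them between the context and the
 * registers with unaligned loads/stores (movdqu), since the context
 * may not be 16-byte aligned.
 */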
#define FLETCHER_4_SSE_RESTORE_CTX(ctx) \
{ \
        asm volatile("movdqu %0, %%xmm0" :: "m" ((ctx)->sse[0])); \
        asm volatile("movdqu %0, %%xmm1" :: "m" ((ctx)->sse[1])); \
        asm volatile("movdqu %0, %%xmm2" :: "m" ((ctx)->sse[2])); \
        asm volatile("movdqu %0, %%xmm3" :: "m" ((ctx)->sse[3])); \
}

#define FLETCHER_4_SSE_SAVE_CTX(ctx) \
{ \
        asm volatile("movdqu %%xmm0, %0" : "=m" ((ctx)->sse[0])); \
        asm volatile("movdqu %%xmm1, %0" : "=m" ((ctx)->sse[1])); \
        asm volatile("movdqu %%xmm2, %0" : "=m" ((ctx)->sse[2])); \
        asm volatile("movdqu %%xmm3, %0" : "=m" ((ctx)->sse[3])); \
}

static void
fletcher_4_sse2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

        FLETCHER_4_SSE_RESTORE_CTX(ctx);

        asm volatile("pxor %xmm4, %xmm4");

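        /*
         * Each iteration consumes 16 bytes (four 32-bit words). The
         * punpck{l,h}dq steps zero-extend the words against %xmm4 into
         * 64-bit lanes: %xmm5 = { w0, w1 } and %xmm6 = { w2, w3 }, so
         * lane 0 accumulates the even-indexed words and lane 1 the
         * odd-indexed words. Each cascaded paddq chain is one
         * scalar-style a += w; b += a; c += b; d += c update applied to
         * both lanes at once.
         */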
        do {
                asm volatile("movdqu %0, %%xmm5" :: "m"(*ip));
                asm volatile("movdqa %xmm5, %xmm6");
                asm volatile("punpckldq %xmm4, %xmm5");
                asm volatile("punpckhdq %xmm4, %xmm6");
                asm volatile("paddq %xmm5, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
                asm volatile("paddq %xmm6, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_SSE_SAVE_CTX(ctx);
}

static void
fletcher_4_sse2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

        FLETCHER_4_SSE_RESTORE_CTX(ctx);

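        /*
         * Each iteration consumes 8 bytes: two 32-bit words are
         * byteswapped in scalar code, then placed into the two 64-bit
         * lanes of %xmm5 (movd + punpcklqdq) so a single cascaded paddq
         * chain updates both accumulator streams.
         */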
        do {
                uint32_t scratch1 = BSWAP_32(ip[0]);
                uint32_t scratch2 = BSWAP_32(ip[1]);
                asm volatile("movd %0, %%xmm5" :: "r"(scratch1));
                asm volatile("movd %0, %%xmm6" :: "r"(scratch2));
                asm volatile("punpcklqdq %xmm6, %xmm5");
                asm volatile("paddq %xmm5, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_SSE_SAVE_CTX(ctx);
}

static boolean_t fletcher_4_sse2_valid(void)
{
        return (kfpu_allowed() && zfs_sse2_available());
}

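/*
 * With uses_fpu set, the fletcher_4 dispatcher brackets calls into this
 * implementation with kfpu_begin()/kfpu_end(), since the compute loops
 * clobber SIMD register state.
 */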
const fletcher_4_ops_t fletcher_4_sse2_ops = {
        .init_native = fletcher_4_sse2_init,
        .fini_native = fletcher_4_sse2_fini,
        .compute_native = fletcher_4_sse2_native,
        .init_byteswap = fletcher_4_sse2_init,
        .fini_byteswap = fletcher_4_sse2_fini,
        .compute_byteswap = fletcher_4_sse2_byteswap,
        .valid = fletcher_4_sse2_valid,
        .uses_fpu = B_TRUE,
        .name = "sse2"
};

#endif /* defined(HAVE_SSE2) */

#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
static void
fletcher_4_ssse3_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        static const zfs_fletcher_sse_t mask = {
                .v = { 0x0405060700010203, 0x0C0D0E0F08090A0B }
        };
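
        /*
         * pshufb shuffle control: destination byte i takes source byte
         * (i ^ 3), which reverses the byte order of each 32-bit word in
         * the vector in place.
         */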

        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

        FLETCHER_4_SSE_RESTORE_CTX(ctx);

        asm volatile("movdqu %0, %%xmm7"::"m" (mask));
        asm volatile("pxor %xmm4, %xmm4");

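        /*
         * Identical accumulation to fletcher_4_sse2_native(); the
         * pshufb byteswap of each 16-byte chunk is the only difference,
         * so this path also consumes 16 bytes per iteration.
         */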
        do {
                asm volatile("movdqu %0, %%xmm5"::"m" (*ip));
                asm volatile("pshufb %xmm7, %xmm5");
                asm volatile("movdqa %xmm5, %xmm6");
                asm volatile("punpckldq %xmm4, %xmm5");
                asm volatile("punpckhdq %xmm4, %xmm6");
                asm volatile("paddq %xmm5, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
                asm volatile("paddq %xmm6, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_SSE_SAVE_CTX(ctx);
}

static boolean_t fletcher_4_ssse3_valid(void)
{
        return (kfpu_allowed() && zfs_sse2_available() &&
            zfs_ssse3_available());
}

const fletcher_4_ops_t fletcher_4_ssse3_ops = {
        .init_native = fletcher_4_sse2_init,
        .fini_native = fletcher_4_sse2_fini,
        .compute_native = fletcher_4_sse2_native,
        .init_byteswap = fletcher_4_sse2_init,
        .fini_byteswap = fletcher_4_sse2_fini,
        .compute_byteswap = fletcher_4_ssse3_byteswap,
        .valid = fletcher_4_ssse3_valid,
        .uses_fpu = B_TRUE,
        .name = "ssse3"
};

#endif /* defined(HAVE_SSE2) && defined(HAVE_SSSE3) */