/*
 * Implement fast Fletcher4 with NEON instructions. (aarch64)
 *
 * Use the 128-bit NEON SIMD instructions and registers to compute
 * Fletcher4 in two incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 * This implementation is a derivative of the AVX SIMD implementation by
 * James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c).
 *
 * Copyright (C) 2016 Romain Dolbeau.
 *
 * Authors:
 *	Romain Dolbeau <romain.dolbeau@atos.net>
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(__aarch64__)

#include <sys/simd.h>
#include <sys/spa_checksum.h>
#include <sys/string.h>
#include <zfs_fletcher.h>

static void
fletcher_4_aarch64_neon_init(fletcher_4_ctx_t *ctx)
{
	memset(ctx->aarch64_neon, 0, 4 * sizeof (zfs_fletcher_aarch64_neon_t));
}

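/*
 * The SIMD code accumulates two parallel streams: lane 0 of each
 * accumulator holds the running sums over the even-indexed 32-bit words
 * of the input and lane 1 the sums over the odd-indexed words.
 * Recombine the lanes with the coefficients below to obtain the four
 * values the serial Fletcher4 algorithm would have produced.
 */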
static void
fletcher_4_aarch64_neon_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
	uint64_t A, B, C, D;
	A = ctx->aarch64_neon[0].v[0] + ctx->aarch64_neon[0].v[1];
	B = 2 * ctx->aarch64_neon[1].v[0] + 2 * ctx->aarch64_neon[1].v[1] -
	    ctx->aarch64_neon[0].v[1];
	C = 4 * ctx->aarch64_neon[2].v[0] - ctx->aarch64_neon[1].v[0] +
	    4 * ctx->aarch64_neon[2].v[1] - 3 * ctx->aarch64_neon[1].v[1];
	D = 8 * ctx->aarch64_neon[3].v[0] - 4 * ctx->aarch64_neon[2].v[0] +
	    8 * ctx->aarch64_neon[3].v[1] - 8 * ctx->aarch64_neon[2].v[1] +
	    ctx->aarch64_neon[1].v[1];
	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

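/*
 * Zero the helper register and load the four partial accumulators
 * (two 64-bit lanes each) from the context into NEON registers.
 */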
#define	NEON_INIT_LOOP()					\
	asm("eor %[ZERO].16b,%[ZERO].16b,%[ZERO].16b\n"	\
	"ld1 { %[ACC0].4s }, %[CTX0]\n"				\
	"ld1 { %[ACC1].4s }, %[CTX1]\n"				\
	"ld1 { %[ACC2].4s }, %[CTX2]\n"				\
	"ld1 { %[ACC3].4s }, %[CTX3]\n"				\
	: [ZERO] "=w" (ZERO),					\
	[ACC0] "=w" (ACC0), [ACC1] "=w" (ACC1),			\
	[ACC2] "=w" (ACC2), [ACC3] "=w" (ACC3)			\
	: [CTX0] "Q" (ctx->aarch64_neon[0]),			\
	[CTX1] "Q" (ctx->aarch64_neon[1]),			\
	[CTX2] "Q" (ctx->aarch64_neon[2]),			\
	[CTX3] "Q" (ctx->aarch64_neon[3]))

#define	NEON_DO_REVERSE	"rev32 %[SRC].16b, %[SRC].16b\n"

#define	NEON_DONT_REVERSE ""

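/*
 * Process one 16-byte block: load four 32-bit words, optionally
 * byte-swap them (REVERSE), zero-extend them to 64 bits with zip1/zip2
 * (even-indexed words land in lane 0, odd-indexed words in lane 1),
 * and run the chained accumulator additions once per pair of words.
 */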
#define	NEON_MAIN_LOOP(REVERSE)					\
	asm("ld1 { %[SRC].4s }, %[IP]\n"			\
	REVERSE							\
	"zip1 %[TMP1].4s, %[SRC].4s, %[ZERO].4s\n"		\
	"zip2 %[TMP2].4s, %[SRC].4s, %[ZERO].4s\n"		\
	"add %[ACC0].2d, %[ACC0].2d, %[TMP1].2d\n"		\
	"add %[ACC1].2d, %[ACC1].2d, %[ACC0].2d\n"		\
	"add %[ACC2].2d, %[ACC2].2d, %[ACC1].2d\n"		\
	"add %[ACC3].2d, %[ACC3].2d, %[ACC2].2d\n"		\
	"add %[ACC0].2d, %[ACC0].2d, %[TMP2].2d\n"		\
	"add %[ACC1].2d, %[ACC1].2d, %[ACC0].2d\n"		\
	"add %[ACC2].2d, %[ACC2].2d, %[ACC1].2d\n"		\
	"add %[ACC3].2d, %[ACC3].2d, %[ACC2].2d\n"		\
	: [SRC] "=&w" (SRC),					\
	[TMP1] "=&w" (TMP1), [TMP2] "=&w" (TMP2),		\
	[ACC0] "+w" (ACC0), [ACC1] "+w" (ACC1),			\
	[ACC2] "+w" (ACC2), [ACC3] "+w" (ACC3)			\
	: [ZERO] "w" (ZERO), [IP] "Q" (*ip))

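/*
 * Store the four accumulators back into the context so the partial
 * checksum can be resumed later or folded by the _fini handler.
 */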
#define	NEON_FINI_LOOP()					\
	asm("st1 { %[ACC0].4s },%[DST0]\n"			\
	"st1 { %[ACC1].4s },%[DST1]\n"				\
	"st1 { %[ACC2].4s },%[DST2]\n"				\
	"st1 { %[ACC3].4s },%[DST3]\n"				\
	: [DST0] "=Q" (ctx->aarch64_neon[0]),			\
	[DST1] "=Q" (ctx->aarch64_neon[1]),			\
	[DST2] "=Q" (ctx->aarch64_neon[2]),			\
	[DST3] "=Q" (ctx->aarch64_neon[3])			\
	: [ACC0] "w" (ACC0), [ACC1] "w" (ACC1),			\
	[ACC2] "w" (ACC2), [ACC3] "w" (ACC3))

static void
fletcher_4_aarch64_neon_native(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
	const uint64_t *ip = buf;
	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
#if defined(_KERNEL)
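	/*
	 * In kernel context the compiler is not free to allocate NEON
	 * registers itself, so pin each vector variable to a fixed
	 * register (v0-v7) for the inline assembly.
	 */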
	register unsigned char ZERO asm("v0") __attribute__((vector_size(16)));
	register unsigned char ACC0 asm("v1") __attribute__((vector_size(16)));
	register unsigned char ACC1 asm("v2") __attribute__((vector_size(16)));
	register unsigned char ACC2 asm("v3") __attribute__((vector_size(16)));
	register unsigned char ACC3 asm("v4") __attribute__((vector_size(16)));
	register unsigned char TMP1 asm("v5") __attribute__((vector_size(16)));
	register unsigned char TMP2 asm("v6") __attribute__((vector_size(16)));
	register unsigned char SRC asm("v7") __attribute__((vector_size(16)));
#else
	unsigned char ZERO __attribute__((vector_size(16)));
	unsigned char ACC0 __attribute__((vector_size(16)));
	unsigned char ACC1 __attribute__((vector_size(16)));
	unsigned char ACC2 __attribute__((vector_size(16)));
	unsigned char ACC3 __attribute__((vector_size(16)));
	unsigned char TMP1 __attribute__((vector_size(16)));
	unsigned char TMP2 __attribute__((vector_size(16)));
	unsigned char SRC __attribute__((vector_size(16)));
#endif

	NEON_INIT_LOOP();

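	/* Each pass through the loop consumes 16 bytes of input. */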
	do {
		NEON_MAIN_LOOP(NEON_DONT_REVERSE);
	} while ((ip += 2) < ipend);

	NEON_FINI_LOOP();
}

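/*
 * Identical to the native path, except that each 32-bit word is
 * byte-swapped (rev32) as it is loaded.
 */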
static void
fletcher_4_aarch64_neon_byteswap(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
	const uint64_t *ip = buf;
	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
#if defined(_KERNEL)
	register unsigned char ZERO asm("v0") __attribute__((vector_size(16)));
	register unsigned char ACC0 asm("v1") __attribute__((vector_size(16)));
	register unsigned char ACC1 asm("v2") __attribute__((vector_size(16)));
	register unsigned char ACC2 asm("v3") __attribute__((vector_size(16)));
	register unsigned char ACC3 asm("v4") __attribute__((vector_size(16)));
	register unsigned char TMP1 asm("v5") __attribute__((vector_size(16)));
	register unsigned char TMP2 asm("v6") __attribute__((vector_size(16)));
	register unsigned char SRC asm("v7") __attribute__((vector_size(16)));
#else
	unsigned char ZERO __attribute__((vector_size(16)));
	unsigned char ACC0 __attribute__((vector_size(16)));
	unsigned char ACC1 __attribute__((vector_size(16)));
	unsigned char ACC2 __attribute__((vector_size(16)));
	unsigned char ACC3 __attribute__((vector_size(16)));
	unsigned char TMP1 __attribute__((vector_size(16)));
	unsigned char TMP2 __attribute__((vector_size(16)));
	unsigned char SRC __attribute__((vector_size(16)));
#endif

	NEON_INIT_LOOP();

	do {
		NEON_MAIN_LOOP(NEON_DO_REVERSE);
	} while ((ip += 2) < ipend);

	NEON_FINI_LOOP();
}

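/* This implementation is valid only where FPU/SIMD use is permitted. */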
static boolean_t fletcher_4_aarch64_neon_valid(void)
{
	return (kfpu_allowed());
}

const fletcher_4_ops_t fletcher_4_aarch64_neon_ops = {
	.init_native = fletcher_4_aarch64_neon_init,
	.compute_native = fletcher_4_aarch64_neon_native,
	.fini_native = fletcher_4_aarch64_neon_fini,
	.init_byteswap = fletcher_4_aarch64_neon_init,
	.compute_byteswap = fletcher_4_aarch64_neon_byteswap,
	.fini_byteswap = fletcher_4_aarch64_neon_fini,
	.valid = fletcher_4_aarch64_neon_valid,
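	/* Uses SIMD registers; kernel FPU state must be saved around use. */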
	.uses_fpu = B_TRUE,
	.name = "aarch64_neon"
};

#endif /* defined(__aarch64__) */