/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdalign.h>
#include <string.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_net_crc.h>
#include <rte_vect.h>
#include <rte_cpuflags.h>

#include "net_crc.h"

/** PMULL CRC computation context structure */
struct crc_pmull_ctx {
	uint64x2_t rk1_rk2;
	uint64x2_t rk5_rk6;
	uint64x2_t rk7_rk8;
};
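
/*
 * Note on the constants (a summary of the standard carry-less
 * multiplication CRC scheme, see Intel's "Fast CRC Computation Using
 * PCLMULQDQ Instruction" white paper): rk1/rk2 fold the running
 * remainder over each 16-byte input block, rk5/rk6 reduce the final
 * 128-bit value to 64 bits, and rk7/rk8 are the Barrett-reduction
 * constants that yield the final 32-bit CRC.
 */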

alignas(16) struct crc_pmull_ctx crc32_eth_pmull;
alignas(16) struct crc_pmull_ctx crc16_ccitt_pmull;

/**
 * @brief Performs one folding round
 *
 * Logically, the function operates as follows:
 *	DATA = READ_NEXT_16BYTES();
 *	F1 = MSB8(FOLD)
 *	F2 = LSB8(FOLD)
 *	T1 = CLMUL(F1, RK1)
 *	T2 = CLMUL(F2, RK2)
 *	FOLD = XOR(T1, T2, DATA)
 *
 * @param data_block 16 byte data block
 * @param precomp precomputed rk1 and rk2 constants
 * @param fold running 16 byte folded data
 *
 * @return New 16 byte folded data
 */
static inline uint64x2_t
crcr32_folding_round(uint64x2_t data_block, uint64x2_t precomp,
	uint64x2_t fold)
{
	uint64x2_t tmp0 = vreinterpretq_u64_p128(vmull_p64(
		vgetq_lane_p64(vreinterpretq_p64_u64(fold), 1),
		vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));

	uint64x2_t tmp1 = vreinterpretq_u64_p128(vmull_p64(
		vgetq_lane_p64(vreinterpretq_p64_u64(fold), 0),
		vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));

	return veorq_u64(tmp1, veorq_u64(data_block, tmp0));
}
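
/*
 * Note: vmull_p64 maps to the AArch64 PMULL instruction, a 64x64 ->
 * 128-bit carry-less (polynomial) multiply; the two partial products
 * above are combined with the next data block using plain XORs.
 */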

/**
 * Performs reduction from 128 bits to 64 bits
 *
 * @param data128 128-bit data to be reduced
 * @param precomp rk5 and rk6 precomputed constants
 *
 * @return data reduced to 64 bits
 */
static inline uint64x2_t
crcr32_reduce_128_to_64(uint64x2_t data128,
	uint64x2_t precomp)
{
	uint64x2_t tmp0, tmp1, tmp2;

	/* 64b fold */
	tmp0 = vreinterpretq_u64_p128(vmull_p64(
		vgetq_lane_p64(vreinterpretq_p64_u64(data128), 0),
		vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
	tmp1 = vshift_bytes_right(data128, 8);
	tmp0 = veorq_u64(tmp0, tmp1);

	/* 32b fold */
	tmp2 = vshift_bytes_left(tmp0, 4);
	tmp1 = vreinterpretq_u64_p128(vmull_p64(
		vgetq_lane_p64(vreinterpretq_p64_u64(tmp2), 0),
		vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));

	return veorq_u64(tmp1, tmp0);
}
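
/*
 * A note on the above: this implementation works in the bit-reflected
 * domain, so the "low" lanes hold the highest-order polynomial terms;
 * the rk5 multiply folds 64 bits and the rk6 multiply folds a further
 * 32, leaving a 64-bit remainder for the Barrett step below.
 */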

/**
 * Performs Barrett's reduction from 64 bits to 32 bits
 *
 * @param data64 64-bit data to be reduced
 * @param precomp rk7 and rk8 precomputed constants
 *
 * @return data reduced to 32 bits
 */
static inline uint32_t
crcr32_reduce_64_to_32(uint64x2_t data64,
	uint64x2_t precomp)
{
	/* mask1 keeps only the low 64 bits of a product */
	static alignas(16) uint32_t mask1[4] = {
		0xffffffff, 0xffffffff, 0x00000000, 0x00000000
	};
	/* mask2 clears the 32 least significant bits */
	static alignas(16) uint32_t mask2[4] = {
		0x00000000, 0xffffffff, 0xffffffff, 0xffffffff
	};
	uint64x2_t tmp0, tmp1, tmp2;

	tmp0 = vandq_u64(data64, vld1q_u64((uint64_t *)mask2));

	tmp1 = vreinterpretq_u64_p128(vmull_p64(
		vgetq_lane_p64(vreinterpretq_p64_u64(tmp0), 0),
		vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
	tmp1 = veorq_u64(tmp1, tmp0);
	tmp1 = vandq_u64(tmp1, vld1q_u64((uint64_t *)mask1));

	tmp2 = vreinterpretq_u64_p128(vmull_p64(
		vgetq_lane_p64(vreinterpretq_p64_u64(tmp1), 0),
		vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));
	tmp2 = veorq_u64(tmp2, tmp1);
	tmp2 = veorq_u64(tmp2, tmp0);

	/* the final CRC lands in bits [95:64] of tmp2 */
	return vgetq_lane_u32(vreinterpretq_u32_u64(tmp2), 2);
}
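
/*
 * This is Barrett reduction in the bit-reflected domain: rk7 is the
 * precomputed reciprocal of the polynomial (mu = floor(x^64 / P)) and
 * rk8 is the polynomial P itself, so the quotient estimate and the
 * final remainder are obtained with two carry-less multiplies instead
 * of a division.
 */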

static inline uint32_t
crc32_eth_calc_pmull(
	const uint8_t *data,
	uint32_t data_len,
	uint32_t crc,
	const struct crc_pmull_ctx *params)
{
	uint64x2_t temp, fold, k;
	uint32_t n;

	/* Get CRC init value */
	temp = vreinterpretq_u64_u32(vsetq_lane_u32(crc, vmovq_n_u32(0), 0));

	/**
	 * Fold all data into a single 16-byte data block.
	 * Assumes: fold holds the first 16 bytes of data.
	 */
	if (unlikely(data_len < 32)) {
		if (unlikely(data_len == 16)) {
			/* 16 bytes */
			fold = vld1q_u64((const uint64_t *)data);
			fold = veorq_u64(fold, temp);
			goto reduction_128_64;
		}

		if (unlikely(data_len < 16)) {
			/* 0 to 15 bytes */
			alignas(16) uint8_t buffer[16];

			memset(buffer, 0, sizeof(buffer));
			memcpy(buffer, data, data_len);

			fold = vld1q_u64((uint64_t *)buffer);
			fold = veorq_u64(fold, temp);
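			/*
			 * Position the short message at the top of the
			 * register: inputs under 4 bytes fit in 64 bits
			 * and can go straight to Barrett reduction, so
			 * they are shifted relative to the low qword;
			 * longer inputs are shifted relative to the full
			 * 128-bit lane and take the 128 -> 64 path.
			 */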
			if (unlikely(data_len < 4)) {
				fold = vshift_bytes_left(fold, 8 - data_len);
				goto barret_reduction;
			}
			fold = vshift_bytes_left(fold, 16 - data_len);
			goto reduction_128_64;
		}
		/* 17 to 31 bytes */
		fold = vld1q_u64((const uint64_t *)data);
		fold = veorq_u64(fold, temp);
		n = 16;
		k = params->rk1_rk2;
		goto partial_bytes;
	}

	/** At least 32 bytes in the buffer */
	/** Apply CRC initial value */
	fold = vld1q_u64((const uint64_t *)data);
	fold = veorq_u64(fold, temp);

	/** Main folding loop - the last 16 bytes are processed separately */
	k = params->rk1_rk2;
	for (n = 16; (n + 16) <= data_len; n += 16) {
		temp = vld1q_u64((const uint64_t *)&data[n]);
		fold = crcr32_folding_round(temp, k, fold);
	}

partial_bytes:
	if (likely(n < data_len)) {
		uint64x2_t last16, a, b, mask;
		uint32_t rem = data_len & 15;

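		/*
		 * Splice the tail: reload the last (possibly overlapping)
		 * 16 bytes of the buffer and merge them with the shifted
		 * folded state, so that one more folding round covers the
		 * 1 to 15 trailing bytes.
		 */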
		last16 = vld1q_u64((const uint64_t *)&data[data_len - 16]);
		a = vshift_bytes_left(fold, 16 - rem);
		b = vshift_bytes_right(fold, rem);
		mask = vshift_bytes_left(vdupq_n_u64(-1), 16 - rem);
		b = vorrq_u64(b, vandq_u64(mask, last16));

		/* k holds rk1 (lane 0) and rk2 (lane 1) */
		temp = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(a), 1),
			vgetq_lane_p64(vreinterpretq_p64_u64(k), 0)));
		fold = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(a), 0),
			vgetq_lane_p64(vreinterpretq_p64_u64(k), 1)));
		fold = veorq_u64(fold, temp);
		fold = veorq_u64(fold, b);
	}

	/** Reduction 128 -> 32. Assumes fold holds 128-bit folded data */
reduction_128_64:
	k = params->rk5_rk6;
	fold = crcr32_reduce_128_to_64(fold, k);

barret_reduction:
	k = params->rk7_rk8;
	n = crcr32_reduce_64_to_32(fold, k);

	return n;
}

void
rte_net_crc_neon_init(void)
{
	/* Initialize CRC16 data */
	uint64_t ccitt_k1_k2[2] = {0x189aeLLU, 0x8e10LLU};
	uint64_t ccitt_k5_k6[2] = {0x189aeLLU, 0x114aaLLU};
	uint64_t ccitt_k7_k8[2] = {0x11c581910LLU, 0x10811LLU};

	/* Initialize CRC32 data */
	uint64_t eth_k1_k2[2] = {0xccaa009eLLU, 0x1751997d0LLU};
	uint64_t eth_k5_k6[2] = {0xccaa009eLLU, 0x163cd6124LLU};
	uint64_t eth_k7_k8[2] = {0x1f7011640LLU, 0x1db710641LLU};
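
	/*
	 * These constants are derived from the bit-reflected CRC
	 * polynomials: e.g. rk8 is the reflected polynomial itself,
	 * including the implicit top bit (0x10811 for the CRC16 CCITT
	 * polynomial 0x1021, 0x1db710641 for the Ethernet CRC32
	 * polynomial 0x04c11db7).
	 */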

	/** Save the CRC16 params in the context structure */
	crc16_ccitt_pmull.rk1_rk2 = vld1q_u64(ccitt_k1_k2);
	crc16_ccitt_pmull.rk5_rk6 = vld1q_u64(ccitt_k5_k6);
	crc16_ccitt_pmull.rk7_rk8 = vld1q_u64(ccitt_k7_k8);

	/** Save the CRC32 params in the context structure */
	crc32_eth_pmull.rk1_rk2 = vld1q_u64(eth_k1_k2);
	crc32_eth_pmull.rk5_rk6 = vld1q_u64(eth_k5_k6);
	crc32_eth_pmull.rk7_rk8 = vld1q_u64(eth_k7_k8);
}

uint32_t
rte_crc16_ccitt_neon_handler(const uint8_t *data, uint32_t data_len)
{
	/* Apply the final XOR (inversion) and truncate to 16 bits */
	return (uint16_t)~crc32_eth_calc_pmull(data,
		data_len,
		0xffff,
		&crc16_ccitt_pmull);
}

uint32_t
rte_crc32_eth_neon_handler(const uint8_t *data, uint32_t data_len)
{
	/* Apply the final XOR (inversion) to the folded remainder */
	return ~crc32_eth_calc_pmull(data,
		data_len,
		0xffffffffUL,
		&crc32_eth_pmull);
}
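
/*
 * Usage sketch (illustrative only; applications normally reach these
 * handlers through the generic rte_net_crc API, which selects the
 * NEON path at runtime after rte_net_crc_neon_init() has run):
 *
 *	rte_net_crc_neon_init();
 *	const uint8_t buf[] = "123456789";
 *	uint32_t c32 = rte_crc32_eth_neon_handler(buf, 9);
 *	uint32_t c16 = rte_crc16_ccitt_neon_handler(buf, 9);
 *
 * With the standard check input above, c32 should be 0xcbf43926 (the
 * well-known CRC-32 check value); c16 corresponds to the CRC-16/X-25
 * parameter set (poly 0x1021, reflected, init/xorout 0xffff).
 */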