/*
 * AArch64-specific checksum implementation using NEON
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "networking.h"
#include "../chksum_common.h"

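/*
 * The intrinsics below require the Advanced SIMD (NEON) extension; enable it
 * explicitly when the translation unit is not already built with it.
 */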
#ifndef __ARM_NEON
#pragma GCC target("+simd")
#endif

#include <arm_neon.h>

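/*
 * Consume up to 7 leading bytes so that the pointer becomes 8-byte aligned,
 * returning them as a partial, already folded sum. The aligned 64-bit word
 * containing them is loaded whole and the bytes that precede the buffer
 * (the low-order bytes of a little-endian load) are masked away.
 */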
always_inline
static inline uint64_t
slurp_head64(const void **pptr, uint32_t *nbytes)
{
    Assert(*nbytes >= 8);
    uint64_t sum = 0;
    uint32_t off = (uintptr_t) *pptr % 8;
    if (likely(off != 0))
    {
	/* Get rid of bytes 0..off-1 */
	const unsigned char *ptr64 = align_ptr(*pptr, 8);
	uint64_t mask = ALL_ONES << (CHAR_BIT * off);
	uint64_t val = load64(ptr64) & mask;
	/* Fold 64-bit sum to 33 bits */
	sum = val >> 32;
	sum += (uint32_t) val;
	*pptr = ptr64 + 8;
	*nbytes -= 8 - off;
    }
    return sum;
}

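/*
 * Add the final 0..7 bytes to the running sum. The 64-bit word holding them
 * is loaded whole and the bytes past the end of the buffer are masked away
 * before folding; the caller passes an 8-byte-aligned pointer, so the load
 * stays within one aligned word.
 */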
always_inline
static inline uint64_t
slurp_tail64(uint64_t sum, const void *ptr, uint32_t nbytes)
{
    Assert(nbytes < 8);
    if (likely(nbytes != 0))
    {
	/* Get rid of bytes nbytes..7 */
	uint64_t mask = ALL_ONES >> (CHAR_BIT * (8 - nbytes));
	Assert(__builtin_popcountl(mask) / CHAR_BIT == nbytes);
	uint64_t val = load64(ptr) & mask;
	sum += val >> 32;
	sum += (uint32_t) val;
	nbytes = 0;
    }
    Assert(nbytes == 0);
    return sum;
}

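/*
 * Compute a 16-bit end-around-carry (ones'-complement) sum of the buffer, as
 * used by the Internet checksum. Large buffers are summed 64 bytes per
 * iteration with NEON pairwise-add-and-accumulate; the unaligned head and
 * the tail are handled with masked 64-bit scalar loads.
 */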
unsigned short
__chksum_aarch64_simd(const void *ptr, unsigned int nbytes)
{
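    /*
     * If the buffer starts on an odd address, the 16-bit words are summed
     * with their bytes swapped; fold_and_swap() byte-swaps the folded result
     * to compensate.
     */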
    bool swap = (uintptr_t) ptr & 1;
    uint64_t sum;

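    /*
     * For short buffers the generic scalar path (slurp_small) is cheaper
     * than setting up the vector loop; the cut-off is a tuning choice.
     */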
    if (unlikely(nbytes < 50))
    {
	sum = slurp_small(ptr, nbytes);
	swap = false;
	goto fold;
    }

    /* 8-byte align pointer */
    Assert(nbytes >= 8);
    sum = slurp_head64(&ptr, &nbytes);
    Assert(((uintptr_t) ptr & 7) == 0);

    const uint32_t *may_alias ptr32 = ptr;

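    /*
     * Four independent accumulators let successive pairwise
     * add-and-accumulate operations overlap instead of serialising on a
     * single register.
     */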
    uint64x2_t vsum0 = { 0, 0 };
    uint64x2_t vsum1 = { 0, 0 };
    uint64x2_t vsum2 = { 0, 0 };
    uint64x2_t vsum3 = { 0, 0 };

    /* Sum groups of 64 bytes */
    for (uint32_t i = 0; i < nbytes / 64; i++)
    {
	uint32x4_t vtmp0 = vld1q_u32(ptr32);
	uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
	uint32x4_t vtmp2 = vld1q_u32(ptr32 + 8);
	uint32x4_t vtmp3 = vld1q_u32(ptr32 + 12);
	vsum0 = vpadalq_u32(vsum0, vtmp0);
	vsum1 = vpadalq_u32(vsum1, vtmp1);
	vsum2 = vpadalq_u32(vsum2, vtmp2);
	vsum3 = vpadalq_u32(vsum3, vtmp3);
	ptr32 += 16;
    }
    nbytes %= 64;

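    /*
     * Merging accumulators: vpadalq_u32 on a reinterpreted uint64x2_t adds
     * the two 32-bit halves of each 64-bit lane, i.e. it folds the lane to
     * at most 33 bits before accumulating it, so no carries are lost.
     */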
    /* Fold vsum2 and vsum3 into vsum0 and vsum1 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum2));
    vsum1 = vpadalq_u32(vsum1, vreinterpretq_u32_u64(vsum3));

    /* Add any trailing group of 32 bytes */
    if (nbytes & 32)
    {
	uint32x4_t vtmp0 = vld1q_u32(ptr32);
	uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
	vsum0 = vpadalq_u32(vsum0, vtmp0);
	vsum1 = vpadalq_u32(vsum1, vtmp1);
	ptr32 += 8;
	nbytes -= 32;
    }
    Assert(nbytes < 32);

    /* Fold vsum1 into vsum0 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum1));

    /* Add any trailing group of 16 bytes */
    if (nbytes & 16)
    {
	uint32x4_t vtmp = vld1q_u32(ptr32);
	vsum0 = vpadalq_u32(vsum0, vtmp);
	ptr32 += 4;
	nbytes -= 16;
    }
    Assert(nbytes < 16);

    /* Add any trailing group of 8 bytes */
    if (nbytes & 8)
    {
	uint32x2_t vtmp = vld1_u32(ptr32);
	vsum0 = vaddw_u32(vsum0, vtmp);
	ptr32 += 2;
	nbytes -= 8;
    }
    Assert(nbytes < 8);

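    /*
     * vaddlvq_u32 adds all four 32-bit halves of the two accumulator lanes
     * into a single 64-bit value, which is then folded to 33 bits and added
     * to the running sum.
     */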
    uint64_t val = vaddlvq_u32(vreinterpretq_u32_u64(vsum0));
    sum += val >> 32;
    sum += (uint32_t) val;

    /* Handle any trailing 0..7 bytes */
    sum = slurp_tail64(sum, ptr32, nbytes);

fold:
    return fold_and_swap(sum, swap);
}