/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Cavium, Inc
 */

#ifndef _RTE_VECT_ARM_H_
#define _RTE_VECT_ARM_H_

#include <stdint.h>
#include "generic/rte_vect.h"
#include "rte_debug.h"
#include "arm_neon.h"
#ifdef RTE_HAS_SVE_ACLE
#include <arm_sve.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_VECT_DEFAULT_SIMD_BITWIDTH RTE_VECT_SIMD_MAX

typedef int32x4_t xmm_t;

#define	XMM_SIZE	(sizeof(xmm_t))
#define	XMM_MASK	(XMM_SIZE - 1)

typedef union __rte_aligned(16) rte_xmm {
	xmm_t    x;
	uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
	double   pd[XMM_SIZE / sizeof(double)];
} rte_xmm_t;

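/*
 * Illustrative sketch, not part of the original header: the rte_xmm_t union
 * overlays scalar arrays on an xmm_t register, so scalar code can read
 * individual lanes after spilling the vector. The example_* name below is
 * hypothetical.
 */
static inline uint32_t
example_xmm_lane_u32(xmm_t v, unsigned int lane)
{
	rte_xmm_t tmp;

	tmp.x = v;			/* spill the vector into the union */
	return tmp.u32[lane & 0x3];	/* read one of the four 32-bit lanes */
}
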
#if defined(RTE_ARCH_ARM) && defined(RTE_ARCH_32)
/* NEON intrinsic vqtbl1q_u8() is not supported in ARMv7-A (AArch32) */
static __inline uint8x16_t
vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
	uint8_t i, pos;
	rte_xmm_t rte_a, rte_b, rte_ret;

	vst1q_u8(rte_a.u8, a);
	vst1q_u8(rte_b.u8, b);

	for (i = 0; i < 16; i++) {
		pos = rte_b.u8[i];
		if (pos < 16)
			rte_ret.u8[i] = rte_a.u8[pos];
		else
			rte_ret.u8[i] = 0;
	}

	return vld1q_u8(rte_ret.u8);
}

/* NEON intrinsic vaddvq_u16() is not supported in ARMv7-A (AArch32) */
static inline uint16_t
vaddvq_u16(uint16x8_t a)
{
	uint32x4_t m = vpaddlq_u16(a);
	uint64x2_t n = vpaddlq_u32(m);
	uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);

	return vget_lane_u32((uint32x2_t)o, 0);
}

#endif

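/*
 * Illustrative sketch, not part of the original header: vqtbl1q_u8() does a
 * byte-wise table lookup, so an index vector of 15..0 reverses the sixteen
 * bytes of a register. This works with the AArch32 fallback above as well as
 * the native AArch64 intrinsic. The example_* name is hypothetical.
 */
static inline uint8x16_t
example_reverse_bytes(uint8x16_t v)
{
	/* idx[i] selects byte idx[i] of v for lane i of the result */
	static const uint8_t idx[16] = {
		15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
	};

	return vqtbl1q_u8(v, vld1q_u8(idx));
}
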
#if (defined(RTE_ARCH_ARM) && defined(RTE_ARCH_32)) || \
(defined(RTE_ARCH_ARM64) && RTE_CC_IS_GNU && (GCC_VERSION < 70000))
/* NEON intrinsic vcopyq_laneq_u32() is not supported in ARMv7-A (AArch32).
 * On AArch64, this intrinsic is supported since GCC version 7.
 */
static inline uint32x4_t
vcopyq_laneq_u32(uint32x4_t a, const int lane_a,
		 uint32x4_t b, const int lane_b)
{
	return vsetq_lane_u32(vgetq_lane_u32(b, lane_b), a, lane_a);
}
#endif

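/*
 * Illustrative sketch, not part of the original header: vcopyq_laneq_u32()
 * copies a single 32-bit lane between vectors; both lane indices must be
 * compile-time constants. The example_* name is hypothetical.
 */
static inline uint32x4_t
example_copy_high_to_low(uint32x4_t dst, uint32x4_t src)
{
	/* dst[0] = src[3]; the other three lanes of dst are left unchanged */
	return vcopyq_laneq_u32(dst, 0, src, 3);
}
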
#if defined(RTE_ARCH_ARM64)
#if RTE_CC_IS_GNU && (GCC_VERSION < 70000)

/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
static inline uint64x2_t
vreinterpretq_u64_p128(poly128_t x)
{
	return (uint64x2_t)x;
}

/* NEON intrinsic vreinterpretq_p64_u64() is supported since GCC version 7 */
static inline poly64x2_t
vreinterpretq_p64_u64(uint64x2_t x)
{
	return (poly64x2_t)x;
}

/* NEON intrinsic vgetq_lane_p64() is supported since GCC version 7 */
static inline poly64_t
vgetq_lane_p64(poly64x2_t x, const int lane)
{
	RTE_ASSERT(lane >= 0 && lane <= 1);

	poly64_t *p = (poly64_t *)&x;

	return p[lane];
}
#endif
#endif

/*
 * If (0 <= index <= 15), call the ASIMD EXT instruction on the
 * 128-bit registers v0 and v1 with the given index.
 *
 * Otherwise, return a zero vector.
 */
static inline uint8x16_t
vextract(uint8x16_t v0, uint8x16_t v1, const int index)
{
	switch (index) {
	case 0: return vextq_u8(v0, v1, 0);
	case 1: return vextq_u8(v0, v1, 1);
	case 2: return vextq_u8(v0, v1, 2);
	case 3: return vextq_u8(v0, v1, 3);
	case 4: return vextq_u8(v0, v1, 4);
	case 5: return vextq_u8(v0, v1, 5);
	case 6: return vextq_u8(v0, v1, 6);
	case 7: return vextq_u8(v0, v1, 7);
	case 8: return vextq_u8(v0, v1, 8);
	case 9: return vextq_u8(v0, v1, 9);
	case 10: return vextq_u8(v0, v1, 10);
	case 11: return vextq_u8(v0, v1, 11);
	case 12: return vextq_u8(v0, v1, 12);
	case 13: return vextq_u8(v0, v1, 13);
	case 14: return vextq_u8(v0, v1, 14);
	case 15: return vextq_u8(v0, v1, 15);
	}
	return vdupq_n_u8(0);
}

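/*
 * Illustrative sketch, not part of the original header: because vextract()
 * reads 16 consecutive bytes of the concatenation (v0, v1) starting at
 * 'index', passing the same vector twice rotates its byte lanes, i.e.
 * lane i of the result is lane (i + n) % 16 of the input. The example_*
 * name is hypothetical.
 */
static inline uint8x16_t
example_rotate_byte_lanes(uint8x16_t v, const unsigned int n)
{
	return vextract(v, v, n & 0xf);
}
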
/**
 * Shifts a 128-bit register right by the specified number of bytes.
 *
 * The value of the shift parameter must be in the range 0 - 16.
 */
static inline uint64x2_t
vshift_bytes_right(uint64x2_t reg, const unsigned int shift)
{
	return vreinterpretq_u64_u8(vextract(
				vreinterpretq_u8_u64(reg),
				vdupq_n_u8(0),
				shift));
}

/**
 * Shifts a 128-bit register left by the specified number of bytes.
 *
 * The value of the shift parameter must be in the range 0 - 16.
 */
static inline uint64x2_t
vshift_bytes_left(uint64x2_t reg, const unsigned int shift)
{
	return vreinterpretq_u64_u8(vextract(
				vdupq_n_u8(0),
				vreinterpretq_u8_u64(reg),
				16 - shift));
}

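/*
 * Illustrative sketch, not part of the original header: on a little-endian
 * target (the usual DPDK configuration), shifting right by 8 bytes moves the
 * upper 64-bit lane into the lower one and zero-fills the upper lane, so the
 * result below is equivalent to vgetq_lane_u64(reg, 1). The example_* name
 * is hypothetical.
 */
static inline uint64_t
example_extract_high_u64(uint64x2_t reg)
{
	return vgetq_lane_u64(vshift_bytes_right(reg, 8), 0);
}
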
#ifdef __cplusplus
}
#endif

#endif