/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * All rights reserved.
 * Copyright 2019-2020 NXP
 *
 */

#ifndef __COMPAT_H
#define __COMPAT_H

#include <sched.h>
#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <linux/types.h>
#include <stdbool.h>
#include <ctype.h>
#include <malloc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <limits.h>
#include <assert.h>
#include <dirent.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_prefetch.h>

/* The following definitions are primarily to allow the single-source driver
 * interfaces to be included by arbitrary program code. That is, for
 * interfaces that are also available in kernel space, these definitions
 * provide compatibility with the attributes and types used there.
 */

/* Required compiler attributes */
#ifndef __maybe_unused
#define __maybe_unused	__rte_unused
#endif
#ifndef __always_unused
#define __always_unused	__rte_unused
#endif
#ifndef __packed
#define __packed	__rte_packed
#endif
#ifndef noinline
#define noinline	__rte_noinline
#endif
#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __rte_aligned(L1_CACHE_BYTES)
#define __stringify_1(x) #x
#define __stringify(x)	__stringify_1(x)

#ifdef ARRAY_SIZE
#undef ARRAY_SIZE
#endif
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
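
/*
 * Illustrative use only: ARRAY_SIZE() yields the element count of a
 * statically sized array (it does not work on a pointer):
 *
 *	static u32 fqids[8];
 *	for (i = 0; i < ARRAY_SIZE(fqids); i++)	// iterates 8 times
 *		fqids[i] = 0;
 *
 * "fqids" is a hypothetical name, not part of this header.
 */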

/* Debugging */
#define prflush(fmt, args...) \
	do { \
		printf(fmt, ##args); \
		fflush(stdout); \
	} while (0)
#ifndef pr_crit
#define pr_crit(fmt, args...)	 prflush("CRIT:" fmt, ##args)
#endif
#ifndef pr_err
#define pr_err(fmt, args...)	 prflush("ERR:" fmt, ##args)
#endif
#ifndef pr_warn
#define pr_warn(fmt, args...)	 prflush("WARN:" fmt, ##args)
#endif
#ifndef pr_info
#define pr_info(fmt, args...)	 prflush(fmt, ##args)
#endif
#ifndef pr_debug
#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define pr_debug(fmt, args...)	printf(fmt, ##args)
#else
/* Use do-while rather than bare braces so the no-op expands safely in
 * unbraced if/else bodies.
 */
#define pr_debug(fmt, args...)	do { } while (0)
#endif
#endif

/* Kernel-style BUG_ON() triggers when its condition is TRUE, whereas
 * RTE_ASSERT() panics when its condition is FALSE, so the argument must be
 * inverted when mapping one onto the other.
 */
#define DPAA_BUG_ON(x) RTE_ASSERT(!(x))
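
/*
 * Illustrative use only (names are hypothetical):
 *
 *	if (!fq)
 *		pr_err("FQ lookup failed: %d\n", -ENOENT);
 *	DPAA_BUG_ON(fqid == 0);	// panics (under RTE_ENABLE_ASSERT) if true
 */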

/* Required types */
typedef uint8_t		u8;
typedef uint16_t	u16;
typedef uint32_t	u32;
typedef uint64_t	u64;
typedef uint64_t	dma_addr_t;
typedef cpu_set_t	cpumask_t;
typedef uint32_t	phandle;
typedef uint32_t	gfp_t;
typedef uint32_t	irqreturn_t;

#define ETHER_ADDR_LEN 6

#define IRQ_HANDLED	0
#define request_irq	qbman_request_irq
#define free_irq	qbman_free_irq

#define __iomem
#define GFP_KERNEL	0
#define __raw_readb(p)	(*(const volatile unsigned char *)(p))
#define __raw_readl(p)	(*(const volatile unsigned int *)(p))
/* Wrapped in do-while so the statement composes safely with if/else. */
#define __raw_writel(v, p) do { *(volatile unsigned int *)(p) = (v); } while (0)

/* to be used as an upper-limit only */
#define NR_CPUS			64

/* Waitqueue stuff */
typedef struct { }		wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
#define wake_up(x)		do { } while (0)

/* I/O operations */
static inline u32 in_be32(volatile void *__p)
{
	volatile u32 *p = __p;
	return rte_be_to_cpu_32(*p);
}

static inline void out_be32(volatile void *__p, u32 val)
{
	volatile u32 *p = __p;
	*p = rte_cpu_to_be_32(val);
}
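
/*
 * Illustrative use only: reading and writing a big-endian, memory-mapped
 * register through the accessors above ("regs" and the 0x8 offset are
 * hypothetical):
 *
 *	u32 cfg = in_be32((u8 *)regs + 0x8);	// BE register -> CPU order
 *	out_be32((u8 *)regs + 0x8, cfg | 0x1);	// CPU order -> BE register
 */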

/* PowerPC-style barrier and cache mnemonics, mapped onto DPDK primitives */
#define hwsync() rte_rmb()
#define lwsync() rte_wmb()

#define dcbt_ro(p) rte_prefetch0(p)
#define dcbt_rw(p) rte_prefetch0_write(p)

#if defined(RTE_ARCH_ARM)
#if defined(RTE_ARCH_64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) dcbz(p)
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
#define dcbf_64(p) dcbf(p)
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }

#define dcbit_ro(p) \
	do { \
		dccivac(p);						\
		asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p));	\
	} while (0)

#else /* RTE_ARCH_32 */
#define dcbz(p) memset((p), 0, 32)
#define dcbz_64(p) memset((p), 0, 64)
#define dcbf(p)	RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p)	RTE_SET_USED(p)
#define dcbit_ro(p)	RTE_SET_USED(p)
#endif

#else
#define dcbz(p)	RTE_SET_USED(p)
#define dcbz_64(p) dcbz(p)
#define dcbf(p)	RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p)	RTE_SET_USED(p)
#define dcbit_ro(p)	RTE_SET_USED(p)
#endif

/* Wrapped in do-while so the statement composes safely with if/else. */
#define barrier() do { asm volatile ("" : : : "memory"); } while (0)
#define cpu_relax barrier

#if defined(RTE_ARCH_ARM64)
/* Read the generic timer (cntvct_el0), re-reading until two consecutive
 * reads agree so an unstable value is never returned, and giving up after
 * 200 attempts. The result is scaled by 64, presumably to approximate a
 * CPU-cycle timebase from the slower generic timer.
 */
static inline uint64_t mfatb(void)
{
	uint64_t ret, ret_new, timeout = 200;

	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	while (ret != ret_new && timeout--) {
		ret = ret_new;
		asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	}
	DPAA_BUG_ON(!timeout && (ret != ret_new));
	return ret * 64;
}
#else

#define mfatb rte_rdtsc

#endif

/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
{
	uint64_t now = mfatb();

	while (mfatb() < (now + cycles))
		;
}
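
/*
 * Illustrative use only: back off briefly between polls of a busy
 * resource ("portal_busy" is a hypothetical predicate):
 *
 *	while (portal_busy())
 *		cpu_spin(200);	// burn ~200 timebase ticks off the bus
 */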

/* Qman/Bman API inlines and macros */
#ifdef lower_32_bits
#undef lower_32_bits
#endif
#define lower_32_bits(x) ((u32)(x))

#ifdef upper_32_bits
#undef upper_32_bits
#endif
/* The shift is split in two so the macro stays well-defined when (x)
 * happens to be only 32 bits wide (a single ">> 32" would be undefined
 * behaviour there).
 */
#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
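
/*
 * Illustrative use only: programming a 64-bit dma_addr_t into a pair of
 * 32-bit registers ("addr_hi"/"addr_lo"/"daddr" are hypothetical names):
 *
 *	out_be32(addr_hi, upper_32_bits(daddr));
 *	out_be32(addr_lo, lower_32_bits(daddr));
 */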

/*
 * Swap bytes of a 48-bit value.
 */
static inline uint64_t
__bswap_48(uint64_t x)
{
	return  ((x & 0x0000000000ffULL) << 40) |
		((x & 0x00000000ff00ULL) << 24) |
		((x & 0x000000ff0000ULL) <<  8) |
		((x & 0x0000ff000000ULL) >>  8) |
		((x & 0x00ff00000000ULL) >> 24) |
		((x & 0xff0000000000ULL) >> 40);
}

/*
 * Swap bytes of a 40-bit value.
 */
static inline uint64_t
__bswap_40(uint64_t x)
{
	return  ((x & 0x00000000ffULL) << 32) |
		((x & 0x000000ff00ULL) << 16) |
		((x & 0x0000ff0000ULL)) |
		((x & 0x00ff000000ULL) >> 16) |
		((x & 0xff00000000ULL) >> 32);
}

/*
 * Swap bytes of a 24-bit value.
 */
static inline uint32_t
__bswap_24(uint32_t x)
{
	return  ((x & 0x0000ffULL) << 16) |
		((x & 0x00ff00ULL)) |
		((x & 0xff0000ULL) >> 16);
}
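
/*
 * Worked example (pure arithmetic, for illustration): the six low bytes
 * are mirrored and the top two bytes of the result stay zero:
 *
 *	__bswap_48(0x123456789abcULL) == 0xbc9a78563412ULL
 */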

#define be64_to_cpu(x) rte_be_to_cpu_64(x)
#define be32_to_cpu(x) rte_be_to_cpu_32(x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)

#define cpu_to_be64(x) rte_cpu_to_be_64(x)
#if !defined(cpu_to_be32)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
#endif
#define cpu_to_be16(x) rte_cpu_to_be_16(x)

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define cpu_to_be48(x) __bswap_48(x)
#define be48_to_cpu(x) __bswap_48(x)

#define cpu_to_be40(x) __bswap_40(x)
#define be40_to_cpu(x) __bswap_40(x)

#define cpu_to_be24(x) __bswap_24(x)
#define be24_to_cpu(x) __bswap_24(x)

#else /* RTE_BIG_ENDIAN */

#define cpu_to_be48(x) (x)
#define be48_to_cpu(x) (x)

#define cpu_to_be40(x) (x)
#define be40_to_cpu(x) (x)

#define cpu_to_be24(x) (x)
#define be24_to_cpu(x) (x)

#endif /* RTE_BIG_ENDIAN */

/* memcpy() replacements - used when alignments are known in advance, to
 * avoid the generic memcpy() for aligned word/short copies.
 */
#define CONFIG_TRY_BETTER_MEMCPY

#ifdef CONFIG_TRY_BETTER_MEMCPY
static inline void copy_words(void *dest, const void *src, size_t sz)
{
	u32 *__dest = dest;
	const u32 *__src = src;
	size_t __sz = sz >> 2;

	DPAA_BUG_ON((unsigned long)dest & 0x3);
	DPAA_BUG_ON((unsigned long)src & 0x3);
	DPAA_BUG_ON(sz & 0x3);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_shorts(void *dest, const void *src, size_t sz)
{
	u16 *__dest = dest;
	const u16 *__src = src;
	size_t __sz = sz >> 1;

	DPAA_BUG_ON((unsigned long)dest & 0x1);
	DPAA_BUG_ON((unsigned long)src & 0x1);
	DPAA_BUG_ON(sz & 0x1);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_bytes(void *dest, const void *src, size_t sz)
{
	u8 *__dest = dest;
	const u8 *__src = src;

	while (sz--)
		*(__dest++) = *(__src++);
}
#else
#define copy_words memcpy
#define copy_shorts memcpy
#define copy_bytes memcpy
#endif
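
/*
 * Illustrative use only: both buffers and the size must be 4-byte
 * aligned for copy_words() ("fd_copy"/"fd" are hypothetical):
 *
 *	u32 fd_copy[8], fd[8];
 *	copy_words(fd_copy, fd, sizeof(fd));	// 32 bytes, word-aligned
 */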

/* Allocator stuff - the kernel gfp_t flag argument is accepted but ignored */
#define kmalloc(sz, t)	rte_malloc(NULL, sz, 0)
#define kzalloc(sz, t)  rte_zmalloc(NULL, sz, 0)
#define vmalloc(sz)	rte_malloc(NULL, sz, 0)
#define kfree(p)	rte_free(p)

/* Assumes 4 KiB pages; the returned page comes from posix_memalign(), so
 * it must be released with free(), not kfree().
 */
static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
{
	void *p;

	if (posix_memalign(&p, 4096, 4096))
		return 0;
	memset(p, 0, 4096);
	return (unsigned long)p;
}
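
/*
 * Illustrative use only:
 *
 *	unsigned long page = get_zeroed_page(GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	free((void *)page);	// NOT kfree(): see note above
 */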

/* Spinlock stuff */
#define spinlock_t		rte_spinlock_t
#define __SPIN_LOCK_UNLOCKED(x)	RTE_SPINLOCK_INITIALIZER
#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define spin_lock_init(x)	rte_spinlock_init(x)
#define spin_lock_destroy(x)
#define spin_lock(x)		rte_spinlock_lock(x)
#define spin_unlock(x)		rte_spinlock_unlock(x)
#define spin_lock_irq(x)	spin_lock(x)
#define spin_unlock_irq(x)	spin_unlock(x)
#define spin_lock_irqsave(x, f) spin_lock_irq(x)
#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
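
/*
 * Illustrative use only - the _irqsave flags argument is accepted but
 * unused in userspace ("lock"/"flags" are hypothetical):
 *
 *	DEFINE_SPINLOCK(lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	...critical section...
 *	spin_unlock_irqrestore(&lock, flags);
 */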

#define atomic_t                rte_atomic32_t
#define atomic_read(v)          rte_atomic32_read(v)
#define atomic_set(v, i)        rte_atomic32_set(v, i)

#define atomic_inc(v)           rte_atomic32_add(v, 1)
#define atomic_dec(v)           rte_atomic32_sub(v, 1)

#define atomic_inc_and_test(v)  rte_atomic32_inc_and_test(v)
#define atomic_dec_and_test(v)  rte_atomic32_dec_and_test(v)

#define atomic_inc_return(v)    rte_atomic32_add_return(v, 1)
#define atomic_dec_return(v)    rte_atomic32_sub_return(v, 1)
#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
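
/*
 * Illustrative use only: a kernel-style refcount pattern on top of the
 * wrappers above ("refs"/"release" are hypothetical):
 *
 *	atomic_t refs;
 *
 *	atomic_set(&refs, 1);
 *	atomic_inc(&refs);		// take a reference
 *	if (atomic_dec_and_test(&refs))	// drop; true when count hits 0
 *		release();
 */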

/* Maximum interface name length */
#define IF_NAME_MAX_LEN 16

#endif /* __COMPAT_H */