/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * All rights reserved.
 * Copyright 2019-2020 NXP
 *
 */

#ifndef __COMPAT_H
#define __COMPAT_H

/* _GNU_SOURCE must be defined before the first system header is included. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>

#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <linux/types.h>
#include <stdbool.h>
#include <ctype.h>
#include <malloc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <limits.h>
#include <assert.h>
#include <dirent.h>
#include <inttypes.h>
#include <error.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

/* The following definitions are primarily to allow the single-source driver
 * interfaces to be included by arbitrary program code. I.e. for interfaces
 * that are also available in kernel space, these definitions provide
 * compatibility with certain attributes and types used in those interfaces.
 */

/* Required compiler attributes */
#ifndef __maybe_unused
#define __maybe_unused __rte_unused
#endif
#ifndef __always_unused
#define __always_unused __rte_unused
#endif
#ifndef __packed
#define __packed __rte_packed
#endif
#ifndef noinline
#define noinline __rte_noinline
#endif
#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __rte_aligned(L1_CACHE_BYTES)
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

#ifdef ARRAY_SIZE
#undef ARRAY_SIZE
#endif
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Debugging */
#define prflush(fmt, args...) \
	do { \
		printf(fmt, ##args); \
		fflush(stdout); \
	} while (0)
#ifndef pr_crit
#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
#endif
#ifndef pr_err
#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
#endif
#ifndef pr_warn
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
#endif
#ifndef pr_info
#define pr_info(fmt, args...) prflush(fmt, ##args)
#endif
#ifndef pr_debug
#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define pr_debug(fmt, args...) printf(fmt, ##args)
#else
/* do-while form so pr_debug() stays safe in if/else without braces */
#define pr_debug(fmt, args...) do { } while (0)
#endif
#endif
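/*
 * Illustrative sketch, not part of the original header: how the pr_*()
 * wrappers above are typically used. pr_debug() compiles to a no-op unless
 * RTE_LIBRTE_DPAA_DEBUG_BUS is defined, so it is safe on fast paths; the
 * function and its arguments here are hypothetical.
 */
static inline void __always_unused
compat_pr_example(int fqid, int err)
{
	pr_debug("initialising FQ %d\n", fqid); /* no-op in release builds */
	if (err)
		pr_err("FQ %d setup failed: %d\n", fqid, err);
}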
#define DPAA_BUG_ON(x) RTE_ASSERT(x)

/* Required types */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uint64_t dma_addr_t;
typedef cpu_set_t cpumask_t;
typedef uint32_t phandle;
typedef uint32_t gfp_t;
typedef uint32_t irqreturn_t;

#define ETHER_ADDR_LEN 6

#define IRQ_HANDLED 0
#define request_irq qbman_request_irq
#define free_irq qbman_free_irq

#define __iomem
#define GFP_KERNEL 0
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }

/* to be used as an upper limit only */
#define NR_CPUS 64

/* Waitqueue stuff */
typedef struct { } wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
#define wake_up(x) do { } while (0)

/* I/O operations */
static inline u32 in_be32(volatile void *__p)
{
	volatile u32 *p = __p;
	return rte_be_to_cpu_32(*p);
}

static inline void out_be32(volatile void *__p, u32 val)
{
	volatile u32 *p = __p;
	*p = rte_cpu_to_be_32(val);
}

#define hwsync() rte_rmb()
#define lwsync() rte_wmb()

#define dcbt_ro(p) __builtin_prefetch(p, 0)
#define dcbt_rw(p) __builtin_prefetch(p, 1)

#if defined(RTE_ARCH_ARM)
#if defined(RTE_ARCH_64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) dcbz(p)
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
#define dcbf_64(p) dcbf(p)
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }

#define dcbit_ro(p) \
	do { \
		dccivac(p); \
		asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
	} while (0)

#else /* RTE_ARCH_32 */
#define dcbz(p) memset((p), 0, 32)
#define dcbz_64(p) memset((p), 0, 64)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)
#endif

#else
#define dcbz(p) RTE_SET_USED(p)
#define dcbz_64(p) dcbz(p)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)
#endif

#define barrier() { asm volatile ("" : : : "memory"); }
#define cpu_relax barrier

#if defined(RTE_ARCH_ARM64)
static inline uint64_t mfatb(void)
{
	uint64_t ret, ret_new, timeout = 200;

	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	/* Re-read until two consecutive reads agree, so a counter update
	 * racing with the first read cannot yield a stale value.
	 */
	while (ret != ret_new && timeout--) {
		ret = ret_new;
		asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	}
	DPAA_BUG_ON(!timeout && (ret != ret_new));
	return ret * 64;
}
#else

#define mfatb rte_rdtsc

#endif

/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
{
	uint64_t now = mfatb();

	while (mfatb() < (now + cycles))
		;
}

/* Qman/Bman API inlines and macros */
#ifdef lower_32_bits
#undef lower_32_bits
#endif
#define lower_32_bits(x) ((u32)(x))

#ifdef upper_32_bits
#undef upper_32_bits
#endif
/* The double 16-bit shift keeps this well defined even for a 32-bit x,
 * where a single ">> 32" would be undefined behaviour.
 */
#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
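/*
 * Illustrative sketch (hypothetical helper, not from the original file):
 * splitting a 64-bit DMA address into the 32-bit halves that hardware
 * descriptor fields commonly expect, using the two macros above.
 */
static inline void __always_unused
compat_split_addr_example(dma_addr_t addr, u32 *hi, u32 *lo)
{
	*hi = upper_32_bits(addr); /* bits 63..32 */
	*lo = lower_32_bits(addr); /* bits 31..0 */
}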
/*
 * Swap bytes of a 48-bit value.
 */
static inline uint64_t
__bswap_48(uint64_t x)
{
	return  ((x & 0x0000000000ffULL) << 40) |
		((x & 0x00000000ff00ULL) << 24) |
		((x & 0x000000ff0000ULL) <<  8) |
		((x & 0x0000ff000000ULL) >>  8) |
		((x & 0x00ff00000000ULL) >> 24) |
		((x & 0xff0000000000ULL) >> 40);
}

/*
 * Swap bytes of a 40-bit value.
 */
static inline uint64_t
__bswap_40(uint64_t x)
{
	return  ((x & 0x00000000ffULL) << 32) |
		((x & 0x000000ff00ULL) << 16) |
		((x & 0x0000ff0000ULL)) |
		((x & 0x00ff000000ULL) >> 16) |
		((x & 0xff00000000ULL) >> 32);
}

/*
 * Swap bytes of a 24-bit value.
 */
static inline uint32_t
__bswap_24(uint32_t x)
{
	return  ((x & 0x0000ffULL) << 16) |
		((x & 0x00ff00ULL)) |
		((x & 0xff0000ULL) >> 16);
}

#define be64_to_cpu(x) rte_be_to_cpu_64(x)
#define be32_to_cpu(x) rte_be_to_cpu_32(x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)

#define cpu_to_be64(x) rte_cpu_to_be_64(x)
#if !defined(cpu_to_be32)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
#endif
#define cpu_to_be16(x) rte_cpu_to_be_16(x)

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define cpu_to_be48(x) __bswap_48(x)
#define be48_to_cpu(x) __bswap_48(x)

#define cpu_to_be40(x) __bswap_40(x)
#define be40_to_cpu(x) __bswap_40(x)

#define cpu_to_be24(x) __bswap_24(x)
#define be24_to_cpu(x) __bswap_24(x)

#else /* RTE_BIG_ENDIAN */

#define cpu_to_be48(x) (x)
#define be48_to_cpu(x) (x)

#define cpu_to_be40(x) (x)
#define be40_to_cpu(x) (x)

#define cpu_to_be24(x) (x)
#define be24_to_cpu(x) (x)

#endif /* RTE_BIG_ENDIAN */

/* memcpy() stuff - when alignments are known in advance, these helpers
 * avoid the generic memcpy() for aligned word/short copies.
 */
#define CONFIG_TRY_BETTER_MEMCPY

#ifdef CONFIG_TRY_BETTER_MEMCPY
static inline void copy_words(void *dest, const void *src, size_t sz)
{
	u32 *__dest = dest;
	const u32 *__src = src;
	size_t __sz = sz >> 2;

	DPAA_BUG_ON((unsigned long)dest & 0x3);
	DPAA_BUG_ON((unsigned long)src & 0x3);
	DPAA_BUG_ON(sz & 0x3);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_shorts(void *dest, const void *src, size_t sz)
{
	u16 *__dest = dest;
	const u16 *__src = src;
	size_t __sz = sz >> 1;

	DPAA_BUG_ON((unsigned long)dest & 0x1);
	DPAA_BUG_ON((unsigned long)src & 0x1);
	DPAA_BUG_ON(sz & 0x1);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_bytes(void *dest, const void *src, size_t sz)
{
	u8 *__dest = dest;
	const u8 *__src = src;

	while (sz--)
		*(__dest++) = *(__src++);
}
#else
#define copy_words memcpy
#define copy_shorts memcpy
#define copy_bytes memcpy
#endif

/* Allocator stuff */
#define kmalloc(sz, t) rte_malloc(NULL, sz, 0)
#define vmalloc(sz) rte_malloc(NULL, sz, 0)
#define kfree(p) { if (p) rte_free(p); }
static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
{
	void *ptr = rte_malloc(NULL, sz, 0);

	if (ptr)
		memset(ptr, 0, sz);
	return ptr;
}

static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
{
	void *p;

	if (posix_memalign(&p, 4096, 4096))
		return 0;
	memset(p, 0, 4096);
	return (unsigned long)p;
}
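/*
 * Illustrative sketch (hypothetical type and function, not part of the
 * original interface): the kernel-style allocators above all resolve to
 * rte_malloc()/rte_free(), so the gfp_t argument is accepted but ignored
 * and GFP_KERNEL is simply 0 in this userspace build.
 */
struct compat_alloc_example {
	u32 state;
};

static inline struct compat_alloc_example * __always_unused
compat_alloc_example_new(void)
{
	/* Zeroed allocation, as kzalloc() guarantees in the kernel too. */
	return kzalloc(sizeof(struct compat_alloc_example), GFP_KERNEL);
}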
/* Spinlock stuff */
#define spinlock_t rte_spinlock_t
#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define spin_lock_init(x) rte_spinlock_init(x)
#define spin_lock_destroy(x)
#define spin_lock(x) rte_spinlock_lock(x)
#define spin_unlock(x) rte_spinlock_unlock(x)
#define spin_lock_irq(x) spin_lock(x)
#define spin_unlock_irq(x) spin_unlock(x)
#define spin_lock_irqsave(x, f) spin_lock_irq(x)
#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)

#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)

#define atomic_inc(v) rte_atomic32_add(v, 1)
#define atomic_dec(v) rte_atomic32_sub(v, 1)

#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)

#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)

/* Interface name length */
#define IF_NAME_MAX_LEN 16

#endif /* __COMPAT_H */
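/*
 * Illustrative sketch showing the spinlock and atomic wrappers used
 * together, kernel-style; the type and function are hypothetical, not part
 * of the original interface. It sits outside the main include guard, so it
 * carries its own guard to keep repeated inclusion harmless.
 */
#ifndef __COMPAT_H_EXAMPLES
#define __COMPAT_H_EXAMPLES
struct compat_counter_example {
	spinlock_t lock; /* really an rte_spinlock_t */
	atomic_t refs;   /* really an rte_atomic32_t */
};

static inline void __always_unused
compat_counter_example_get(struct compat_counter_example *c)
{
	unsigned long flags __always_unused;

	/* c->lock is assumed already initialised via spin_lock_init().
	 * The irqsave/irqrestore variants map to plain lock/unlock here.
	 */
	spin_lock_irqsave(&c->lock, flags);
	atomic_inc(&c->refs);
	spin_unlock_irqrestore(&c->lock, flags);
}
#endif /* __COMPAT_H_EXAMPLES */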