xref: /dpdk/drivers/common/idpf/base/idpf_osdep.h (revision e77506397fc8005c5129e22e9e2d15d5876790fd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2024 Intel Corporation
3  */
4 
5 #ifndef _IDPF_OSDEP_H_
6 #define _IDPF_OSDEP_H_
7 
8 #include <string.h>
9 #include <stdint.h>
10 #include <stdio.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <sys/queue.h>
14 #include <stdbool.h>
15 
16 #include <rte_common.h>
17 #include <rte_memcpy.h>
18 #include <rte_malloc.h>
19 #include <rte_memzone.h>
20 #include <rte_byteorder.h>
21 #include <rte_cycles.h>
22 #include <rte_spinlock.h>
23 #include <rte_log.h>
24 #include <rte_random.h>
25 #include <rte_io.h>
26 #include <rte_compat.h>
27 
28 #include "../idpf_common_logs.h"
29 
30 #define INLINE inline
31 #define STATIC static
32 
/* Linux-kernel style fixed-width integer aliases used by shared code. */
typedef uint8_t		u8;
typedef int8_t		s8;
typedef uint16_t	u16;
typedef int16_t		s16;
typedef uint32_t	u32;
typedef int32_t		s32;
typedef uint64_t	u64;
/* Fixed: s64 was mistakenly aliased to uint64_t, silently making every
 * "signed" 64-bit value unsigned (breaks negative comparisons and
 * sign extension).
 */
typedef int64_t		s64;
41 
42 typedef struct idpf_lock idpf_lock;
43 
#define __iomem			/* Linux MMIO annotation: compiled out here */
/* Hardware debug print: compiled out in this OS layer. */
#define hw_dbg(hw, S, ...)	do {} while (0)
/* Two 16-bit shifts instead of one 32-bit shift: avoids UB when n is a
 * 32-bit type (shifting by >= the type width is undefined).
 */
#define upper_32_bits(n)	((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n)	((u32)(n))
#define low_16_bits(x)		((x) & 0xFFFF)
#define high_16_bits(x)		(((x) & 0xFFFF0000) >> 16)

/* Shift mask m up by s bits (same form as MAKEMASK below). */
#define IDPF_M(m, s)		((m) << (s))

#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_PER_LONG_LONG (8 * sizeof(long long))
/* Contiguous mask covering bits l..h inclusive (requires h >= l). */
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
59 
/* Ethernet address length fallback (octets). */
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN		6
#endif

/* Wire-endianness annotation types from the kernel; here they are plain
 * integers with no sparse-style checking.
 */
#ifndef __le16
#define __le16	uint16_t
#endif
#ifndef __le32
#define __le32	uint32_t
#endif
#ifndef __le64
#define __le64	uint64_t
#endif
#ifndef __be16
#define __be16	uint16_t
#endif
#ifndef __be32
#define __be32	uint32_t
#endif
#ifndef __be64
#define __be64	uint64_t
#endif

/* Single-bit masks mapped onto DPDK helpers. */
#ifndef BIT_ULL
#define BIT_ULL(a) RTE_BIT64(a)
#endif

#ifndef BIT
#define BIT(a) RTE_BIT32(a)
#endif

/* Boolean constants. NOTE(review): the lowercase pair redefines the
 * <stdbool.h> macros included above; the replacement lists are the same
 * tokens (1/0), so the redefinition is accepted.
 */
#define FALSE	0
#define TRUE	1
#define false	0
#define true	1
95 
/* Avoid macro redefinition warning on Windows */
#ifdef RTE_EXEC_ENV_WINDOWS
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#endif

/* Map kernel-style min/max onto DPDK's generic helpers. */
#define min(a, b) RTE_MIN(a, b)
#define max(a, b) RTE_MAX(a, b)
108 
#define ARRAY_SIZE(arr)  RTE_DIM(arr)
/* Size in bytes of member f of struct/union type t.
 * Fixed: the previous expansion used "->(f)" — a parenthesized member
 * name is not valid C, so any use of the macro failed to compile. The
 * Linux FIELD_SIZEOF/sizeof_field form uses the bare member name.
 */
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
#define MAKEMASK(m, s) ((m) << (s))
112 
/* Unconditional debug logging to the IDPF_COMMON logtype. */
#define DEBUGOUT(S, ...)	RTE_LOG(DEBUG, IDPF_COMMON, S, ## __VA_ARGS__)
#define DEBUGOUT2(S, ...)	DEBUGOUT(S, ## __VA_ARGS__)
/* Function-entry trace: logs the given name string with a newline. */
#define DEBUGFUNC(F)		DEBUGOUT(F "\n")
116 
/*
 * Conditional debug print: emits only when mask bit(s) m are set in the
 * hw debug_mask; output is prefixed with the PCI device/function ids.
 */
#define idpf_debug(h, m, s, ...)					\
	do {								\
		if (((m) & (h)->debug_mask))				\
			DEBUGOUT("idpf %02x.%x " s "\n",		\
					(h)->bus.device, (h)->bus.func,	\
					##__VA_ARGS__);			\
	} while (0)

/* info and warn currently share one verbosity level (IDPF_DBG_ALL). */
#define idpf_info(hw, fmt, ...) \
	idpf_debug(hw, IDPF_DBG_ALL, fmt, ##__VA_ARGS__)
#define idpf_warn(hw, fmt, ...) \
	idpf_debug(hw, IDPF_DBG_ALL, fmt, ##__VA_ARGS__)
/*
 * Hex-dump len bytes of buf as 64-bit words, one word per line.
 * NOTE(review): rowsize/groupsize are accepted but ignored, and the loop
 * reads a u64 at each 8-byte step — if len is not a multiple of 8 the
 * final read overruns the buffer; the u64 cast is also alignment- and
 * aliasing-sensitive. Confirm callers pass 8-byte-multiple lengths.
 */
#define idpf_debug_array(hw, type, rowsize, groupsize, buf, len)	\
	do {								\
		struct idpf_hw *hw_l = hw;				\
		u16 len_l = len;					\
		u8 *buf_l = buf;					\
		int i;							\
		for (i = 0; i < len_l; i += 8)				\
			idpf_debug(hw_l, type,				\
				   "0x%04X  0x%016"PRIx64"\n",		\
				   i, *((u64 *)((buf_l) + i)));		\
	} while (0)
#define idpf_snprintf snprintf
#ifndef SNPRINTF
#define SNPRINTF idpf_snprintf
#endif
144 
/* Raw MMIO reads and register-address computation (byte offset from
 * the mapped BAR at (a)->hw_addr).
 */
#define IDPF_PCI_REG(reg)     rte_read32(reg)
#define IDPF_PCI_REG_ADDR(a, reg)				\
	((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
#define IDPF_PCI_REG64(reg)     rte_read64(reg)
#define IDPF_PCI_REG_ADDR64(a, reg)				\
	((volatile uint64_t *)((char *)(a)->hw_addr + (reg)))

/* I/O memory barriers, mapped onto DPDK's rte_io barriers. */
#define idpf_wmb() rte_io_wmb()
#define idpf_rmb() rte_io_rmb()
#define idpf_mb() rte_io_mb()
155 
/* Read a 32-bit little-endian register and return it in host order. */
static inline uint32_t idpf_read_addr(volatile void *addr)
{
	uint32_t le_val = IDPF_PCI_REG(addr);

	return rte_le_to_cpu_32(le_val);
}
160 
/* Read a 64-bit little-endian register and return it in host order. */
static inline uint64_t idpf_read_addr64(volatile void *addr)
{
	uint64_t le_val = IDPF_PCI_REG64(addr);

	return rte_le_to_cpu_64(le_val);
}
165 
/* Write a host-order value to a register as little-endian. */
#define IDPF_PCI_REG_WRITE(reg, value)			\
	rte_write32((rte_cpu_to_le_32(value)), reg)

#define IDPF_PCI_REG_WRITE64(reg, value)		\
	rte_write64((rte_cpu_to_le_64(value)), reg)

/* Register access by byte offset relative to hw->hw_addr. */
#define IDPF_READ_REG(hw, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((hw), (reg)))
#define IDPF_WRITE_REG(hw, reg, value)					\
	IDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((hw), (reg)), (value))

/* Linux-style short register accessors. */
#define rd32(a, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((a), (reg)))
#define wr32(a, reg, value)						\
	IDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((a), (reg)), (value))
/* Plain division shim for the kernel's div64_long helper. */
#define div64_long(n, d) ((n) / (d))
#define rd64(a, reg) idpf_read_addr64(IDPF_PCI_REG_ADDR64((a), (reg)))

#define BITS_PER_BYTE       8
183 
/* memory allocation tracking */
/* DMA buffer descriptor, filled by idpf_alloc_dma_mem(). */
struct __rte_packed_begin idpf_dma_mem {
	void *va;		/* virtual address of the buffer */
	u64 pa;			/* bus (IOVA) address */
	u32 size;		/* allocation size in bytes */
	const void *zone;	/* backing rte_memzone, kept for freeing */
} __rte_packed_end;
191 
/* Plain virtual-memory allocation descriptor (no DMA address). */
struct __rte_packed_begin idpf_virt_mem {
	void *va;		/* virtual address */
	u32 size;		/* allocation size in bytes */
} __rte_packed_end;
196 
/* Heap shims: the hw handle is accepted for shared-code compatibility
 * but unused; both allocators return zeroed memory.
 */
#define idpf_malloc(h, s)	rte_zmalloc(NULL, s, 0)
/* NOTE(review): (c) * (s) is not overflow-checked, unlike calloc(). */
#define idpf_calloc(h, c, s)	rte_zmalloc(NULL, (c) * (s), 0)
#define idpf_free(h, m)		rte_free(m)

/* Final parameter (memory-type hint in shared code) is ignored. */
#define idpf_memset(a, b, c, d)	memset((a), (b), (c))
#define idpf_memcpy(a, b, c, d)	rte_memcpy((a), (b), (c))
/* NOTE(review): if the allocation fails this passes NULL straight into
 * rte_memcpy — callers must not rely on a NULL return to detect OOM.
 */
#define idpf_memdup(a, b, c, d)	rte_memcpy(idpf_malloc(a, c), b, c)
204 
/* Endianness conversion wrappers over DPDK's rte_byteorder. */
#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)

/* Network byte-order helpers (big-endian, as in <arpa/inet.h>). */
#define NTOHS(a) rte_be_to_cpu_16(a)
#define NTOHL(a) rte_be_to_cpu_32(a)
#define HTONS(a) rte_cpu_to_be_16(a)
#define HTONL(a) rte_cpu_to_be_32(a)
219 
/* SW spinlock */
/* Thin wrapper around DPDK's spinlock; destroy is a no-op since
 * rte_spinlock_t holds no resources to release.
 */
struct idpf_lock {
	rte_spinlock_t spinlock;
};

#define idpf_init_lock(sp) rte_spinlock_init(&(sp)->spinlock)
#define idpf_acquire_lock(sp) rte_spinlock_lock(&(sp)->spinlock)
#define idpf_release_lock(sp) rte_spinlock_unlock(&(sp)->spinlock)
#define idpf_destroy_lock(sp) RTE_SET_USED(sp)
229 
230 struct idpf_hw;
231 
/*
 * Allocate a zeroed, IOVA-contiguous, 4 KiB-aligned DMA buffer backed by
 * an rte_memzone. On success fills @mem (VA, IOVA, size, zone handle,
 * the latter kept so idpf_free_dma_mem() can release it) and returns the
 * virtual address; returns NULL when @mem is NULL or the reservation
 * fails.
 */
static inline void *
idpf_alloc_dma_mem(__rte_unused struct idpf_hw *hw,
		   struct idpf_dma_mem *mem, u64 size)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return NULL;

	/* Random suffix makes the zone name unique; NOTE(review): a name
	 * collision would make the reservation fail and is not retried.
	 */
	snprintf(z_name, sizeof(z_name), "idpf_dma_%"PRIu64, rte_rand());
	mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, RTE_PGSIZE_4K);
	if (!mz)
		return NULL;

	/* NOTE(review): @size is u64 but mem->size is u32 — requests of
	 * 4 GiB or more would be recorded truncated; confirm callers stay
	 * below that.
	 */
	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->iova;
	mem->zone = (const void *)mz;
	memset(mem->va, 0, size);

	return mem->va;
}
256 
257 static inline void
258 idpf_free_dma_mem(__rte_unused struct idpf_hw *hw,
259 		  struct idpf_dma_mem *mem)
260 {
261 	rte_memzone_free((const struct rte_memzone *)mem->zone);
262 	mem->size = 0;
263 	mem->va = NULL;
264 	mem->pa = 0;
265 }
266 
267 static inline u8
268 idpf_hweight8(u32 num)
269 {
270 	u8 bits = 0;
271 	u32 i;
272 
273 	for (i = 0; i < 8; i++) {
274 		bits += (u8)(num & 0x1);
275 		num >>= 1;
276 	}
277 
278 	return bits;
279 }
280 
281 static inline u8
282 idpf_hweight32(u32 num)
283 {
284 	u8 bits = 0;
285 	u32 i;
286 
287 	for (i = 0; i < 32; i++) {
288 		bits += (u8)(num & 0x1);
289 		num >>= 1;
290 	}
291 
292 	return bits;
293 }
294 
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* All delays are microsecond busy-waits via rte_delay_us. */
#define DELAY(x) rte_delay_us(x)
#define idpf_usec_delay(x) rte_delay_us(x)
/* NOTE(review): the second argument is ignored — always delays x ms. */
#define idpf_msec_delay(x, y) rte_delay_us(1000 * (x))
#define udelay(x) DELAY(x)
#define msleep(x) DELAY(1000 * (x))
/* Busy-waits for the range minimum, rounded up to whole milliseconds;
 * the max bound is discarded.
 */
#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))

#ifndef IDPF_DBG_TRACE
#define IDPF_DBG_TRACE	  BIT_ULL(0)
#endif

#ifndef DIVIDE_AND_ROUND_UP
/* Duplicate of DIV_ROUND_UP kept for shared-code compatibility. */
#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
#endif

#ifndef IDPF_INTEL_VENDOR_ID
#define IDPF_INTEL_VENDOR_ID	    0x8086	/* PCI vendor id: Intel */
#endif
314 
#ifndef IS_UNICAST_ETHER_ADDR
/* Unicast = I/G bit (LSB of the first octet) clear. */
#define IS_UNICAST_ETHER_ADDR(addr)			\
	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0))
#endif

#ifndef IS_MULTICAST_ETHER_ADDR
/* Multicast = I/G bit (LSB of the first octet) set. */
#define IS_MULTICAST_ETHER_ADDR(addr)			\
	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1))
#endif

#ifndef IS_BROADCAST_ETHER_ADDR
/* Check whether an address is broadcast. */
/* NOTE(review): compares only the first two octets against 0xffff and
 * reads the address through u16 * (alignment-sensitive); this is a
 * quick filter, not a full 6-octet ff:ff:ff:ff:ff:ff check.
 */
#define IS_BROADCAST_ETHER_ADDR(addr)			\
	((bool)((((u16 *)(addr))[0] == ((u16)0xffff))))
#endif

#ifndef IS_ZERO_ETHER_ADDR
/* All six octets zero, read as three 16-bit words. */
#define IS_ZERO_ETHER_ADDR(addr)				\
	(((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) &&	\
	 ((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) &&	\
	 ((bool)((((u16 *)(addr))[2] == ((u16)0x0)))))
#endif
337 
/* BSD <sys/queue.h> shims presenting Linux-style list iteration names. */
#ifndef LIST_HEAD_TYPE
#define LIST_HEAD_TYPE(list_name, type) LIST_HEAD(list_name, type)
#endif

#ifndef LIST_ENTRY_TYPE
#define LIST_ENTRY_TYPE(type)	   LIST_ENTRY(type)
#endif

/* Removal-safe iteration: tvar caches the next node so var may be
 * unlinked or freed inside the loop body.
 */
#ifndef LIST_FOREACH_SAFE
#define LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))
#endif

/* entry_type is unused; kept only for Linux-style call-site parity. */
#ifndef LIST_FOR_EACH_ENTRY_SAFE
#define LIST_FOR_EACH_ENTRY_SAFE(pos, temp, head, entry_type, list)	\
	LIST_FOREACH_SAFE(pos, head, list, temp)
#endif

#ifndef LIST_FOR_EACH_ENTRY
#define LIST_FOR_EACH_ENTRY(pos, head, entry_type, list)		\
	LIST_FOREACH(pos, head, list)

#endif
363 
/* Function personality the MAC belongs to (PF, VF, or generic). */
enum idpf_mac_type {
	IDPF_MAC_UNKNOWN = 0,
	IDPF_MAC_PF,
	IDPF_MAC_VF,
	IDPF_MAC_GENERIC
};
370 
#define ETH_ALEN 6	/* octets in an Ethernet address (same value as ETH_ADDR_LEN) */

/* MAC addresses associated with this function. */
struct idpf_mac_info {
	enum idpf_mac_type type;
	u8 addr[ETH_ALEN];	/* presumably the active address — confirm against users */
	u8 perm_addr[ETH_ALEN];	/* presumably the permanent/factory address */
};
378 
#define IDPF_AQ_LINK_UP 0x1	/* link-up flag (admin-queue status) -- TODO confirm source */

/* PCI bus types */
enum idpf_bus_type {
	idpf_bus_type_unknown = 0,
	idpf_bus_type_pci,
	idpf_bus_type_pcix,
	idpf_bus_type_pci_express,
	idpf_bus_type_reserved
};
389 
/* PCI bus speeds */
/* Enumerator values mirror the nominal rate (MHz for PCI/PCI-X;
 * 2500/5000/8000 presumably the PCIe per-lane MT/s rates).
 */
enum idpf_bus_speed {
	idpf_bus_speed_unknown	= 0,
	idpf_bus_speed_33	= 33,
	idpf_bus_speed_66	= 66,
	idpf_bus_speed_100	= 100,
	idpf_bus_speed_120	= 120,
	idpf_bus_speed_133	= 133,
	idpf_bus_speed_2500	= 2500,
	idpf_bus_speed_5000	= 5000,
	idpf_bus_speed_8000	= 8000,
	idpf_bus_speed_reserved
};
403 
/* PCI bus widths */
/* Values encode the width directly: PCIe lane count or parallel-bus bits. */
enum idpf_bus_width {
	idpf_bus_width_unknown	= 0,
	idpf_bus_width_pcie_x1	= 1,
	idpf_bus_width_pcie_x2	= 2,
	idpf_bus_width_pcie_x4	= 4,
	idpf_bus_width_pcie_x8	= 8,
	idpf_bus_width_32	= 32,
	idpf_bus_width_64	= 64,
	idpf_bus_width_reserved
};
415 
/* Bus parameters */
/* PCI location/link info; device and func are used as the log prefix
 * in idpf_debug().
 */
struct idpf_bus_info {
	enum idpf_bus_speed speed;
	enum idpf_bus_width width;
	enum idpf_bus_type type;

	u16 func;	/* PCI function number */
	u16 device;	/* PCI device number */
	u16 lan_id;	/* LAN port index -- TODO confirm semantics */
	u16 bus_id;	/* PCI bus number */
};
427 
/* Function specific capabilities */
struct idpf_hw_func_caps {
	u32 num_alloc_vfs;	/* presumably VFs allocated to this function — confirm */
	u32 vf_base_id;		/* presumably id of the first VF — confirm */
};
433 
434 #endif /* _IDPF_OSDEP_H_ */
435