/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_COMMON_EFSYS_H
#define _SFC_COMMON_EFSYS_H

#include <stdbool.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_io.h>

#include "sfc_efx_debug.h"
#include "sfc_efx_log.h"

#ifdef __cplusplus
extern "C" {
#endif

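/*
 * Mark libefx API functions with __rte_internal: they are exported for use
 * by other DPDK drivers (e.g. net/sfc) but are not part of the public,
 * stable application ABI.
 */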
#define LIBEFX_API		__rte_internal

/* No specific decorations required since functions are local by default */
#define LIBEFX_INTERNAL

#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
/*
 * __SSE2__ is defined by the compiler if the target architecture supports
 * Streaming SIMD Extensions 2 (SSE2). __m128i is a data type used
 * by the extension instructions.
 */
#if defined(__SSE2__)
#define EFSYS_HAS_UINT128 1
typedef __m128i efsys_uint128_t;
/*
 * __int128 and unsigned __int128 are compiler extensions (built-in types).
 * __SIZEOF_INT128__ is defined by the compiler if these data types are
 * available.
 */
#elif defined(__SIZEOF_INT128__)
#define EFSYS_HAS_UINT128 1
typedef unsigned __int128 efsys_uint128_t;
#else
#error Unsigned 128-bit integer support is required
#endif

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif


typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE	false
#endif
#ifndef B_TRUE
#define B_TRUE	true
#endif

/*
 * RTE_MAX() and RTE_MIN() cannot be used since a braced-group within an
 * expression is allowed only inside a function, whereas MAX() is used as
 * the number of elements in an array (illustrated below).
 */
#ifndef MAX
#define MAX(v1, v2)	((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2)	((v1) < (v2) ? (v1) : (v2))
#endif
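
/*
 * Illustration (not part of the original header): unlike RTE_MAX(), the
 * MAX() above remains an integer constant expression, so it is valid at
 * file scope, e.g. as an array dimension:
 *
 *	uint64_t stats[MAX(FOO_NSTATS, BAR_NSTATS)];	// OK
 *	uint64_t stats[RTE_MAX(FOO_NSTATS, BAR_NSTATS)];	// braced-group,
 *								// function scope only
 *
 * FOO_NSTATS and BAR_NSTATS are hypothetical compile-time constants.
 */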

#ifndef ISP2
#define ISP2(x)			rte_is_power_of_2(x)
#endif

#define ENOTACTIVE	ENOTCONN

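/*
 * Prefetch hints used by libefx: "many" maps to rte_prefetch0() (data is
 * expected to be accessed repeatedly, so pull it into all cache levels),
 * while "once" maps to rte_prefetch_non_temporal() to minimise cache
 * pollution for data read only once.
 */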
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}

/* Code inclusion options */


#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable X2xxx (Medford2) support */
#define EFSYS_OPT_MEDFORD2 1
/* Enable Riverhead support */
#define EFSYS_OPT_RIVERHEAD 1

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filter support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_EXTENDED_WIDTH 0
#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

#define EFSYS_OPT_EVB 1

#define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0

#define EFSYS_OPT_PCI 1

#define EFSYS_OPT_DESC_PROXY 0

#define EFSYS_OPT_MAE 1

#define EFSYS_OPT_VIRTIO 0

/* ID */

typedef struct __efsys_identifier_s efsys_identifier_t;


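/*
 * Tracing probes: libefx calls EFSYS_PROBE*() at interesting points, but no
 * tracing backend is wired up in this environment, so they expand to no-ops.
 */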
#define EFSYS_PROBE(_name)						\
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1)				\
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3)					\
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4)			\
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6)					\
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6, _type7, _arg7)			\
	do { } while (0)


/* DMA */

typedef rte_iova_t efsys_dma_addr_t;

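/*
 * DMA-capable memory region: backed by an rte_memzone, with esm_base as the
 * CPU virtual address of the area and esm_addr as the IOVA (bus address)
 * handed to the NIC.
 */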
typedef struct efsys_mem_s {
	const struct rte_memzone	*esm_mz;
	/*
	 * Ideally this should have the volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier-discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void				*esm_base;
	efsys_dma_addr_t		esm_addr;
} efsys_mem_t;


#define EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void)memset((void *)(_esmp)->esm_base, 0, (_size));	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		(_edp)->ed_u32[0] = _addr[0];				\
									\
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		(_eqp)->eq_u64[0] = _addr[0];				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)


#define EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		_addr[0] = (_edp)->ed_u32[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		_addr[0] = (_eqp)->eq_u64[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		_addr[0] = (_eop)->eo_u128[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)


#define	EFSYS_MEM_SIZE(_esmp)						\
	((_esmp)->esm_mz->len)

#define EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

#define EFSYS_MEM_PREFETCH(_esmp, _offset)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
									\
		rte_prefetch0(_base + (_offset));			\
	} while (0)


/* BAR */

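/*
 * Memory-mapped PCI BAR: esb_base is the CPU mapping of the BAR, esb_dev
 * identifies the owning PCI device and esb_rid the BAR (region) index,
 * while esb_lock serialises accesses that must not be interleaved.
 */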
typedef struct efsys_bar_s {
	rte_spinlock_t		esb_lock;
	int			esb_rid;
	struct rte_pci_device	*esb_dev;
	/*
	 * Ideally this should have the volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier-discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void			*esb_base;
} efsys_bar_t;

#define SFC_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		rte_spinlock_init(&(_esbp)->esb_lock);			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
#define SFC_BAR_LOCK_DESTROY(_esbp)	((void)0)
#define SFC_BAR_LOCK(_esbp)		rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp)		rte_spinlock_unlock(&(_esbp)->esb_lock)
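
/*
 * Where the BAR accessors below take a _lock argument, it lets callers skip
 * taking the BAR spinlock, e.g. when they already hold it or when the access
 * does not need to be serialised against other BAR accesses.
 */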

#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		rte_rmb();						\
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr);		\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		rte_rmb();						\
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr);		\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		rte_rmb();						\
		/* There is no rte_read128_relaxed() yet */		\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)


#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		rte_write32_relaxed((_edp)->ed_u32[0], _addr);		\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr);		\
		rte_wmb();						\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/*
 * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		rte_panic("Write-combined BAR access not supported");	\
	} while (B_FALSE)

#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		/* There is no rte_write128_relaxed() yet */		\
		_addr[0] = (_eop)->eo_u128[0];				\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* SPIN */

#define EFSYS_SPIN(_us)							\
	do {								\
		rte_delay_us(_us);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

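/*
 * There is no blocking sleep primitive in this environment (the PMD runs in
 * a polling context), so EFSYS_SLEEP falls back to the busy-wait EFSYS_SPIN.
 */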
#define EFSYS_SLEEP EFSYS_SPIN

/* BARRIERS */

#define EFSYS_MEM_READ_BARRIER()	rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER()	rte_io_wmb()

/* DMA SYNC */

/*
 * DPDK does not provide any DMA syncing API, and PMDs do not perform any
 * explicit DMA syncing either.
 * DMA mappings are assumed to be coherent.
 */

#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)	((void)0)

/* Just prevent store reordering by the CPU and (implicitly) by the compiler */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)	rte_wmb()

/* TIMESTAMP */

typedef uint64_t efsys_timestamp_t;

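/*
 * Timestamp in microseconds, derived from the timer cycle counter:
 * us = cycles * 10^6 / cycles_per_second.
 */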
#define EFSYS_TIMESTAMP(_usp)						\
	do {								\
		*(_usp) = rte_get_timer_cycles() * 1000000 /		\
			rte_get_timer_hz();				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* KMEM */

#define EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
	do {								\
		(_esip) = (_esip);					\
		(_p) = rte_zmalloc("sfc", (_size), 0);			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p)				\
	do {								\
		(void)(_esip);						\
		(void)(_size);						\
		rte_free((_p));						\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* LOCK */

typedef rte_spinlock_t efsys_lock_t;

#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label)	\
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp)				\
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp)				\
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp)		\
	SFC_EFX_ASSERT(rte_spinlock_is_locked((_eslp)))

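/*
 * The lock state carries no real information in this environment:
 * EFSYS_LOCK() stores a magic value which EFSYS_UNLOCK() asserts, so that
 * mismatched lock/unlock pairs are caught when asserts are enabled.
 */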
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC	0x000010c4

#define EFSYS_LOCK(_lockp, _state)				\
	do {							\
		SFC_EFSYS_LOCK(_lockp);				\
		(_state) = EFSYS_LOCK_MAGIC;			\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state)				\
	do {							\
		SFC_EFX_ASSERT((_state) == EFSYS_LOCK_MAGIC);	\
		SFC_EFSYS_UNLOCK(_lockp);			\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

/* STAT */

typedef uint64_t	efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta)				\
	do {							\
		*(_knp) += (_delta);				\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta)				\
	do {							\
		*(_knp) -= (_delta);				\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val)				\
	do {							\
		*(_knp) = (_val);				\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_SET_QWORD(_knp, _valp)			\
	do {							\
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp)			\
	do {							\
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]);	\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* ERR */

#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		(void)(_esip);						\
		SFC_EFX_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)",	\
			(_code), (_dword0), (_dword1));			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
#endif

/* ASSERT */

/*
 * RTE_VERIFY() from DPDK treats expressions containing the % operator
 * incorrectly, so re-implement it here.
 */
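/*
 * Presumably (assumption, not stated in the original header) the problem is
 * that RTE_VERIFY() pastes the stringified expression into the panic format
 * string, so a literal '%' in something like
 *
 *	EFSYS_ASSERT((_offset) % sizeof(efx_qword_t) == 0);
 *
 * would be parsed as a conversion specifier. The definition below passes the
 * stringified expression as a separate argument instead.
 */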
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp)						\
	do {								\
		if (unlikely(!(_exp)))					\
			rte_panic("line %d\tassert \"%s\" failed\n",	\
				  __LINE__, (#_exp));			\
	} while (0)
#else
#define EFSYS_ASSERT(_exp)		(void)(_exp)
#endif

#define EFSYS_ASSERT3(_x, _op, _y, _t)	EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

#define EFSYS_HAS_ROTL_DWORD	0

/* PCI */

typedef struct efsys_pci_config_s {
	struct rte_pci_device	*espc_dev;
} efsys_pci_config_t;

#ifdef __cplusplus
}
#endif

#endif  /* _SFC_COMMON_EFSYS_H */