/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_COMMON_EFSYS_H
#define _SFC_COMMON_EFSYS_H

#include <stdbool.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_io.h>

#include "sfc_efx_debug.h"
#include "sfc_efx_log.h"

#ifdef __cplusplus
extern "C" {
#endif

#define LIBEFX_API		__rte_internal

/* No specific decorations required since functions are local by default */
#define LIBEFX_INTERNAL

#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
/*
 * __SSE2__ is defined by a compiler if target architecture supports
 * Streaming SIMD Extensions 2 (SSE2). __m128i is a data type used
 * by the extension instructions.
 */
#if defined(__SSE2__)
#define EFSYS_HAS_UINT128 1
typedef __m128i efsys_uint128_t;
/*
 * __int128 and unsigned __int128 are compiler extensions (built-in types).
 * __SIZEOF_INT128__ is defined by the compiler if these data types are
 * available.
 */
#elif defined(__SIZEOF_INT128__)
#define EFSYS_HAS_UINT128 1
typedef unsigned __int128 efsys_uint128_t;
#else
#error Support for unsigned 128-bit integers is required
#endif
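
/*
 * Illustrative note (not in the original code): whichever branch above is
 * taken, efsys_uint128_t is a 16-byte type, which the octo-word access
 * macros below depend on. A hypothetical compile-time check would be:
 *
 *	RTE_BUILD_BUG_ON(sizeof(efsys_uint128_t) != 16);
 */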

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif


typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE	false
#endif
#ifndef B_TRUE
#define B_TRUE	true
#endif

/*
 * RTE_MAX() and RTE_MIN() cannot be used since a braced group within
 * an expression is allowed only inside a function, whereas MAX() is
 * used as the number of elements in an array.
 */
#ifndef MAX
#define MAX(v1, v2)	((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2)	((v1) < (v2) ? (v1) : (v2))
#endif
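
/*
 * Illustrative example (hypothetical identifiers): unlike RTE_MAX(),
 * MAX() expands to a plain conditional expression, so it remains a valid
 * integer constant expression in contexts such as array dimensions:
 *
 *	uint8_t buf[MAX(FOO_BUF_SIZE, BAR_BUF_SIZE)];
 */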

#ifndef ISP2
#define ISP2(x)			rte_is_power_of_2(x)
#endif

#define ENOTACTIVE	ENOTCONN

static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}

/* Code inclusion options */


#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable X2xxx support */
#define EFSYS_OPT_MEDFORD2 1
/* Enable Riverhead support */
#define EFSYS_OPT_RIVERHEAD 1

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filter support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_EXTENDED_WIDTH 0
#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

#define EFSYS_OPT_EVB 1

#define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0

#define EFSYS_OPT_PCI 1

#define EFSYS_OPT_DESC_PROXY 0

#define EFSYS_OPT_MAE 1

/* ID */

typedef struct __efsys_identifier_s efsys_identifier_t;


#define EFSYS_PROBE(_name)						\
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1)				\
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3)					\
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4)			\
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6)					\
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6, _type7, _arg7)			\
	do { } while (0)


/* DMA */

typedef rte_iova_t efsys_dma_addr_t;

typedef struct efsys_mem_s {
	const struct rte_memzone	*esm_mz;
	/*
	 * Ideally this should have the volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier-discard warnings when the pointer or a derivative of
	 * it is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void				*esm_base;
	efsys_dma_addr_t		esm_addr;
} efsys_mem_t;
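
/*
 * Illustrative sketch (hypothetical caller code, not part of this header):
 * a PMD would typically populate efsys_mem_t from an IOVA-contiguous
 * memzone, e.g.:
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve_aligned("sfc_dma_example", len, socket_id,
 *					 RTE_MEMZONE_IOVA_CONTIG,
 *					 RTE_CACHE_LINE_SIZE);
 *	esmp->esm_mz = mz;
 *	esmp->esm_base = mz->addr;
 *	esmp->esm_addr = mz->iova;
 */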


#define EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void)memset((void *)(_esmp)->esm_base, 0, (_size));	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		(_edp)->ed_u32[0] = _addr[0];				\
									\
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		(_eqp)->eq_u64[0] = _addr[0];				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)


#define EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		_addr[0] = (_edp)->ed_u32[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		_addr[0] = (_eqp)->eq_u64[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		_addr[0] = (_eop)->eo_u128[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)


#define	EFSYS_MEM_SIZE(_esmp)						\
	((_esmp)->esm_mz->len)

#define EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

#define EFSYS_MEM_PREFETCH(_esmp, _offset)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
									\
		rte_prefetch0(_base + (_offset));			\
	} while (0)
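
/*
 * Illustrative usage sketch (hypothetical caller code): reading and
 * writing a quad-word in DMA memory through the macros above, assuming
 * "mem" is an initialised efsys_mem_t and "offset" is 8-byte aligned:
 *
 *	efx_qword_t qword;
 *
 *	EFSYS_MEM_READQ(&mem, offset, &qword);
 *	qword.eq_u64[0] |= rte_cpu_to_le_64(1);
 *	EFSYS_MEM_WRITEQ(&mem, offset, &qword);
 */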


/* BAR */

typedef struct efsys_bar_s {
	rte_spinlock_t		esb_lock;
	int			esb_rid;
	struct rte_pci_device	*esb_dev;
	/*
	 * Ideally this should have the volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier-discard warnings when the pointer or a derivative of
	 * it is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void			*esb_base;
} efsys_bar_t;

#define SFC_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		rte_spinlock_init(&(_esbp)->esb_lock);			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
#define SFC_BAR_LOCK_DESTROY(_esbp)	((void)0)
#define SFC_BAR_LOCK(_esbp)		rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp)		rte_spinlock_unlock(&(_esbp)->esb_lock)
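
/*
 * Illustrative sketch (hypothetical caller code): a PMD would typically
 * fill in efsys_bar_t from the PCI device's mapped memory resource and
 * initialise the lock, e.g.:
 *
 *	esbp->esb_rid = membar;
 *	esbp->esb_dev = pci_dev;
 *	esbp->esb_base = pci_dev->mem_resource[membar].addr;
 *	SFC_BAR_LOCK_INIT(esbp, eth_dev->data->name);
 */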

#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		rte_rmb();						\
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr);		\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		rte_rmb();						\
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr);		\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		rte_rmb();						\
		/* There is no rte_read128_relaxed() yet */		\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)


#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_dword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
					 uint32_t, (_edp)->ed_u32[0]);	\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		rte_write32_relaxed((_edp)->ed_u32[0], _addr);		\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t  *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_qword_t)));	\
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
					 uint32_t, (_eqp)->eq_u32[1],	\
					 uint32_t, (_eqp)->eq_u32[0]);	\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr);		\
		rte_wmb();						\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/*
 * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR
 * mapping (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		rte_panic("Write-combined BAR access not supported");	\
	} while (B_FALSE)

#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
						sizeof(efx_oword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
					 uint32_t, (_eop)->eo_u32[3],	\
					 uint32_t, (_eop)->eo_u32[2],	\
					 uint32_t, (_eop)->eo_u32[1],	\
					 uint32_t, (_eop)->eo_u32[0]);	\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		/* There is no rte_write128_relaxed() yet */		\
		_addr[0] = (_eop)->eo_u128[0];				\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* SPIN */

#define EFSYS_SPIN(_us)							\
	do {								\
		rte_delay_us(_us);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_SLEEP EFSYS_SPIN

/* BARRIERS */

#define EFSYS_MEM_READ_BARRIER()	rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER()	rte_io_wmb()

/* DMA SYNC */

/*
 * DPDK does not provide any DMA syncing API, and no PMD performs
 * explicit DMA syncing.
 * DMA mapping is assumed to be coherent.
 */

#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)	((void)0)

/* Just avoid store reordering by the CPU and (implicitly) the compiler */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)	rte_wmb()

/* TIMESTAMP */

typedef uint64_t efsys_timestamp_t;

#define EFSYS_TIMESTAMP(_usp)						\
	do {								\
		*(_usp) = rte_get_timer_cycles() * 1000000 /		\
			rte_get_timer_hz();				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
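
/*
 * Illustrative arithmetic (hypothetical numbers): with a 2.5 GHz timer,
 * rte_get_timer_cycles() == 5000000000 yields
 * 5000000000 * 1000000 / 2500000000 == 2000000 microseconds.
 */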

/* KMEM */

#define EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
	do {								\
		(_esip) = (_esip);					\
		(_p) = rte_zmalloc("sfc", (_size), 0);			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p)				\
	do {								\
		(void)(_esip);						\
		(void)(_size);						\
		rte_free((_p));						\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* LOCK */

typedef rte_spinlock_t efsys_lock_t;

#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label)	\
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp)				\
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp)				\
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp)		\
	SFC_EFX_ASSERT(rte_spinlock_is_locked((_eslp)))

typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC	0x000010c4

#define EFSYS_LOCK(_lockp, _state)				\
	do {							\
		SFC_EFSYS_LOCK(_lockp);				\
		(_state) = EFSYS_LOCK_MAGIC;			\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state)				\
	do {							\
		SFC_EFX_ASSERT((_state) == EFSYS_LOCK_MAGIC);	\
		SFC_EFSYS_UNLOCK(_lockp);			\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)
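
/*
 * Illustrative usage sketch (hypothetical caller code):
 *
 *	efsys_lock_t lock;
 *	efsys_lock_state_t state;
 *
 *	SFC_EFSYS_LOCK_INIT(&lock, "sfc_example", "example");
 *	EFSYS_LOCK(&lock, state);
 *	... critical section ...
 *	EFSYS_UNLOCK(&lock, state);
 */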

/* STAT */

typedef uint64_t	efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta)				\
	do {							\
		*(_knp) += (_delta);				\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta)				\
	do {							\
		*(_knp) -= (_delta);				\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val)				\
	do {							\
		*(_knp) = (_val);				\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_SET_QWORD(_knp, _valp)			\
	do {							\
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp)			\
	do {							\
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]);	\
		_NOTE(CONSTANTCONDITION);			\
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
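
/*
 * Illustrative usage sketch (hypothetical caller code): accumulating a
 * little-endian statistics quad-word from DMA memory into a host-order
 * counter:
 *
 *	efsys_stat_t rx_pkts = 0;
 *	efx_qword_t raw;
 *
 *	EFSYS_MEM_READQ(&stats_mem, offset, &raw);
 *	EFSYS_STAT_INCR_QWORD(&rx_pkts, &raw);
 */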

/* ERR */

#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		(void)(_esip);						\
		SFC_EFX_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)",	\
			(_code), (_dword0), (_dword1));			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
#endif

/* ASSERT */

/*
 * RTE_VERIFY() from DPDK mishandles expressions containing the % operator
 * (the stringified expression ends up inside the panic format string),
 * so it is re-implemented here.
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp)						\
	do {								\
		if (unlikely(!(_exp)))					\
			rte_panic("line %d\tassert \"%s\" failed\n",	\
				  __LINE__, (#_exp));			\
	} while (0)
#else
#define EFSYS_ASSERT(_exp)		(void)(_exp)
#endif
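
/*
 * Illustrative example (hypothetical identifiers): an assertion containing
 * the '%' operator, such as
 *
 *	EFSYS_ASSERT((idx % ring_size) == 0);
 *
 * is printed correctly here because the stringified expression is passed
 * as a "%s" argument rather than pasted into the format string.
 */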

#define EFSYS_ASSERT3(_x, _op, _y, _t)	EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

#define EFSYS_HAS_ROTL_DWORD	0

/* PCI */

typedef struct efsys_pci_config_s {
	struct rte_pci_device	*espc_dev;
} efsys_pci_config_t;

#ifdef __cplusplus
}
#endif

#endif  /* _SFC_COMMON_EFSYS_H */