1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
5 *
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
8 */
9
10 #ifndef _SFC_COMMON_EFSYS_H
11 #define _SFC_COMMON_EFSYS_H
12
13 #include <stdbool.h>
14
15 #include <rte_compat.h>
16 #include <rte_spinlock.h>
17 #include <rte_byteorder.h>
18 #include <rte_debug.h>
19 #include <rte_memzone.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_cycles.h>
23 #include <rte_prefetch.h>
24 #include <rte_common.h>
25 #include <rte_malloc.h>
26 #include <rte_log.h>
27 #include <rte_io.h>
28
29 #include "sfc_efx_debug.h"
30 #include "sfc_efx_log.h"
31
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35
/* libefx API functions are DPDK-internal (not part of the public ABI) */
#define LIBEFX_API		__rte_internal

/* No specific decorations required since functions are local by default */
#define LIBEFX_INTERNAL

#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
/*
 * __SSE2__ is defined by a compiler if target architecture supports
 * Streaming SIMD Extensions 2 (SSE2). __m128i is a data type used
 * by the extension instructions.
 */
#if defined(__SSE2__)
#define EFSYS_HAS_UINT128 1
typedef __m128i efsys_uint128_t;
/*
 * __int128 and unsigned __int128 are compiler extensions (built-in types).
 * __SIZEOF_INT128__ is defined by the compiler if these data types are
 * available.
 */
#elif defined(__SIZEOF_INT128__)
#define EFSYS_HAS_UINT128 1
typedef unsigned __int128 efsys_uint128_t;
#else
#error Unsigned 128-bit width integers support is required
#endif
62
/* Map DPDK's compile-time byte order onto libefx's endianness flags */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif


/* libefx uses Solaris-style boolean_t; map it onto C99 bool */
typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE	false
#endif
#ifndef B_TRUE
#define B_TRUE	true
#endif

/*
 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
 * expression allowed only inside a function, but MAX() is used as
 * a number of elements in array.
 */
#ifndef MAX
#define MAX(v1, v2)	((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2)	((v1) < (v2) ? (v1) : (v2))
#endif

/* Is power of two (beware: arguments may be evaluated more than once) */
#ifndef ISP2
#define ISP2(x)		rte_is_power_of_2(x)
#endif

/* DPDK platforms have no ENOTACTIVE errno; ENOTCONN is the closest match */
#define ENOTACTIVE	ENOTCONN
100
/*
 * Prefetch the cache line at @addr for data that will be read many
 * times: keep it resident in all cache levels (rte_prefetch0).
 */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}
106
/*
 * Prefetch the cache line at @addr for data expected to be read only
 * once: use a non-temporal hint to minimise cache pollution.
 */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
112
/* Code inclusion options */


#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable X2xxx (Medford2) support */
#define EFSYS_OPT_MEDFORD2 1
/* Enable Riverhead support */
#define EFSYS_OPT_RIVERHEAD 1

/* Register access checking is a debug-build-only feature */
#ifdef RTE_DEBUG_COMMON_SFC_EFX
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_EXTENDED_WIDTH 0
#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

#define EFSYS_OPT_EVB 1

#define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0

#define EFSYS_OPT_PCI 1

#define EFSYS_OPT_DESC_PROXY 0

#define EFSYS_OPT_MAE 1

#define EFSYS_OPT_VIRTIO 1
192
/* ID */

/* Opaque NIC instance handle; never dereferenced in this header */
typedef struct __efsys_identifier_s efsys_identifier_t;


/*
 * Trace probe points. No tracing backend is wired in for DPDK, so all
 * EFSYS_PROBEn() macros expand to nothing.
 */
#define EFSYS_PROBE(_name)						\
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1)				\
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3)					\
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4)			\
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6)					\
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6, _type7, _arg7)			\
	do { } while (0)


/* DMA */

/* DMA addresses are IOVAs from DPDK's point of view */
typedef rte_iova_t efsys_dma_addr_t;

/* DMA-capable memory area backed by a DPDK memzone */
typedef struct efsys_mem_s {
	const struct rte_memzone *esm_mz;	/* backing memzone */
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esm_base;				/* CPU virtual address */
	efsys_dma_addr_t esm_addr;		/* DMA (IO) address */
} efsys_mem_t;
247
248
/*
 * DMA memory access macros.
 * The do { ... } while (B_FALSE) wrapper is the standard multi-statement
 * macro idiom; _NOTE(CONSTANTCONDITION) silences constant-condition
 * warnings in tools that understand the annotation.
 */
#define EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void)memset((void *)(_esmp)->esm_base, 0, (_size));	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Read a 32-bit dword from DMA memory at a dword-aligned byte offset */
#define EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_dword_t)));	\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		(_edp)->ed_u32[0] = _addr[0];				\
									\
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset),	\
			     uint32_t, (_edp)->ed_u32[0]);		\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Read a 64-bit qword from DMA memory at a qword-aligned byte offset */
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_qword_t)));	\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		(_eqp)->eq_u64[0] = _addr[0];				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
			     uint32_t, (_eqp)->eq_u32[1],		\
			     uint32_t, (_eqp)->eq_u32[0]);		\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Read a 128-bit oword from DMA memory at an oword-aligned byte offset */
#define EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_oword_t)));	\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
			     uint32_t, (_eop)->eo_u32[3],		\
			     uint32_t, (_eop)->eo_u32[2],		\
			     uint32_t, (_eop)->eo_u32[1],		\
			     uint32_t, (_eop)->eo_u32[0]);		\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
313
314
/* Write a 32-bit dword to DMA memory at a dword-aligned byte offset */
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_dword_t)));	\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
			     uint32_t, (_edp)->ed_u32[0]);		\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		_addr[0] = (_edp)->ed_u32[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Write a 64-bit qword to DMA memory at a qword-aligned byte offset */
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_qword_t)));	\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
			     uint32_t, (_eqp)->eq_u32[1],		\
			     uint32_t, (_eqp)->eq_u32[0]);		\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		_addr[0] = (_eqp)->eq_u64[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Write a 128-bit oword to DMA memory at an oword-aligned byte offset */
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_oword_t)));	\
									\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
			     uint32_t, (_eop)->eo_u32[3],		\
			     uint32_t, (_eop)->eo_u32[2],		\
			     uint32_t, (_eop)->eo_u32[1],		\
			     uint32_t, (_eop)->eo_u32[0]);		\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		_addr[0] = (_eop)->eo_u128[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)


/* Size of the DMA area in bytes (taken from the backing memzone) */
#define EFSYS_MEM_SIZE(_esmp)						\
	((_esmp)->esm_mz->len)

/* DMA (IO) address of the area */
#define EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

/* True if the DMA area has not been mapped */
#define EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

/* Prefetch the cache line at the given offset into the DMA area */
#define EFSYS_MEM_PREFETCH(_esmp, _offset)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
									\
		rte_prefetch0(_base + (_offset));			\
	} while (0)
390
391
/* BAR */

/* Mapped PCI BAR with a spinlock serialising multi-word accesses */
typedef struct efsys_bar_s {
	rte_spinlock_t esb_lock;	/* serialises BAR accesses */
	int esb_rid;			/* resource ID (presumably the BAR index — confirm against users) */
	struct rte_pci_device *esb_dev;
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esb_base;			/* mapped BAR virtual address */
} efsys_bar_t;

#define SFC_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		rte_spinlock_init(&(_esbp)->esb_lock);			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
#define SFC_BAR_LOCK_DESTROY(_esbp)	((void)0)
#define SFC_BAR_LOCK(_esbp)	rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp)	rte_spinlock_unlock(&(_esbp)->esb_lock)
417
/*
 * BAR register read macros. rte_rmb() before the read orders it after
 * any preceding reads; the access itself uses a relaxed MMIO read.
 * (_lock) selects whether the BAR spinlock is taken around the access.
 */
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_dword_t)));	\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		rte_rmb();						\
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr);		\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
			     uint32_t, (_edp)->ed_u32[0]);		\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Qword BAR reads always take the lock (note: no _lock parameter) */
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_qword_t)));	\
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		rte_rmb();						\
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr);		\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
			     uint32_t, (_eqp)->eq_u32[1],		\
			     uint32_t, (_eqp)->eq_u32[0]);		\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* 128-bit BAR read; optionally locked like EFSYS_BAR_READD() */
#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_oword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		rte_rmb();						\
		/* There is no rte_read128_relaxed() yet */		\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
			     uint32_t, (_eop)->eo_u32[3],		\
			     uint32_t, (_eop)->eo_u32[2],		\
			     uint32_t, (_eop)->eo_u32[1],		\
			     uint32_t, (_eop)->eo_u32[0]);		\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
495
496
/*
 * BAR register write macros. The relaxed MMIO write is followed by
 * rte_wmb() so the store is ordered before subsequent stores.
 */
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_dword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
			     uint32_t, (_edp)->ed_u32[0]);		\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		rte_write32_relaxed((_edp)->ed_u32[0], _addr);		\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Qword BAR writes always take the lock (note: no _lock parameter) */
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_qword_t)));	\
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
			     uint32_t, (_eqp)->eq_u32[1],		\
			     uint32_t, (_eqp)->eq_u32[0]);		\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr);		\
		rte_wmb();						\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		rte_panic("Write-combined BAR access not supported");	\
	} while (B_FALSE)
557
/* 128-bit BAR write; optionally locked like EFSYS_BAR_WRITED() */
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile efsys_uint128_t *_addr;			\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
					       sizeof(efx_oword_t)));	\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
			     uint32_t, (_eop)->eo_u32[3],		\
			     uint32_t, (_eop)->eo_u32[2],		\
			     uint32_t, (_eop)->eo_u32[1],		\
			     uint32_t, (_eop)->eo_u32[0]);		\
									\
		_addr = (volatile efsys_uint128_t *)(_base + (_offset));\
		/* There is no rte_write128_relaxed() yet */		\
		_addr[0] = (_eop)->eo_u128[0];				\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
594
595 /* SPIN */
596
597 #define EFSYS_SPIN(_us) \
598 do { \
599 rte_delay_us(_us); \
600 _NOTE(CONSTANTCONDITION); \
601 } while (B_FALSE)
602
603 #define EFSYS_SLEEP EFSYS_SPIN
604
605 /* BARRIERS */
606
607 #define EFSYS_MEM_READ_BARRIER() rte_rmb()
608 #define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
609
610 /* DMA SYNC */
611
612 /*
613 * DPDK does not provide any DMA syncing API, and no PMDs
614 * have any traces of explicit DMA syncing.
615 * DMA mapping is assumed to be coherent.
616 */
617
618 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
619
620 /* Just avoid store and compiler (implicitly) reordering */
621 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
622
623 /* TIMESTAMP */
624
625 typedef uint64_t efsys_timestamp_t;
626
627 #define EFSYS_TIMESTAMP(_usp) \
628 do { \
629 *(_usp) = rte_get_timer_cycles() * 1000000 / \
630 rte_get_timer_hz(); \
631 _NOTE(CONSTANTCONDITION); \
632 } while (B_FALSE)
633
634 /* KMEM */
635
636 #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
637 do { \
638 (_esip) = (_esip); \
639 (_p) = rte_zmalloc("sfc", (_size), 0); \
640 _NOTE(CONSTANTCONDITION); \
641 } while (B_FALSE)
642
643 #define EFSYS_KMEM_FREE(_esip, _size, _p) \
644 do { \
645 (void)(_esip); \
646 (void)(_size); \
647 rte_free((_p)); \
648 _NOTE(CONSTANTCONDITION); \
649 } while (B_FALSE)
650
651 /* LOCK */
652
653 typedef rte_spinlock_t efsys_lock_t;
654
655 #define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
656 rte_spinlock_init((_eslp))
657 #define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
658 #define SFC_EFSYS_LOCK(_eslp) \
659 rte_spinlock_lock((_eslp))
660 #define SFC_EFSYS_UNLOCK(_eslp) \
661 rte_spinlock_unlock((_eslp))
662 #define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
663 SFC_EFX_ASSERT(rte_spinlock_is_locked((_eslp)))
664
665 typedef int efsys_lock_state_t;
666
667 #define EFSYS_LOCK_MAGIC 0x000010c4
668
669 #define EFSYS_LOCK(_lockp, _state) \
670 do { \
671 SFC_EFSYS_LOCK(_lockp); \
672 (_state) = EFSYS_LOCK_MAGIC; \
673 _NOTE(CONSTANTCONDITION); \
674 } while (B_FALSE)
675
676 #define EFSYS_UNLOCK(_lockp, _state) \
677 do { \
678 SFC_EFX_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
679 SFC_EFSYS_UNLOCK(_lockp); \
680 _NOTE(CONSTANTCONDITION); \
681 } while (B_FALSE)
682
/* STAT */

typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta)					\
	do {								\
		*(_knp) += (_delta);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta)					\
	do {								\
		*(_knp) -= (_delta);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val)					\
	do {								\
		*(_knp) = (_val);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/*
 * Qword/dword statistic values come from the NIC in little-endian
 * representation and are converted to CPU byte order on access.
 */
#define EFSYS_STAT_SET_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]);		\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp)				\
	do {								\
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]);		\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
728
/* ERR */

#if EFSYS_OPT_DECODE_INTR_FATAL
/* Log a decoded fatal interrupt; (_esip) is unused here */
#define EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		(void)(_esip);						\
		SFC_EFX_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)",	\
			    (_code), (_dword0), (_dword1));		\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
#endif

/* ASSERT */

/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
 * so we re-implement it here
 */
#ifdef RTE_DEBUG_COMMON_SFC_EFX
#define EFSYS_ASSERT(_exp)						\
	do {								\
		if (unlikely(!(_exp)))					\
			rte_panic("line %d\tassert \"%s\" failed\n",	\
				  __LINE__, (#_exp));			\
	} while (0)
#else
/* Non-debug build: still evaluate (_exp) so side effects are kept */
#define EFSYS_ASSERT(_exp)	(void)(_exp)
#endif

/* Typed three-way assertion: compare (_x) and (_y) after casting to (_t) */
#define EFSYS_ASSERT3(_x, _op, _y, _t)	EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
762
/* ROTATE */

/* No CPU-specific rotate-left-dword implementation is provided */
#define EFSYS_HAS_ROTL_DWORD 0

/* PCI */

/* Handle used for PCI configuration space accesses */
typedef struct efsys_pci_config_s {
	struct rte_pci_device *espc_dev;
} efsys_pci_config_t;
772
773 #ifdef __cplusplus
774 }
775 #endif
776
777 #endif /* _SFC_COMMON_EFSYS_H */
778