/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_COMMON_EFSYS_H
#define _SFC_COMMON_EFSYS_H

#include <stdbool.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_io.h>

#include "sfc_efx_debug.h"
#include "sfc_efx_log.h"

#ifdef __cplusplus
extern "C" {
#endif

/* libefx public API symbols are DPDK-internal (not part of a stable ABI) */
#define LIBEFX_API __rte_internal

/* No specific decorations required since functions are local by default */
#define LIBEFX_INTERNAL

/* Native 64-bit and SSE2 128-bit accesses are available on supported CPUs */
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
#define EFSYS_HAS_SSE2_M128 1

/* Map DPDK's compile-time byte order onto libefx's endianness flags */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif


/* libefx uses the Solaris-style boolean type */
typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif

/*
 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
 * expression allowed only inside a function, but MAX() is used as
 * a number of elements in array.
 */
#ifndef MAX
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
#endif

#ifndef ISP2
#define ISP2(x) rte_is_power_of_2(x)
#endif

/* DPDK does not define ENOTACTIVE; map it onto the closest errno */
#define ENOTACTIVE ENOTCONN

/* Prefetch data that is expected to be read multiple times */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

/* Prefetch data that is expected to be read once (non-temporal hint) */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}

/* Code inclusion options */


#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable SFN X2xxx (Medford2) support */
#define EFSYS_OPT_MEDFORD2 1
/* Enable Riverhead support */
#define EFSYS_OPT_RIVERHEAD 1

/* Register-access checking only in debug builds (it is costly) */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_EXTENDED_WIDTH 0
#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0
#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

#define EFSYS_OPT_EVB 1

#define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0

#define EFSYS_OPT_PCI 1

#define EFSYS_OPT_DESC_PROXY 0

/* ID */

/* Opaque per-instance identifier passed back to EFSYS_* callbacks */
typedef struct __efsys_identifier_s efsys_identifier_t;


/*
 * Tracing probes are not used on DPDK; all EFSYS_PROBE* hooks expand
 * to nothing.  _NOTE(CONSTANTCONDITION) below is a lint annotation for
 * the deliberately constant while (B_FALSE) loop conditions.
 */
#define EFSYS_PROBE(_name) \
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3) \
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4) \
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6) \
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6, _type7, _arg7) \
	do { } while (0)


/* DMA */

/* DMA (bus) addresses are DPDK I/O virtual addresses */
typedef rte_iova_t efsys_dma_addr_t;

/* DMA-coherent memory region backed by a DPDK memzone */
typedef struct efsys_mem_s {
	const struct rte_memzone *esm_mz;
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esm_base;
	efsys_dma_addr_t esm_addr;
} efsys_mem_t;


#define EFSYS_MEM_ZERO(_esmp, _size) \
	do { \
		(void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Read a naturally-aligned dword/qword/oword from DMA memory at _offset */
#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = _addr[0]; \
		\
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = _addr[0]; \
		\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		(_eop)->eo_u128[0] = _addr[0]; \
		\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)


/* Write a naturally-aligned dword/qword/oword to DMA memory at _offset */
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		_addr[0] = (_edp)->ed_u32[0]; \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		_addr[0] = (_eqp)->eq_u64[0]; \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		_addr[0] = (_eop)->eo_u128[0]; \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)


/* Total size of the region is taken from the backing memzone */
#define EFSYS_MEM_SIZE(_esmp) \
	((_esmp)->esm_mz->len)

#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)

#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		\
		rte_prefetch0(_base + (_offset)); \
	} while (0)


/* BAR */

/* Mapped PCI BAR with a lock serialising multi-word register accesses */
typedef struct efsys_bar_s {
	rte_spinlock_t esb_lock;
	int esb_rid;
	struct rte_pci_device *esb_dev;
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esb_base;
} efsys_bar_t;

/* _ifname is unused on DPDK (kept for libefx interface compatibility) */
#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
	do { \
		rte_spinlock_init(&(_esbp)->esb_lock); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)

/*
 * BAR reads: rte_rmb() before each relaxed read keeps the register load
 * ordered after any preceding loads; dword/oword variants take _lock to
 * let the caller skip locking when the access is known to be atomic.
 */
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_rmb(); \
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
		\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Qword reads are always serialised under the BAR lock */
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		\
		SFC_BAR_LOCK(_esbp); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_rmb(); \
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
		\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		rte_rmb(); \
		/* There is no rte_read128_relaxed() yet */ \
		(_eop)->eo_u128[0] = _addr[0]; \
		\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)


/*
 * BAR writes: rte_wmb() after each relaxed write makes the store visible
 * before any subsequent stores.
 */
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
		rte_wmb(); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Qword writes are always serialised under the BAR lock */
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		\
		SFC_BAR_LOCK(_esbp); \
		\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
		rte_wmb(); \
		\
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		rte_panic("Write-combined BAR access not supported"); \
	} while (B_FALSE)

#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_write128_relaxed() yet */ \
		_addr[0] = (_eop)->eo_u128[0]; \
		rte_wmb(); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
	do { \
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* SPIN */

#define EFSYS_SPIN(_us) \
	do { \
		rte_delay_us(_us); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* No way to sleep in DPDK data path; busy-wait instead */
#define EFSYS_SLEEP EFSYS_SPIN

/* BARRIERS */

#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()

/* DMA SYNC */

/*
 * DPDK does not provide any DMA syncing API, and no PMD drivers
 * have any traces of explicit DMA syncing.
 * DMA mapping is assumed to be coherent.
 */

#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just avoid store and compiler (implicitly) reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()

/* TIMESTAMP */

/* Timestamps are in microseconds */
typedef uint64_t efsys_timestamp_t;

#define EFSYS_TIMESTAMP(_usp) \
	do { \
		*(_usp) = rte_get_timer_cycles() * 1000000 / \
			rte_get_timer_hz(); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* KMEM */

/* _esip/_size are unused; self-assignment silences unused-arg lint */
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(_esip) = (_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(void)(_size); \
		rte_free((_p)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* LOCK */

typedef rte_spinlock_t efsys_lock_t;

/* _ifname/_label are unused on DPDK (libefx interface compatibility) */
#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	SFC_EFX_ASSERT(rte_spinlock_is_locked((_eslp)))
/*
 * Spinlocks do not carry saved interrupt state on DPDK, so the "state"
 * is just a magic value used to catch unbalanced lock/unlock in debug.
 */
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_EFX_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* STAT */

typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
	do { \
		*(_knp) += (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
	do { \
		*(_knp) -= (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val) \
	do { \
		*(_knp) = (_val); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Qword/dword stat sources are little-endian; convert to CPU order */
#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
	do { \
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
	do { \
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* ERR */

/* Fatal interrupt decoding is compiled out unless the option is enabled */
#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
	do { \
		(void)(_esip); \
		SFC_EFX_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
			    (_code), (_dword0), (_dword1)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#endif

/* ASSERT */

/*
 * RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
 * so we re-implement it here
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (0)
#else
/* Release builds: still evaluate _exp to avoid unused-variable warnings */
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif

/* Compare _x and _y after casting both to a common type _t */
#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

#define EFSYS_HAS_ROTL_DWORD 0

/* PCI */

/* Handle used for PCI config space accesses */
typedef struct efsys_pci_config_s {
	struct rte_pci_device *espc_dev;
} efsys_pci_config_t;

#ifdef __cplusplus
}
#endif

#endif /* _SFC_COMMON_EFSYS_H */