/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

/* This file should not be included directly.  Include common.h instead. */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_rwlock.h>
#include <rte_ethdev.h>

#include "../cxgbe_compat.h"
#include "../cxgbe_ofld.h"
#include "t4_regs_values.h"

enum {
	MAX_ETH_QSETS = 64,           /* # of Ethernet Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
};

struct adapter;
struct sge_rspq;

enum {
	PORT_RSS_DONE = (1 << 0),
};

struct port_info {
	struct adapter *adapter;        /* adapter that this port belongs to */
	struct rte_eth_dev *eth_dev;    /* associated rte eth device */
	struct port_stats stats_base;   /* port statistics base */
	struct link_config link_cfg;    /* link configuration info */

	unsigned long flags;            /* port related flags */
	short int xact_addr_filt;       /* index of exact MAC address filter */

	u16 viid;                       /* associated virtual interface id */
	s8 mdio_addr;                   /* address of the PHY */
	u8 port_type;                   /* firmware port type */
	u8 mod_type;                    /* firmware module type */
	u8 port_id;                     /* physical port ID */
	u8 pidx;                        /* port index for this PF */
	u8 tx_chan;                     /* associated channel */

	u8 n_rx_qsets;                  /* # of rx qsets */
	u8 n_tx_qsets;                  /* # of tx qsets */
	u8 first_qset;                  /* index of first qset */

	u16 *rss;                       /* rss table */
	u8 rss_mode;                    /* rss mode */
	u16 rss_size;                   /* size of VI's RSS table slice */
	u64 rss_hf;                     /* RSS Hash Function */
};
/* Enable or disable autonegotiation.  If autonegotiation is enabled, any
 * forced link mode settings are ignored.
 */
#define AUTONEG_DISABLE	0x00
#define AUTONEG_ENABLE	0x01

enum {                                 /* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	FW_QUEUE_BOUND	= (1 << 3),
	FW_OK		= (1 << 4),
	CFG_QUEUES	= (1 << 5),
	MASTER_PF	= (1 << 6),
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	void *buf;                 /* struct page or mbuf */
	dma_addr_t dma_addr;
};

struct sge_fl {                     /* SGE free-buffer queue state */
	/* RO fields */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */

	dma_addr_t addr;            /* bus address of HW ring start */
	__be64 *desc;               /* address of HW Rx descriptor ring */

	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;      /* SGE relative QID for the free list */
	unsigned int size;          /* capacity of free list */

	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */

	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long low;          /* # of times momentarily starving */
};

#define MAX_MBUF_FRAGS (16384 / 512 + 2)

/* A packet gather list */
struct pkt_gl {
	union {
		struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
	} /* UNNAMED */;
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
	bool usembufs;                    /* use mbufs for fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);
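/* Illustrative skeleton of a handler matching rspq_handler_t (a sketch,
 * not part of the driver; the function name is hypothetical, and the block
 * is kept under "#if 0" so it is never compiled):
 */
#if 0
static int example_rspq_handler(struct sge_rspq *q, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	/* rsp points at the raw response descriptor; gl describes the
	 * gathered packet fragments and is NULL for responses that carry
	 * no payload.
	 */
	if (gl)
		dev_debug(q->adapter, "queue %u: %u frags, %u bytes\n",
			  q->cntxt_id, gl->nfrags, gl->tot_len);
	return 0;	/* 0 on success, negative value on error */
}
#endif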
struct sge_rspq {                   /* state for an SGE response queue */
	struct adapter *adapter;      /* adapter that this queue belongs to */
	struct rte_eth_dev *eth_dev;  /* associated rte eth device */
	struct rte_mempool *mb_pool;  /* associated mempool */

	dma_addr_t phys_addr;       /* physical address of the ring */
	__be64 *desc;               /* address of HW response ring */
	const __be64 *cur_desc;     /* current descriptor in queue */

	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
	struct sge_qstat *stat;

	unsigned int cidx;          /* consumer index */
	unsigned int gts_idx;       /* last gts write sent */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	int offset;                 /* offset into current Rx buffer */

	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 port_id;                 /* associated port-id */
	u8 idx;                     /* queue index within its group */
	u16 cntxt_id;               /* SGE relative QID for the response Q */
	u16 abs_id;                 /* absolute SGE id for the response q */

	rspq_handler_t handler;     /* associated handler for this response q */
};

struct sge_eth_rx_stats {	/* Ethernet rx queue statistics */
	u64 pkts;		/* # of ethernet packets */
	u64 rx_bytes;		/* # of ethernet bytes */
	u64 rx_cso;		/* # of Rx checksum offloads */
	u64 vlan_ex;		/* # of Rx VLAN extractions */
	u64 rx_drops;		/* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* a SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_rx_stats stats;
	bool usembufs;              /* one ingress packet per mbuf FL buffer */
} __rte_cache_aligned;

/*
 * Currently there are two types of coalesce WR.  Type 0 needs 48 bytes per
 * packet (if one sgl is present) and type 1 needs 32 bytes.  This means
 * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
 * 15 packets.  We need to keep track of the mbuf pointers in a coalesce WR
 * to be able to free those mbufs when we get completions back from the FW.
 * Allocating the maximum number of pointers in every tx desc would waste
 * memory, so we store only 2 pointers per tx desc, which is enough since
 * a tx desc can fit at most 2 packets even in the best case, where a
 * packet needs only 32 bytes.
 */
#define ETH_COALESCE_PKT_NUM 15
#define ETH_COALESCE_VF_PKT_NUM 7
#define ETH_COALESCE_PKT_PER_DESC 2
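/* Worked numbers behind the limits above (illustrative; the ~480-byte
 * usable WR body is inferred from the packet counts in the comment, not
 * from hardware documentation):
 *
 *   type 0: 10 packets * 48 bytes = 480 bytes per WR
 *   type 1: 15 packets * 32 bytes = 480 bytes per WR
 *
 * A tx_desc below is 8 flits * 8 bytes = 64 bytes, so it holds at most
 * 64 / 32 = 2 type 1 packets, hence ETH_COALESCE_PKT_PER_DESC == 2.
 */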
struct tx_eth_coal_desc {
	struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
	struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
	int idx;
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct rte_mbuf *mbuf;
	struct ulptx_sgl *sgl;
	struct tx_eth_coal_desc coalesce;
};

enum {
	EQ_STOPPED = (1 << 0),
};

struct eth_coalesce {
	unsigned char *ptr;
	unsigned char type;
	unsigned int idx;
	unsigned int len;
	unsigned int flits;
	unsigned int max;
	__u8 ethmacdst[ETHER_ADDR_LEN];
	__u8 ethmacsrc[ETHER_ADDR_LEN];
	__be16 ethtype;
	__be16 vlantci;
};

struct sge_txq {
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	struct eth_coalesce coalesce; /* coalesce info */

	uint64_t phys_addr;         /* physical address of the ring */

	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;      /* SGE relative QID for the Tx Q */
	unsigned int in_use;        /* # of in-use Tx descriptors */
	unsigned int size;          /* # of descriptors */
	unsigned int cidx;          /* SW consumer index */
	unsigned int pidx;          /* producer index */
	unsigned int dbidx;         /* last idx when db ring was done */
	unsigned int equeidx;       /* last sent credit request */
	unsigned int last_pidx;     /* last pidx recorded by tx monitor */
	unsigned int last_coal_idx; /* last coal-idx recorded by tx monitor */
	unsigned int abs_id;

	int db_disabled;            /* doorbell state */
	unsigned short db_pidx;     /* doorbell producer index */
	unsigned short db_pidx_inc; /* doorbell producer increment */
};

struct sge_eth_tx_stats {	/* Ethernet tx queue statistics */
	u64 pkts;		/* # of ethernet packets */
	u64 tx_bytes;		/* # of ethernet bytes */
	u64 tso;		/* # of TSO requests */
	u64 tx_cso;		/* # of Tx checksum offloads */
	u64 vlan_ins;		/* # of Tx VLAN insertions */
	u64 mapping_err;	/* # of I/O MMU packet mapping errors */
	u64 coal_wr;		/* # of coalesced wr */
	u64 coal_pkts;		/* # of coalesced packets */
};

struct sge_eth_txq {                   /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct rte_eth_dev *eth_dev;   /* port that this queue belongs to */
	struct rte_eth_dev_data *data;
	struct sge_eth_tx_stats stats; /* queue statistics */
	rte_spinlock_t txq_lock;

	unsigned int flags;            /* flags for state of the queue */
} __rte_cache_aligned;

struct sge_ctrl_txq {                /* State for an SGE control Tx queue */
	struct sge_txq q;            /* txq */
	struct adapter *adapter;     /* adapter associated with this queue */
	rte_spinlock_t ctrlq_lock;   /* control queue lock */
	u8 full;                     /* the Tx ring is full */
	u64 txp;                     /* number of transmits */
	struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
} __rte_cache_aligned;

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_rspq fw_evtq __rte_cache_aligned;
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */

	/* response queue interrupt parameters */
	u16 timer_val[SGE_NTIMERS];
	u8  counter_val[SGE_NCOUNTERS];

	u32 fl_align;               /* free-list buffer alignment */
	u32 fl_pg_order;            /* large page allocation size */
	u32 fl_starve_thres;        /* Free List starvation threshold */
};

#define T4_OS_NEEDS_MBOX_LOCKING 1

/*
 * OS Lock/List primitives for those interfaces in the Common Code which
 * need this.
 */

struct mbox_entry {
	TAILQ_ENTRY(mbox_entry) next;
};

TAILQ_HEAD(mbox_list, mbox_entry);

struct adapter {
	struct rte_pci_device *pdev;       /* associated rte pci device */
	struct rte_eth_dev *eth_dev;       /* first port's rte eth device */
	struct adapter_params params;      /* adapter parameters */
	struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */
	struct sge sge;                    /* associated SGE */

	/* support for single-threading access to adapter mailbox registers */
	struct mbox_list mbox_list;
	rte_spinlock_t mbox_lock;

	u8 *regs;              /* pointer to registers region */
	u8 *bar2;              /* pointer to bar2 region */
	unsigned long flags;   /* adapter flags */
	unsigned int mbox;     /* associated mailbox */
	unsigned int pf;       /* associated physical function id */

	unsigned int vpd_busy;
	unsigned int vpd_flag;

	int use_unpacked_mode; /* unpacked rx mode state */
	rte_spinlock_t win0_lock;

	unsigned int clipt_start; /* CLIP table start */
	unsigned int clipt_end;   /* CLIP table end */
	unsigned int l2t_start;   /* Layer 2 table start */
	unsigned int l2t_end;     /* Layer 2 table end */
	struct clip_tbl *clipt;   /* CLIP table */
	struct l2t_data *l2t;     /* Layer 2 table */
	struct mpstcam_table *mpstcam;

	struct tid_info tids;     /* Info used to access TID related tables */
};

/**
 * t4_os_rwlock_init - initialize rwlock
 * @lock: the rwlock
 */
static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
{
	rte_rwlock_init(lock);
}

/**
 * t4_os_write_lock - get a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_lock(rte_rwlock_t *lock)
{
	rte_rwlock_write_lock(lock);
}

/**
 * t4_os_write_unlock - unlock a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_unlock(rte_rwlock_t *lock)
{
	rte_rwlock_write_unlock(lock);
}

/**
 * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct port_info associated with a rte_eth_dev
 */
static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
{
	return dev->data->dev_private;
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
{
	return adap->port[idx];
}

/**
 * ethdev2adap - return the adapter structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct adapter associated with a rte_eth_dev
 */
static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
{
	return ethdev2pinfo(dev)->adapter;
}
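/* Illustrative use of the navigation helpers above (a sketch, not part of
 * the driver; for_each_port() is assumed to come from common.h, and the
 * block is kept under "#if 0" so it is never compiled):
 */
#if 0
static void example_dump_ports(const struct rte_eth_dev *dev)
{
	struct adapter *adap = ethdev2adap(dev);  /* ethdev -> adapter */
	int i;

	for_each_port(adap, i) {                  /* adapter -> each port */
		struct port_info *pi = adap2pinfo(adap, i);

		dev_debug(adap, "port %u: viid %u, tx_chan %u\n",
			  pi->port_id, pi->viid, pi->tx_chan);
	}
}
#endif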
#define CXGBE_PCI_REG(reg) rte_read32(reg)

static inline uint64_t cxgbe_read_addr64(volatile void *addr)
{
	uint64_t val = CXGBE_PCI_REG(addr);
	uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));

	return val | (val2 << 32);
}

static inline uint32_t cxgbe_read_addr(volatile void *addr)
{
	return CXGBE_PCI_REG(addr);
}

#define CXGBE_PCI_REG_ADDR(adap, reg) \
	((volatile uint32_t *)((char *)(adap)->regs + (reg)))

#define CXGBE_READ_REG(adap, reg) \
	cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_READ_REG64(adap, reg) \
	cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))

#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
	rte_write32_relaxed((value), (reg))

#define CXGBE_WRITE_REG(adap, reg, value) \
	CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
	CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
{
	CXGBE_PCI_REG_WRITE(addr, val);
	CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
	return val;
}

#define CXGBE_WRITE_REG64(adap, reg, value) \
	cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
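/* Note: rte_read32()/rte_write32() move only 32 bits at a time, so the
 * 64-bit helpers above access a register pair at offset R as two dwords,
 * low half first (in pseudo-code):
 *
 *   value = read32(R) | ((u64)read32(R + 4) << 32)
 *
 * The two halves are not read atomically; a caller that needs a stable
 * snapshot of a live 64-bit counter has to account for that itself.
 */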
/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	u32 val = CXGBE_READ_REG(adapter, reg_addr);

	CXGBE_DEBUG_REG(adapter, "read register 0x%x value 0x%x\n", reg_addr,
			val);
	return val;
}

/**
 * t4_write_reg - write a HW register with barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
			val);
	CXGBE_WRITE_REG(adapter, reg_addr, val);
}

/**
 * t4_write_reg_relaxed - write a HW register with no barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
					u32 val)
{
	CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
			val);
	CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
}

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
	u64 val = CXGBE_READ_REG64(adapter, reg_addr);

	CXGBE_DEBUG_REG(adapter, "64-bit read register %#x value %#llx\n",
			reg_addr, (unsigned long long)val);
	return val;
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	CXGBE_DEBUG_REG(adapter, "setting register %#x to %#llx\n", reg_addr,
			(unsigned long long)val);

	CXGBE_WRITE_REG64(adapter, reg_addr, val);
}
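/* Illustrative read-modify-write using the accessors above, in the style
 * of the common code's t4_set_reg_field() (a hypothetical sketch, kept
 * under "#if 0" so it is never compiled):
 */
#if 0
static void example_set_reg_bits(struct adapter *adap, u32 reg, u32 mask)
{
	u32 val = t4_read_reg(adap, reg);	/* read with barrier */

	t4_write_reg(adap, reg, val | mask);	/* write back with barrier */
}
#endif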
#define PCI_STATUS              0x06    /* 16 bits */
#define PCI_STATUS_CAP_LIST     0x10    /* Support Capability List */
#define PCI_CAPABILITY_LIST     0x34    /* Offset of first capability list entry */
#define PCI_CAP_ID_EXP          0x10    /* PCI Express */
#define PCI_CAP_LIST_ID         0       /* Capability ID */
#define PCI_CAP_LIST_NEXT       1       /* Next capability in the list */
#define PCI_EXP_DEVCTL          0x0008  /* Device control */
#define PCI_EXP_DEVCTL2         40      /* Device Control 2 */
#define PCI_EXP_DEVCTL_EXT_TAG  0x0100  /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PAYLOAD  0x00E0  /* Max payload */
#define PCI_CAP_ID_VPD          0x03    /* Vital Product Data */
#define PCI_VPD_ADDR            2       /* Address to access (15 bits!) */
#define PCI_VPD_ADDR_F          0x8000  /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA            4       /* 32-bits of data returned here */

/**
 * t4_os_pci_write_cfg4 - 32-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
					off_t val)
{
	u32 val32 = val;

	if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
				 addr) < 0)
		dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 32-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
				       u32 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_write_cfg2 - 16-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 16-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
					off_t val)
{
	u16 val16 = val;

	if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
				 addr) < 0)
		dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 16-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
				       u16 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_read_cfg - read an 8-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read an 8-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
				      u8 *val)
{
	if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
				addr) < 0)
		dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_find_pci_capability - lookup a capability in the PCI capability list
 * @adapter: the adapter
 * @cap: the capability
 *
 * Return the address of the given capability within the PCI capability list.
 */
static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
{
	u16 status;
	int ttl = 48;
	u8 pos = 0;
	u8 id = 0;

	t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST)) {
		dev_err(adapter, "PCIe capability reading failed\n");
		return -1;
	}

	t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos);
	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id);

		if (id == 0xff)
			break;

		if (id == cap)
			return (int)pos;

		t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos);
	}
	return 0;
}
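/* Illustrative lookup of the PCI Express capability with the helper above
 * (a hypothetical sketch, kept under "#if 0" so it is never compiled).
 * Note the return convention: a positive config-space offset when found,
 * 0 when the capability is absent, and -1 when the capability list itself
 * is unsupported.
 */
#if 0
static void example_read_devctl(struct adapter *adap)
{
	int pos = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
	u16 devctl;

	if (pos <= 0)
		return;		/* no PCIe capability found */

	t4_os_pci_read_cfg2(adap, pos + PCI_EXP_DEVCTL, &devctl);
	dev_debug(adap, "PCIe DEVCTL: %#x\n", devctl);
}
#endif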
/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the
 * common code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
				     u8 hw_addr[])
{
	struct port_info *pi = adap2pinfo(adapter, port_idx);

	rte_ether_addr_copy((struct rte_ether_addr *)hw_addr,
			    &pi->eth_dev->data->mac_addrs[0]);
}

/**
 * t4_os_lock_init - initialize spinlock
 * @lock: the spinlock
 */
static inline void t4_os_lock_init(rte_spinlock_t *lock)
{
	rte_spinlock_init(lock);
}

/**
 * t4_os_lock - spin until lock is acquired
 * @lock: the spinlock
 */
static inline void t4_os_lock(rte_spinlock_t *lock)
{
	rte_spinlock_lock(lock);
}

/**
 * t4_os_unlock - unlock a spinlock
 * @lock: the spinlock
 */
static inline void t4_os_unlock(rte_spinlock_t *lock)
{
	rte_spinlock_unlock(lock);
}

/**
 * t4_os_trylock - try to get a lock
 * @lock: the spinlock
 */
static inline int t4_os_trylock(rte_spinlock_t *lock)
{
	return rte_spinlock_trylock(lock);
}

/**
 * t4_os_init_list_head - initialize a list head to the empty list
 * @head: head of list to initialize [to empty]
 */
static inline void t4_os_init_list_head(struct mbox_list *head)
{
	TAILQ_INIT(head);
}

static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
{
	return TAILQ_FIRST(head);
}

/**
 * t4_os_atomic_add_tail - Enqueue list element atomically onto list
 * @entry: the entry to be added to the queue
 * @head: current head of the linked list
 * @lock: lock to use to guarantee atomicity
 */
static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_INSERT_TAIL(head, entry, next);
	t4_os_unlock(lock);
}

/**
 * t4_os_atomic_list_del - Dequeue list element atomically from list
 * @entry: the entry to be removed/dequeued from the list
 * @head: current head of the linked list
 * @lock: the spinlock
 */
static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_REMOVE(head, entry, next);
	t4_os_unlock(lock);
}

/**
 * t4_init_completion - initialize completion
 * @c: the completion context
 */
static inline void t4_init_completion(struct t4_completion *c)
{
	c->done = 0;
	t4_os_lock_init(&c->lock);
}

/**
 * t4_complete - set completion as done
 * @c: the completion context
 */
static inline void t4_complete(struct t4_completion *c)
{
	t4_os_lock(&c->lock);
	c->done = 1;
	t4_os_unlock(&c->lock);
}
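/* Illustrative sketch of the mailbox serialization pattern these list
 * primitives exist for (simplified; the real logic lives in the common
 * code's mailbox routines, and the block is kept under "#if 0" so it is
 * never compiled):
 */
#if 0
static void example_mbox_exclusive(struct adapter *adap)
{
	struct mbox_entry entry;

	/* Queue up and busy-wait until we are at the head of the list,
	 * i.e. until it is our turn to own the mailbox registers.
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
	while (t4_os_list_first_entry(&adap->mbox_list) != &entry)
		;

	/* ... exclusive access to the mailbox registers here ... */

	t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock);
}
#endif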
/**
 * cxgbe_port_viid - get the VI id of a port
 * @dev: the device for the port
 *
 * Return the VI id of the given port.
 */
static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
{
	return ethdev2pinfo(dev)->viid;
}

void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);
#define t4_os_alloc(_size)     t4_alloc_mem((_size))
#define t4_os_free(_ptr)       t4_free_mem((_ptr))

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void reclaim_completed_tx(struct sge_txq *q);
void t4_free_sge_resources(struct adapter *adap);
void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
		uint16_t nb_pkts);
int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
int t4_sge_init(struct adapter *adap);
int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct rte_eth_dev *eth_dev, uint16_t queue_id,
			 unsigned int iqid, int socket_id);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct rte_eth_dev *eth_dev, uint16_t queue_id,
			  unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t handler,
		     int cong, struct rte_mempool *mp, int queue_id,
		     int socket_id);
int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_clear_queues(struct port_info *pi);
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done);
int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);

#endif /* __T4_ADAPTER_H__ */