/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#ifndef __THUNDERX_NICVF_ETHDEV_H__
#define __THUNDERX_NICVF_ETHDEV_H__

#include <assert.h>

#include <rte_ethdev_driver.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define THUNDERX_NICVF_PMD_VERSION	"2.0"
#define THUNDERX_REG_BYTES		8

#define NICVF_INTR_POLL_INTERVAL_MS	50
#define NICVF_HALF_DUPLEX		0x00
#define NICVF_FULL_DUPLEX		0x01
#define NICVF_UNKNOWN_DUPLEX		0xff

#define NICVF_RSS_OFFLOAD_PASS1 ( \
	ETH_RSS_PORT | \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP)

#define NICVF_RSS_OFFLOAD_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define NICVF_TX_OFFLOAD_CAPA ( \
	DEV_TX_OFFLOAD_IPV4_CKSUM | \
	DEV_TX_OFFLOAD_UDP_CKSUM | \
	DEV_TX_OFFLOAD_TCP_CKSUM | \
	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
	DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
	DEV_TX_OFFLOAD_MULTI_SEGS)

#define NICVF_RX_OFFLOAD_CAPA ( \
	DEV_RX_OFFLOAD_VLAN_STRIP | \
	DEV_RX_OFFLOAD_CRC_STRIP | \
	DEV_RX_OFFLOAD_JUMBO_FRAME | \
	DEV_RX_OFFLOAD_SCATTER)

#define NICVF_DEFAULT_RX_FREE_THRESH	224
#define NICVF_DEFAULT_TX_FREE_THRESH	224
#define NICVF_TX_FREE_MPOOL_THRESH	16
#define NICVF_MAX_RX_FREE_THRESH	1024
#define NICVF_MAX_TX_FREE_THRESH	1024

#define VLAN_TAG_SIZE			4	/* 802.3ac tag */

/* Return the PMD private data (struct nicvf) of an ethdev */
static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

/* Virtual-to-IOVA offset of the mempool's first memory chunk;
 * valid only while the pool occupies a single contiguous area.
 */
static inline uint64_t
nicvf_mempool_phy_offset(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *hdr;

	hdr = STAILQ_FIRST(&mp->mem_list);
	assert(hdr != NULL);
	return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
}

/* Bytes of metadata (mbuf header plus private area) that precede
 * the data buffer inside an mbuf.
 */
static inline uint16_t
nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
{
	return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
}

/* Map a queue index local to this queue set (qset) to the flat
 * ethdev queue index; secondary qsets are offset by their position
 * behind the primary VF.
 */
static inline uint16_t
nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
{
	uint16_t global_qidx = local_qidx;

	if (nic->sqs_mode)
		global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);

	return global_qidx;
}

/*
 * Simple phy2virt functions assuming mbufs are in a single huge page
 * V = P + offset
 * P = V - offset
 */
static inline uintptr_t
nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
{
	return (uintptr_t)(phy + mbuf_phys_off);
}

static inline rte_iova_t
nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
{
	return (rte_iova_t)(virt - mbuf_phys_off);
}

/* Inclusive range [*tx_start, *tx_end] of ethdev TX queue indices
 * served by this qset, clamped to the number of configured queues.
 */
static inline void
nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
	       uint16_t *tx_end)
{
	uint16_t tmp;

	*tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_SND_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_SND_QUEUES_PER_QS) - 1;
	*tx_end = dev->data->nb_tx_queues ?
		RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
}

/* Same as nicvf_tx_range(), for the RX queues of this qset */
static inline void
nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
	       uint16_t *rx_end)
{
	uint16_t tmp;

	*rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_RCV_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_RCV_QUEUES_PER_QS) - 1;
	*rx_end = dev->data->nb_rx_queues ?
		RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
}

#endif /* __THUNDERX_NICVF_ETHDEV_H__ */
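
/*
 * Illustrative sketch (compiled out, not part of the driver): how an
 * application-side test might request the RSS types and offload flags
 * declared by the capability macros above. The function name is
 * hypothetical, a pass1 device is assumed (so NICVF_RSS_OFFLOAD_PASS1
 * without the tunnel bits), and error handling is elided.
 */
#if 0
static int
nicvf_configure_port_sketch(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	/* Spread RX across queues using the hash types the VF supports */
	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	conf.rx_adv_conf.rss_conf.rss_hf = NICVF_RSS_OFFLOAD_PASS1;

	/* Pick one flag each from NICVF_RX/TX_OFFLOAD_CAPA for brevity */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_CRC_STRIP;
	conf.txmode.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* 1 RX + 1 TX queue keeps the sketch within a single queue set */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif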
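
/*
 * Illustrative sketch (compiled out): the V = P + offset round trip from
 * the comment above. The per-pool offset is computed once with
 * nicvf_mempool_phy_offset() and then applied in both directions; this
 * holds only while all mbufs of the pool live in one physically
 * contiguous region. Function and variable names are hypothetical.
 */
#if 0
static void
nicvf_phy2virt_roundtrip_sketch(struct rte_mempool *mp, struct rte_mbuf *pkt)
{
	/* One constant per pool: virtual-to-IOVA delta of its memory */
	uint64_t mbuf_phys_off = nicvf_mempool_phy_offset(mp);

	/* P = V - offset: the address a descriptor would carry */
	rte_iova_t phys = nicvf_mbuff_virt2phy((uintptr_t)pkt, mbuf_phys_off);

	/* V = P + offset: recover the mbuf from the IOVA handed back */
	struct rte_mbuf *back =
		(struct rte_mbuf *)nicvf_mbuff_phy2virt(phys, mbuf_phys_off);

	assert(back == pkt);
}
#endif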
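
/*
 * Illustrative sketch (compiled out): walking the flat ethdev TX queue
 * range owned by one queue set. Assuming MAX_SND_QUEUES_PER_QS is 8 (the
 * ThunderX value), the primary VF covers global queues 0-7 and the first
 * secondary qset (sqs_id 0) covers 8-15, clamped by nicvf_tx_range() to
 * the number of configured queues. The modulo recovers the qset-local
 * index only because the per-qset queue counts match; the helper name is
 * hypothetical.
 */
#if 0
static void
nicvf_qset_tx_walk_sketch(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint16_t tx_start, tx_end, qidx;

	/* Flat ethdev queue range [tx_start, tx_end] backed by this qset */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		/* Convert back to an index local to this qset's hardware */
		uint16_t local_qidx = qidx % MAX_SND_QUEUES_PER_QS;

		/* ... program or start send queue 'local_qidx' here ... */
		(void)local_qidx;
	}
}
#endif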