/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __THUNDERX_NICVF_ETHDEV_H__
#define __THUNDERX_NICVF_ETHDEV_H__

#include <assert.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define THUNDERX_NICVF_PMD_VERSION	"2.0"
#define THUNDERX_REG_BYTES		8

#define NICVF_INTR_POLL_INTERVAL_MS	50
#define NICVF_HALF_DUPLEX		0x00
#define NICVF_FULL_DUPLEX		0x01
#define NICVF_UNKNOWN_DUPLEX		0xff

#define NICVF_RSS_OFFLOAD_PASS1 ( \
	ETH_RSS_PORT | \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP)

#define NICVF_RSS_OFFLOAD_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define NICVF_DEFAULT_RX_FREE_THRESH	224
#define NICVF_DEFAULT_TX_FREE_THRESH	224
#define NICVF_TX_FREE_MPOOL_THRESH	16
#define NICVF_MAX_RX_FREE_THRESH	1024
#define NICVF_MAX_TX_FREE_THRESH	1024

#define VLAN_TAG_SIZE			4	/* 802.3ac tag */

/* Return the driver private area of an ethdev */
static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

/*
 * Virtual-to-physical address offset of the mempool's first memory
 * chunk; valid while the pool is backed by one contiguous memseg.
 */
static inline uint64_t
nicvf_mempool_phy_offset(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *hdr;

	hdr = STAILQ_FIRST(&mp->mem_list);
	assert(hdr != NULL);
	return (uint64_t)((uintptr_t)hdr->addr - hdr->phys_addr);
}

/* Number of metadata bytes between the mbuf header and its data buffer */
static inline uint16_t
nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
{
	return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
}

/* Translate a Qset-local queue index into a device-global one */
static inline uint16_t
nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
{
	uint16_t global_qidx = local_qidx;

	if (nic->sqs_mode)
		global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);

	return global_qidx;
}

/*
 * Simple phy2virt functions, assuming mbufs are in a single huge page:
 * V = P + offset
 * P = V - offset
 */
static inline uintptr_t
nicvf_mbuff_phy2virt(phys_addr_t phy, uint64_t mbuf_phys_off)
{
	return (uintptr_t)(phy + mbuf_phys_off);
}

static inline phys_addr_t
nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
{
	return (phys_addr_t)(virt - mbuf_phys_off);
}
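/*
 * Illustrative sketch (not part of the driver API): how a datapath can
 * combine the helpers above. The offset is computed once per mempool,
 * after which address translation is plain arithmetic rather than a
 * per-packet mempool lookup. The function name below is hypothetical.
 */
static inline uintptr_t
nicvf_mbuff_translate_example(struct rte_mempool *mp, phys_addr_t buf_phys)
{
	/* V = P + offset, where offset = virt - phys of the backing chunk */
	uint64_t off = nicvf_mempool_phy_offset(mp);
	uintptr_t virt = nicvf_mbuff_phy2virt(buf_phys, off);

	/* Round trip holds as long as the pool is physically contiguous */
	assert(nicvf_mbuff_virt2phy(virt, off) == buf_phys);
	return virt;
}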
/*
 * Range of global Tx queue indexes owned by this Qset, clamped to the
 * number of queues the application actually configured.
 */
static inline void
nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
	       uint16_t *tx_end)
{
	uint16_t tmp;

	*tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_SND_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_SND_QUEUES_PER_QS) - 1;
	*tx_end = dev->data->nb_tx_queues ?
		RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
}

/* Range of global Rx queue indexes owned by this Qset, clamped likewise */
static inline void
nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
	       uint16_t *rx_end)
{
	uint16_t tmp;

	*rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_RCV_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_RCV_QUEUES_PER_QS) - 1;
	*rx_end = dev->data->nb_rx_queues ?
		RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
}

#endif /* __THUNDERX_NICVF_ETHDEV_H__ */
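/*
 * Worked example for the queue-range helpers (illustrative; assumes the
 * hardware constants MAX_SND_QUEUES_PER_QS == MAX_RCV_QUEUES_PER_QS ==
 * MAX_CMP_QUEUES_PER_QS == 8): on a secondary Qset with sqs_id == 0,
 * nicvf_netdev_qidx(nic, 0) returns (0 + 1) * 8 = 8, so nicvf_tx_range()
 * yields *tx_start = RTE_ALIGN_FLOOR(8, 8) = 8 and
 * *tx_end = RTE_MIN(RTE_ALIGN_CEIL(9, 8) - 1, nb_tx_queues - 1)
 *         = RTE_MIN(15, nb_tx_queues - 1).
 */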