/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#ifndef __THUNDERX_NICVF_ETHDEV_H__
#define __THUNDERX_NICVF_ETHDEV_H__

#include <ethdev_driver.h>

#define THUNDERX_NICVF_PMD_VERSION "2.0"
#define THUNDERX_REG_BYTES 8

#define NICVF_INTR_POLL_INTERVAL_MS 50
/* Poll link state every 2 seconds */
#define NICVF_INTR_LINK_POLL_INTERVAL_MS 2000
#define NICVF_HALF_DUPLEX 0x00
#define NICVF_FULL_DUPLEX 0x01
#define NICVF_UNKNOWN_DUPLEX 0xff

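/* RSS hash types supported on pass1 silicon */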
#define NICVF_RSS_OFFLOAD_PASS1 ( \
	RTE_ETH_RSS_PORT | \
	RTE_ETH_RSS_IPV4 | \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_IPV6 | \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP)

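/* Additional tunnel RSS hash types (VXLAN, GENEVE, NVGRE) */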
#define NICVF_RSS_OFFLOAD_TUNNEL ( \
	RTE_ETH_RSS_VXLAN | \
	RTE_ETH_RSS_GENEVE | \
	RTE_ETH_RSS_NVGRE)

#define NICVF_TX_OFFLOAD_CAPA ( \
	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
	RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
	RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
	RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
	RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | \
	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)

#define NICVF_RX_OFFLOAD_CAPA ( \
	RTE_ETH_RX_OFFLOAD_CHECKSUM | \
	RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
	RTE_ETH_RX_OFFLOAD_SCATTER | \
	RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define NICVF_DEFAULT_RX_FREE_THRESH 224
#define NICVF_DEFAULT_TX_FREE_THRESH 224
#define NICVF_TX_FREE_MPOOL_THRESH 16
#define NICVF_MAX_RX_FREE_THRESH 1024
#define NICVF_MAX_TX_FREE_THRESH 1024

#define VLAN_TAG_SIZE 4	/* 802.3ac tag */

#define SKIP_DATA_BYTES "skip_data_bytes"
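
/* Return the per-device driver private area (struct nicvf) */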
static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

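/*
 * Offset between the virtual address and the IOVA of the mempool's first
 * memory chunk; meaningful under the driver's single-contiguous-region
 * assumption (see the phy2virt helpers below).
 */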
static inline uint64_t
nicvf_mempool_phy_offset(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *hdr;

	hdr = STAILQ_FIRST(&mp->mem_list);
	assert(hdr != NULL);
	return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
}

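/* Number of bytes from the start of the mbuf structure to its data buffer */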
static inline uint16_t
nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
{
	return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
}

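/*
 * Convert a Qset-local queue index to the global ethdev queue index; for a
 * secondary Qset (sqs_mode) the index is offset by the Qset's position.
 */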
static inline uint16_t
nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
{
	uint16_t global_qidx = local_qidx;

	if (nic->sqs_mode)
		global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);

	return global_qidx;
}

/*
 * Simple phys-to-virt translation helpers, assuming all mbufs live in a
 * single huge page:
 *	V = P + offset
 *	P = V - offset
 */
static inline uintptr_t
nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
{
	return (uintptr_t)(phy + mbuf_phys_off);
}

static inline uintptr_t
nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
{
	return (rte_iova_t)(virt - mbuf_phys_off);
}

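/*
 * Compute the global TX queue index range [tx_start, tx_end] that belongs to
 * this VF's Qset, clamped to the number of configured TX queues.
 */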
static inline void
nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
	       uint16_t *tx_end)
{
	uint16_t tmp;

	*tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_SND_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_SND_QUEUES_PER_QS) - 1;
	*tx_end = dev->data->nb_tx_queues ?
		RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
}

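/*
 * Compute the global RX queue index range [rx_start, rx_end] that belongs to
 * this VF's Qset, clamped to the number of configured RX queues.
 */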
static inline void
nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
	       uint16_t *rx_end)
{
	uint16_t tmp;

	*rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_RCV_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_RCV_QUEUES_PER_QS) - 1;
	*rx_end = dev->data->nb_rx_queues ?
		RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
}
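
/*
 * Illustrative usage sketch (not part of this header; setup_txq() is a
 * hypothetical helper): a caller iterates the global range and converts back
 * to a Qset-local index for per-queue setup.
 *
 *	uint16_t qidx, tx_start, tx_end;
 *
 *	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
 *	for (qidx = tx_start; qidx <= tx_end; qidx++)
 *		setup_txq(nic, qidx % MAX_SND_QUEUES_PER_QS);
 */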

#endif /* __THUNDERX_NICVF_ETHDEV_H__ */