/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#ifndef __THUNDERX_NICVF_ETHDEV_H__
#define __THUNDERX_NICVF_ETHDEV_H__

#include <assert.h>

#include <ethdev_driver.h>

#define THUNDERX_NICVF_PMD_VERSION      "2.0"
#define THUNDERX_REG_BYTES		8

#define NICVF_INTR_POLL_INTERVAL_MS	50
#define NICVF_HALF_DUPLEX		0x00
#define NICVF_FULL_DUPLEX		0x01
#define NICVF_UNKNOWN_DUPLEX		0xff

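/* RSS hash types supported on pass1 silicon (tunnel types listed below). */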
#define NICVF_RSS_OFFLOAD_PASS1 ( \
	ETH_RSS_PORT | \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP)

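/* Tunnel RSS hash types, advertised in addition to the pass1 set. */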
#define NICVF_RSS_OFFLOAD_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

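/* Tx offload capabilities reported to applications via dev_info. */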
#define NICVF_TX_OFFLOAD_CAPA ( \
	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
	DEV_TX_OFFLOAD_UDP_CKSUM        | \
	DEV_TX_OFFLOAD_TCP_CKSUM        | \
	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
	DEV_TX_OFFLOAD_MULTI_SEGS)

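/* Rx offload capabilities reported to applications via dev_info. */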
#define NICVF_RX_OFFLOAD_CAPA ( \
	DEV_RX_OFFLOAD_CHECKSUM    | \
	DEV_RX_OFFLOAD_VLAN_STRIP  | \
	DEV_RX_OFFLOAD_SCATTER     | \
	DEV_RX_OFFLOAD_RSS_HASH)

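/* Default and maximum free thresholds used when setting up Rx/Tx queues. */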
#define NICVF_DEFAULT_RX_FREE_THRESH    224
#define NICVF_DEFAULT_TX_FREE_THRESH    224
#define NICVF_TX_FREE_MPOOL_THRESH      16
#define NICVF_MAX_RX_FREE_THRESH        1024
#define NICVF_MAX_TX_FREE_THRESH        1024

#define VLAN_TAG_SIZE                   4	/* 802.3ac tag */

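/* Devargs key to skip leading bytes of received packet data. */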
#define SKIP_DATA_BYTES "skip_data_bytes"
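
/* Return the per-device private data (struct nicvf) of an ethdev. */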
static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

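/*
 * VA-to-IOVA offset of the pool's first memory chunk; assumes the whole
 * pool lives in one physically contiguous region (see phy2virt below).
 */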
static inline uint64_t
nicvf_mempool_phy_offset(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *hdr;

	hdr = STAILQ_FIRST(&mp->mem_list);
	assert(hdr != NULL);
	return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
}

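/*
 * Offset from the start of the mbuf to its data buffer, i.e. the mbuf
 * header plus any application private area.
 */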
static inline uint16_t
nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
{
	return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
}

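/*
 * Map a queue index local to a Qset to the global ethdev queue index;
 * queues of a secondary Qset (sqs_mode) are offset by (sqs_id + 1)
 * Qsets' worth of queues.
 */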
static inline uint16_t
nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
{
	uint16_t global_qidx = local_qidx;

	if (nic->sqs_mode)
		global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);

	return global_qidx;
}

/*
 * Simple phy2virt/virt2phy helpers, assuming all mbufs of a pool live in
 * a single physically contiguous region (e.g. one huge page):
 * V = P + offset
 * P = V - offset
 */
static inline uintptr_t
nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
{
	return (uintptr_t)(phy + mbuf_phys_off);
}

static inline rte_iova_t
nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
{
	return (rte_iova_t)(virt - mbuf_phys_off);
}

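/*
 * Inclusive range of ethdev Tx queue indexes that belong to this VF's
 * Qset, clamped to the number of Tx queues actually configured.
 */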
static inline void
nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
	       uint16_t *tx_end)
{
	uint16_t tmp;

	*tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_SND_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_SND_QUEUES_PER_QS) - 1;
	*tx_end = dev->data->nb_tx_queues ?
		RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
}

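/*
 * Inclusive range of ethdev Rx queue indexes that belong to this VF's
 * Qset, clamped to the number of Rx queues actually configured.
 */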
static inline void
nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
	       uint16_t *rx_end)
{
	uint16_t tmp;

	*rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
				    MAX_RCV_QUEUES_PER_QS);
	tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
			     MAX_RCV_QUEUES_PER_QS) - 1;
	*rx_end = dev->data->nb_rx_queues ?
		RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
}

#endif /* __THUNDERX_NICVF_ETHDEV_H__ */