/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#ifndef _AXGBE_RXTX_H_
#define _AXGBE_RXTX_H_

/* Descriptor related defines */
#define AXGBE_MAX_RING_DESC		4096 /* should be a power of 2 */
#define AXGBE_TX_DESC_MIN_FREE		(AXGBE_MAX_RING_DESC >> 3)
#define AXGBE_TX_DESC_MAX_PROC		(AXGBE_MAX_RING_DESC >> 1)
#define AXGBE_MIN_RING_DESC		32
#define RTE_AXGBE_DESCS_PER_LOOP	4
#define RTE_AXGBE_MAX_RX_BURST		32

#define AXGBE_RX_FREE_THRESH		32
#define AXGBE_TX_FREE_THRESH		32

#define AXGBE_DESC_ALIGN		128
#define AXGBE_DESC_OWN			0x80000000
#define AXGBE_ERR_STATUS		0x000f0000
#define AXGBE_L3_CSUM_ERR		0x00050000
#define AXGBE_L4_CSUM_ERR		0x00060000

#include "axgbe_common.h"

#define AXGBE_GET_DESC_PT(_queue, _idx)			\
	(((_queue)->desc) +				\
	((_idx) & ((_queue)->nb_desc - 1)))

#define AXGBE_GET_DESC_IDX(_queue, _idx)		\
	((_idx) & ((_queue)->nb_desc - 1))
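
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * the two macros above rely on nb_desc being a power of two, so the
 * "& (nb_desc - 1)" mask is a cheap modulo that wraps a free-running
 * 64-bit counter onto the ring. For example, with nb_desc = 512,
 * counter value 515 maps to ring slot 3.
 */
static inline uint16_t
axgbe_example_ring_slot(uint64_t counter, uint16_t nb_desc)
{
	/* Valid only when nb_desc is a power of two */
	return (uint16_t)(counter & (nb_desc - 1));
}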

/* Rx desc format */
union axgbe_rx_desc {
	struct {
		uint64_t baddr;
		uint32_t desc2;
		uint32_t desc3;
	} read;
	struct {
		uint32_t desc0;
		uint32_t desc1;
		uint32_t desc2;
		uint32_t desc3;
	} write;
};
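
/*
 * Illustrative sketch (an assumption drawn from the defines above, not
 * driver code): in the read format the host publishes a fresh buffer
 * address and then hands the descriptor to hardware by setting the OWN
 * bit in desc3. "buf_iova" is a hypothetical parameter standing in for
 * the mbuf's IOVA.
 */
static inline void
axgbe_example_arm_rx_desc(volatile union axgbe_rx_desc *desc,
			  uint64_t buf_iova)
{
	desc->read.baddr = buf_iova;
	desc->read.desc2 = 0;
	/* OWN transfers ownership of the descriptor to the DMA engine */
	desc->read.desc3 = AXGBE_DESC_OWN;
}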

struct __rte_cache_aligned axgbe_rx_queue {
	/* mbuf pool for Rx buffers */
	struct rte_mempool *mb_pool;
	/* H/w Rx buffer size configured in DMA */
	unsigned int buf_size;
	/* CRC h/w offload */
	uint16_t crc_len;
	/* Address of s/w Rx buffers */
	struct rte_mbuf **sw_ring;

	/* For segmented packets - save the current state of the packet
	 * if the next descriptor is not ready yet
	 */
	struct rte_mbuf *saved_mbuf;

	/* Port private data */
	struct axgbe_port *pdata;
	/* Number of Rx descriptors in queue */
	uint16_t nb_desc;
	/* Max free Rx descriptors to hold before reallocation */
	uint16_t free_thresh;
	/* Index of descriptor to check for packet availability */
	uint64_t cur;
	/* Index of descriptor to check for buffer reallocation */
	uint64_t dirty;
	/* Rx descriptor ring (virtual address) */
	volatile union axgbe_rx_desc *desc;
	/* Ring physical address */
	uint64_t ring_phys_addr;
	/* DMA channel register address */
	void *dma_regs;
	/* DMA channel tail register address */
	volatile uint32_t *dma_tail_reg;
	/* DPDK queue index */
	uint16_t queue_id;
	/* DPDK port id */
	uint16_t port_id;
	/* Queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
	uint64_t rx_mbuf_alloc_failed;
	/* Number of mbufs allocated from pool */
	uint64_t mbuf_alloc;
	uint64_t offloads; /**< Rx offload flags of RTE_ETH_RX_OFFLOAD_* */
};
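
/*
 * Illustrative sketch (hypothetical helper, not driver code): cur and
 * dirty are free-running counters, so the number of descriptors whose
 * buffers still await reallocation is their unsigned difference; each
 * counter is masked with AXGBE_GET_DESC_IDX() before it touches the
 * ring.
 */
static inline uint64_t
axgbe_example_rx_dirty_count(const struct axgbe_rx_queue *rxq)
{
	/* Unsigned subtraction stays correct across counter wrap-around */
	return rxq->cur - rxq->dirty;
}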

/* Tx descriptor format */
struct axgbe_tx_desc {
	phys_addr_t baddr;
	uint32_t desc2;
	uint32_t desc3;
};

struct __rte_cache_aligned axgbe_tx_queue {
	/* Port private data reference */
	struct axgbe_port *pdata;
	/* Number of Tx descriptors in queue */
	uint16_t nb_desc;
	/* Start freeing Tx buffers if there are fewer free descriptors
	 * than this value
	 */
	uint16_t free_thresh;
	/* Available descriptors for Tx processing */
	uint16_t nb_desc_free;
	/* Batch of mbufs/descs to release */
	uint16_t free_batch_cnt;
	/* Flag for vector support */
	uint16_t vector_disable;
	/* Index of descriptor to be used for current transfer */
	uint64_t cur;
	/* Index of descriptor to check for transfer complete */
	uint64_t dirty;
	/* Virtual address of ring */
	volatile struct axgbe_tx_desc *desc;
	/* Physical address of ring */
	uint64_t ring_phys_addr;
	/* DMA channel register space */
	void *dma_regs;
	/* DMA tail register address of ring */
	volatile uint32_t *dma_tail_reg;
	/* Tx queue index/id */
	uint16_t queue_id;
	/* Tx mbufs mapped to Tx descriptors; freed once transmission
	 * is confirmed
	 */
	struct rte_mbuf **sw_ring;
	/* DPDK port id */
	uint16_t port_id;
	/* Queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
};
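
/*
 * Illustrative sketch (hypothetical helper, not driver code): per the
 * field comments above, completed descriptors are reclaimed once the
 * free count drops below free_thresh, after which mbufs are released
 * in batches of free_batch_cnt.
 */
static inline int
axgbe_example_tx_needs_cleanup(const struct axgbe_tx_queue *txq)
{
	return txq->nb_desc_free < txq->free_thresh;
}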

/* Queue related APIs */

/*
 * RX/TX function prototypes
 */

/* Used in dev_start by the primary process and then in dev_init by a
 * secondary process when attaching to an existing ethdev.
 */
void axgbe_set_tx_function(struct rte_eth_dev *dev);
void axgbe_set_rx_function(struct rte_eth_dev *dev);

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int  axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      uint16_t nb_tx_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			char *fw_version, size_t fw_size);

uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts);

uint16_t axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts);

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int  axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      uint16_t nb_rx_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mb_pool);
void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts);
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
int axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
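
/*
 * Usage sketch (illustrative, application side): the burst handlers
 * above are installed as the ethdev Rx/Tx callbacks by
 * axgbe_set_rx_function()/axgbe_set_tx_function(), so applications
 * reach them through the generic rte_eth_rx_burst()/rte_eth_tx_burst()
 * API rather than calling them directly. Compile-guarded because it is
 * an example, not part of the driver.
 */
#ifdef AXGBE_RXTX_USAGE_EXAMPLE
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static inline void
axgbe_example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[RTE_AXGBE_MAX_RX_BURST];
	uint16_t nb_rx, i;

	/* Dispatches to axgbe_recv_pkts() or one of its variants */
	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts,
				 RTE_AXGBE_MAX_RX_BURST);
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}
#endif /* AXGBE_RXTX_USAGE_EXAMPLE */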

#endif /* _AXGBE_RXTX_H_ */