xref: /dpdk/drivers/net/axgbe/axgbe_rxtx.h (revision 43fd3624fdfe3a33904a9b64d94306dd3d4f2c13)
19e890103SRavi Kumar /*   SPDX-License-Identifier: BSD-3-Clause
29e890103SRavi Kumar  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
39e890103SRavi Kumar  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
49e890103SRavi Kumar  */
59e890103SRavi Kumar 
69e890103SRavi Kumar #ifndef _AXGBE_RXTX_H_
79e890103SRavi Kumar #define _AXGBE_RXTX_H_
89e890103SRavi Kumar 
/* Descriptor related defines */
#define AXGBE_MAX_RING_DESC		4096 /*should be power of 2*/
/* Refill/cleanup watermarks derived from the max ring size:
 * start freeing Tx descriptors when fewer than 1/8 of the ring is free,
 * and process at most half the ring in one cleanup pass.
 */
#define AXGBE_TX_DESC_MIN_FREE		(AXGBE_MAX_RING_DESC >> 3)
#define AXGBE_TX_DESC_MAX_PROC		(AXGBE_MAX_RING_DESC >> 1)
#define AXGBE_MIN_RING_DESC		32
/* Burst-mode processing parameters for the Rx path */
#define RTE_AXGBE_DESCS_PER_LOOP	4
#define RTE_AXGBE_MAX_RX_BURST		32

/* Default free thresholds used when the app does not supply one */
#define AXGBE_RX_FREE_THRESH		32
#define AXGBE_TX_FREE_THRESH		32

#define AXGBE_DESC_ALIGN		128
/* OWN bit (bit 31 of desc3): set while the descriptor belongs to hardware */
#define AXGBE_DESC_OWN			0x80000000
/* Error-status field mask (bits 16-19 of the write-back descriptor);
 * the two values below are specific error codes within that field.
 * NOTE(review): exact code meanings come from the XGMAC data sheet — not
 * verifiable from this header alone.
 */
#define AXGBE_ERR_STATUS		0x000f0000
#define AXGBE_L3_CSUM_ERR		0x00050000
#define AXGBE_L4_CSUM_ERR		0x00060000
259e890103SRavi Kumar 
269e890103SRavi Kumar #include "axgbe_common.h"
279e890103SRavi Kumar 
/*
 * Ring-index helpers. nb_desc is a power of two (see AXGBE_MAX_RING_DESC),
 * so masking a free-running index with (nb_desc - 1) is a cheap modulo that
 * wraps it into the ring.
 */

/* Pointer to the descriptor at ring position _idx (wrapped). */
#define AXGBE_GET_DESC_PT(_queue, _idx)			\
	(((_queue)->desc) +				\
	((_idx) & ((_queue)->nb_desc - 1)))

/* Wrapped ring index for the free-running index _idx.
 * Fix: the original definition ended with a stray '\' line continuation,
 * which silently spliced the following (blank) line into the macro body.
 */
#define AXGBE_GET_DESC_IDX(_queue, _idx)		\
	((_idx) & ((_queue)->nb_desc - 1))
349e890103SRavi Kumar 
359e890103SRavi Kumar /* Rx desc format */
/* Rx desc format.
 * The same 16-byte slot is viewed two ways: the "read" layout is what the
 * driver programs for the DMA engine (buffer address plus two control
 * words); the "write" layout is presumably the hardware write-back format
 * (four 32-bit status words) — field meanings come from the XGMAC data
 * sheet, not visible here.
 */
union axgbe_rx_desc {
	struct {
		uint64_t baddr;		/* DMA address of the Rx buffer */
		uint32_t desc2;
		uint32_t desc3;		/* holds the OWN bit (AXGBE_DESC_OWN) */
	} read;
	struct {
		uint32_t desc0;
		uint32_t desc1;
		uint32_t desc2;
		uint32_t desc3;		/* status word; error field masked by AXGBE_ERR_STATUS */
	} write;
};
499e890103SRavi Kumar 
/* Per-queue Rx state, cache-line aligned to avoid false sharing between
 * queues serviced by different lcores.
 */
struct __rte_cache_aligned axgbe_rx_queue {
	/* membuf pool for rx buffers */
	struct rte_mempool *mb_pool;
	/* H/w Rx buffer size configured in DMA */
	unsigned int buf_size;
	/* CRC h/w offload */
	uint16_t crc_len;
	/* address of s/w rx buffers (one mbuf per descriptor) */
	struct rte_mbuf **sw_ring;

	/* For segmented packets - save the current state
	 * of the packet, if the next descriptor is not ready yet
	 */
	struct rte_mbuf *saved_mbuf;

	/* Port private data */
	struct axgbe_port *pdata;
	/* Number of Rx descriptors in queue */
	uint16_t nb_desc;
	/* max free RX desc to hold */
	uint16_t free_thresh;
	/* Index of descriptor to check for packet availability.
	 * cur/dirty are free-running 64-bit counters; wrap into the ring
	 * with AXGBE_GET_DESC_IDX/AXGBE_GET_DESC_PT.
	 */
	uint64_t cur;
	/* Index of descriptor to check for buffer reallocation */
	uint64_t dirty;
	/* Software Rx descriptor ring */
	volatile union axgbe_rx_desc *desc;
	/* Ring physical address */
	uint64_t ring_phys_addr;
	/* Dma Channel register address */
	void *dma_regs;
	/* Dma channel tail register address */
	volatile uint32_t *dma_tail_reg;
	/* DPDK queue index */
	uint16_t queue_id;
	/* dpdk port id */
	uint16_t port_id;
	/* queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
	uint64_t rx_mbuf_alloc_failed;
	/* Number of mbufs allocated from pool */
	uint64_t mbuf_alloc;
	uint64_t offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
};
969e890103SRavi Kumar 
979e890103SRavi Kumar /*Tx descriptor format */
/* Tx descriptor format.
 * Mirrors the hardware descriptor: buffer DMA address followed by the two
 * control/status words (desc2/desc3). desc3 carries the OWN bit
 * (AXGBE_DESC_OWN); remaining bit fields are defined by the XGMAC data
 * sheet, not visible here.
 */
struct axgbe_tx_desc {
	phys_addr_t baddr;
	uint32_t desc2;
	uint32_t desc3;
};
1039e890103SRavi Kumar 
/* Per-queue Tx state, cache-line aligned to avoid false sharing between
 * queues serviced by different lcores.
 */
struct __rte_cache_aligned axgbe_tx_queue {
	/* Port private data reference */
	struct axgbe_port *pdata;
	/* Number of Tx descriptors in queue */
	uint16_t nb_desc;
	/* Start freeing TX buffers if there are less free descriptors than
	 * this value
	 */
	uint16_t free_thresh;
	/* Available descriptors for Tx processing */
	uint16_t nb_desc_free;
	/* Batch of mbufs/descs to release */
	uint16_t free_batch_cnt;
	/* Flag for vector support: non-zero disables the vector Tx path */
	uint16_t vector_disable;
	/* Index of descriptor to be used for current transfer.
	 * cur/dirty are free-running 64-bit counters; wrap into the ring
	 * with AXGBE_GET_DESC_IDX/AXGBE_GET_DESC_PT.
	 */
	uint64_t cur;
	/* Index of descriptor to check for transfer complete */
	uint64_t dirty;
	/* Virtual address of ring */
	volatile struct axgbe_tx_desc *desc;
	/* Physical address of ring */
	uint64_t ring_phys_addr;
	/* Dma channel register space */
	void  *dma_regs;
	/* Dma tail register address of ring */
	volatile uint32_t *dma_tail_reg;
	/* Tx queue index/id */
	uint16_t queue_id;
	/* Reference to hold Tx mbufs mapped to Tx descriptors freed
	 * after transmission confirmation
	 */
	struct rte_mbuf **sw_ring;
	/* dpdk port id */
	uint16_t port_id;
	/* queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
};
1459e890103SRavi Kumar 
1469e890103SRavi Kumar /*Queue related APIs */
1479e890103SRavi Kumar 
1489e890103SRavi Kumar /*
1499e890103SRavi Kumar  * RX/TX function prototypes
1509e890103SRavi Kumar  */
1519e890103SRavi Kumar 
/* Used in dev_start by primary process and then
 * in dev_init by secondary process when attaching to an existing ethdev.
 */
void axgbe_set_tx_function(struct rte_eth_dev *dev);
void axgbe_set_rx_function(struct rte_eth_dev *dev);

/* Tx queue lifecycle (ethdev ops implemented in axgbe_rxtx.c) */
void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int  axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      uint16_t nb_tx_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			char *fw_version, size_t fw_size);

/* Tx burst variants: scalar, multi-segment, and vector implementations;
 * one of these is selected by axgbe_set_tx_function().
 */
uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts);

uint16_t axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts);


/* Rx queue lifecycle */
void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int  axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      uint16_t nb_rx_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mb_pool);
void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* Rx burst variants; selected by axgbe_set_rx_function() */
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts);
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
int axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
1999e890103SRavi Kumar 
2009e890103SRavi Kumar #endif /* _AXGBE_RXTX_H_ */
201