/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#ifndef HNS3_RXTX_VEC_H
#define HNS3_RXTX_VEC_H

#include "hns3_rxtx.h"
#include "hns3_ethdev.h"

11e31f123dSWei Hu (Xavier) static inline void
hns3_tx_bulk_free_buffers(struct hns3_tx_queue * txq)12f0c243a6SChengwen Feng hns3_tx_bulk_free_buffers(struct hns3_tx_queue *txq)
13e31f123dSWei Hu (Xavier) {
14e31f123dSWei Hu (Xavier) 	struct rte_mbuf **free = txq->free;
15e31f123dSWei Hu (Xavier) 	struct hns3_entry *tx_entry;
16e31f123dSWei Hu (Xavier) 	struct rte_mbuf *m;
17e31f123dSWei Hu (Xavier) 	int nb_free = 0;
1867d01034SHuisong Li 	uint16_t i;
19e31f123dSWei Hu (Xavier) 
20e31f123dSWei Hu (Xavier) 	tx_entry = &txq->sw_ring[txq->next_to_clean];
213cc817c1SChengwen Feng 	if (txq->mbuf_fast_free_en) {
223cc817c1SChengwen Feng 		rte_mempool_put_bulk(tx_entry->mbuf->pool, (void **)tx_entry,
233cc817c1SChengwen Feng 				     txq->tx_rs_thresh);
243cc817c1SChengwen Feng 		for (i = 0; i < txq->tx_rs_thresh; i++)
253cc817c1SChengwen Feng 			tx_entry[i].mbuf = NULL;
263cc817c1SChengwen Feng 		goto update_field;
273cc817c1SChengwen Feng 	}
283cc817c1SChengwen Feng 
29e31f123dSWei Hu (Xavier) 	for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
30e31f123dSWei Hu (Xavier) 		m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
31e31f123dSWei Hu (Xavier) 		tx_entry->mbuf = NULL;
32e31f123dSWei Hu (Xavier) 
33e31f123dSWei Hu (Xavier) 		if (m == NULL)
34e31f123dSWei Hu (Xavier) 			continue;
35e31f123dSWei Hu (Xavier) 
36e31f123dSWei Hu (Xavier) 		if (nb_free && m->pool != free[0]->pool) {
37e31f123dSWei Hu (Xavier) 			rte_mempool_put_bulk(free[0]->pool, (void **)free,
38e31f123dSWei Hu (Xavier) 					     nb_free);
39e31f123dSWei Hu (Xavier) 			nb_free = 0;
40e31f123dSWei Hu (Xavier) 		}
41e31f123dSWei Hu (Xavier) 		free[nb_free++] = m;
42e31f123dSWei Hu (Xavier) 	}
43e31f123dSWei Hu (Xavier) 
44e31f123dSWei Hu (Xavier) 	if (nb_free)
45e31f123dSWei Hu (Xavier) 		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
46e31f123dSWei Hu (Xavier) 
473cc817c1SChengwen Feng update_field:
48e31f123dSWei Hu (Xavier) 	/* Update numbers of available descriptor due to buffer freed */
49e31f123dSWei Hu (Xavier) 	txq->tx_bd_ready += txq->tx_rs_thresh;
50e31f123dSWei Hu (Xavier) 	txq->next_to_clean += txq->tx_rs_thresh;
51e31f123dSWei Hu (Xavier) 	if (txq->next_to_clean >= txq->nb_tx_desc)
52e31f123dSWei Hu (Xavier) 		txq->next_to_clean = 0;
53e31f123dSWei Hu (Xavier) }
54a3d4f4d2SWei Hu (Xavier) 
55f0c243a6SChengwen Feng static inline void
hns3_tx_free_buffers(struct hns3_tx_queue * txq)56f0c243a6SChengwen Feng hns3_tx_free_buffers(struct hns3_tx_queue *txq)
57f0c243a6SChengwen Feng {
58f0c243a6SChengwen Feng 	struct hns3_desc *tx_desc;
5967d01034SHuisong Li 	uint16_t i;
60f0c243a6SChengwen Feng 
61f0c243a6SChengwen Feng 	/*
62f0c243a6SChengwen Feng 	 * All mbufs can be released only when the VLD bits of all
63f0c243a6SChengwen Feng 	 * descriptors in a batch are cleared.
64f0c243a6SChengwen Feng 	 */
65f0c243a6SChengwen Feng 	tx_desc = &txq->tx_ring[txq->next_to_clean];
66f0c243a6SChengwen Feng 	for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
67f0c243a6SChengwen Feng 		if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
68f0c243a6SChengwen Feng 				rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
69f0c243a6SChengwen Feng 			return;
70f0c243a6SChengwen Feng 	}
71f0c243a6SChengwen Feng 
72f0c243a6SChengwen Feng 	hns3_tx_bulk_free_buffers(txq);
73f0c243a6SChengwen Feng }
74f0c243a6SChengwen Feng 
/*
 * Compact the rx_pkts array in place by dropping packets whose bit is set
 * in pkt_err_mask (each dropped mbuf segment is freed). Returns the number
 * of good packets left at the front of the array.
 */
static inline uint16_t
hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts,
			uint64_t pkt_err_mask)
{
	uint16_t nb_kept = 0;
	uint16_t idx;

	/* Common case: no errors, the array is already dense. */
	if (likely(pkt_err_mask == 0))
		return nb_pkts;

	for (idx = 0; idx < nb_pkts; idx++) {
		if (pkt_err_mask & (UINT64_C(1) << idx))
			rte_pktmbuf_free_seg(rx_pkts[idx]);
		else
			rx_pkts[nb_kept++] = rx_pkts[idx];
	}

	return nb_kept;
}

98d49b6447SHuisong Li static inline void
hns3_rxq_rearm_mbuf(struct hns3_rx_queue * rxq)99d49b6447SHuisong Li hns3_rxq_rearm_mbuf(struct hns3_rx_queue *rxq)
100d49b6447SHuisong Li {
101d49b6447SHuisong Li #define REARM_LOOP_STEP_NUM	4
102d49b6447SHuisong Li 	struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start];
103d49b6447SHuisong Li 	struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start;
104d49b6447SHuisong Li 	uint64_t dma_addr;
105d49b6447SHuisong Li 	int i;
106d49b6447SHuisong Li 
107d49b6447SHuisong Li 	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
108d49b6447SHuisong Li 					  HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) {
109*01843ab2SHuisong Li 		/*
110*01843ab2SHuisong Li 		 * Clear VLD bit for the first descriptor rearmed in case
111*01843ab2SHuisong Li 		 * of going to receive packets later.
112*01843ab2SHuisong Li 		 */
113*01843ab2SHuisong Li 		rxdp[0].rx.bd_base_info = 0;
114d49b6447SHuisong Li 		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
115d49b6447SHuisong Li 		return;
116d49b6447SHuisong Li 	}
117d49b6447SHuisong Li 
118d49b6447SHuisong Li 	for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
119d49b6447SHuisong Li 		rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) {
120d49b6447SHuisong Li 		if (likely(i <
121d49b6447SHuisong Li 			HNS3_DEFAULT_RXQ_REARM_THRESH - REARM_LOOP_STEP_NUM)) {
122d49b6447SHuisong Li 			rte_prefetch_non_temporal(rxep[4].mbuf);
123d49b6447SHuisong Li 			rte_prefetch_non_temporal(rxep[5].mbuf);
124d49b6447SHuisong Li 			rte_prefetch_non_temporal(rxep[6].mbuf);
125d49b6447SHuisong Li 			rte_prefetch_non_temporal(rxep[7].mbuf);
126d49b6447SHuisong Li 		}
127d49b6447SHuisong Li 
128d49b6447SHuisong Li 		dma_addr = rte_mbuf_data_iova_default(rxep[0].mbuf);
129d49b6447SHuisong Li 		rxdp[0].addr = rte_cpu_to_le_64(dma_addr);
130d49b6447SHuisong Li 		rxdp[0].rx.bd_base_info = 0;
131d49b6447SHuisong Li 
132d49b6447SHuisong Li 		dma_addr = rte_mbuf_data_iova_default(rxep[1].mbuf);
133d49b6447SHuisong Li 		rxdp[1].addr = rte_cpu_to_le_64(dma_addr);
134d49b6447SHuisong Li 		rxdp[1].rx.bd_base_info = 0;
135d49b6447SHuisong Li 
136d49b6447SHuisong Li 		dma_addr = rte_mbuf_data_iova_default(rxep[2].mbuf);
137d49b6447SHuisong Li 		rxdp[2].addr = rte_cpu_to_le_64(dma_addr);
138d49b6447SHuisong Li 		rxdp[2].rx.bd_base_info = 0;
139d49b6447SHuisong Li 
140d49b6447SHuisong Li 		dma_addr = rte_mbuf_data_iova_default(rxep[3].mbuf);
141d49b6447SHuisong Li 		rxdp[3].addr = rte_cpu_to_le_64(dma_addr);
142d49b6447SHuisong Li 		rxdp[3].rx.bd_base_info = 0;
143d49b6447SHuisong Li 	}
144d49b6447SHuisong Li 
145d49b6447SHuisong Li 	rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH;
146d49b6447SHuisong Li 	if (rxq->rx_rearm_start >= rxq->nb_rx_desc)
147d49b6447SHuisong Li 		rxq->rx_rearm_start = 0;
148d49b6447SHuisong Li 
149d49b6447SHuisong Li 	rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH;
150d49b6447SHuisong Li 
151d49b6447SHuisong Li 	hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH);
152d49b6447SHuisong Li }
#endif /* HNS3_RXTX_VEC_H */