/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#ifndef HNS3_RXTX_VEC_H
#define HNS3_RXTX_VEC_H

#include "hns3_rxtx.h"
#include "hns3_ethdev.h"

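/*
 * Free a batch of tx_rs_thresh transmitted mbufs starting at next_to_clean.
 * With mbuf fast free enabled the whole batch is returned to a single
 * mempool in one call; otherwise mbufs are grouped by mempool and freed in
 * bulk.
 */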
static inline void
hns3_tx_bulk_free_buffers(struct hns3_tx_queue *txq)
{
	struct rte_mbuf **free = txq->free;
	struct hns3_entry *tx_entry;
	struct rte_mbuf *m;
	int nb_free = 0;
	uint16_t i;

	tx_entry = &txq->sw_ring[txq->next_to_clean];
	if (txq->mbuf_fast_free_en) {
		/*
		 * Fast free: every mbuf in the batch comes from the same
		 * mempool and has a reference count of one, so the whole
		 * batch can be returned with a single bulk put.
		 */
		rte_mempool_put_bulk(tx_entry->mbuf->pool, (void **)tx_entry,
				     txq->tx_rs_thresh);
		for (i = 0; i < txq->tx_rs_thresh; i++)
			tx_entry[i].mbuf = NULL;
		goto update_field;
	}

	/* Group consecutive mbufs from the same mempool into one bulk put. */
	for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
		m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
		tx_entry->mbuf = NULL;

		if (m == NULL)
			continue;

		if (nb_free && m->pool != free[0]->pool) {
			rte_mempool_put_bulk(free[0]->pool, (void **)free,
					     nb_free);
			nb_free = 0;
		}
		free[nb_free++] = m;
	}

	if (nb_free)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);

update_field:
	/* Update the number of available descriptors after buffers are freed. */
	txq->tx_bd_ready += txq->tx_rs_thresh;
	txq->next_to_clean += txq->tx_rs_thresh;
	if (txq->next_to_clean >= txq->nb_tx_desc)
		txq->next_to_clean = 0;
}

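/*
 * Release transmitted mbufs in batches of tx_rs_thresh: the batch is freed
 * only after hardware has cleared the VLD bit of every descriptor in it.
 */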
static inline void
hns3_tx_free_buffers(struct hns3_tx_queue *txq)
{
	struct hns3_desc *tx_desc;
	uint16_t i;

	/*
	 * All mbufs can be released only when the VLD bits of all
	 * descriptors in a batch are cleared.
	 */
	tx_desc = &txq->tx_ring[txq->next_to_clean];
	for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
		if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
				rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
			return;
	}

	hns3_tx_bulk_free_buffers(txq);
}

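/*
 * Squeeze out packets flagged in pkt_err_mask (one bit per packet): flagged
 * mbufs are freed and the remaining pointers are packed to the front of
 * rx_pkts. Returns the number of packets kept.
 */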
static inline uint16_t
hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts,
			uint64_t pkt_err_mask)
{
	uint16_t count, i;
	uint64_t mask;

	if (likely(pkt_err_mask == 0))
		return nb_pkts;

	/* Drop erroneous packets and compact the remaining ones in place. */
	count = 0;
	for (i = 0; i < nb_pkts; i++) {
		mask = ((uint64_t)1u) << i;
		if (pkt_err_mask & mask)
			rte_pktmbuf_free_seg(rx_pkts[i]);
		else
			rx_pkts[count++] = rx_pkts[i];
	}

	return count;
}

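/*
 * Refill HNS3_DEFAULT_RXQ_REARM_THRESH Rx descriptors with fresh mbufs,
 * four per loop iteration, then notify hardware through the queue head
 * register. If the mempool is exhausted, only the VLD bit of the first
 * descriptor is cleared and the alloc-failure counter is incremented.
 */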
static inline void
hns3_rxq_rearm_mbuf(struct hns3_rx_queue *rxq)
{
#define REARM_LOOP_STEP_NUM	4
	struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start];
	struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start;
	uint64_t dma_addr;
	int i;

	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
					  HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) {
		/*
		 * Clear the VLD bit of the first descriptor to be rearmed so
		 * that it is not treated as a valid BD when packets are
		 * received later.
		 */
		rxdp[0].rx.bd_base_info = 0;
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
		return;
	}

	for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
		rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) {
		if (likely(i <
			HNS3_DEFAULT_RXQ_REARM_THRESH - REARM_LOOP_STEP_NUM)) {
			/* Prefetch the mbufs used in the next iteration. */
			rte_prefetch_non_temporal(rxep[4].mbuf);
			rte_prefetch_non_temporal(rxep[5].mbuf);
			rte_prefetch_non_temporal(rxep[6].mbuf);
			rte_prefetch_non_temporal(rxep[7].mbuf);
		}

		dma_addr = rte_mbuf_data_iova_default(rxep[0].mbuf);
		rxdp[0].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[0].rx.bd_base_info = 0;

		dma_addr = rte_mbuf_data_iova_default(rxep[1].mbuf);
		rxdp[1].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[1].rx.bd_base_info = 0;

		dma_addr = rte_mbuf_data_iova_default(rxep[2].mbuf);
		rxdp[2].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[2].rx.bd_base_info = 0;

		dma_addr = rte_mbuf_data_iova_default(rxep[3].mbuf);
		rxdp[3].addr = rte_cpu_to_le_64(dma_addr);
		rxdp[3].rx.bd_base_info = 0;
	}

	rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH;
	if (rxq->rx_rearm_start >= rxq->nb_rx_desc)
		rxq->rx_rearm_start = 0;

	rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH;

	/* Tell hardware how many BDs have been refilled. */
	hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH);
}
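
/*
 * Illustrative sketch only, not part of the driver: one way a vector Tx
 * burst routine could use the helpers above. The function name and the
 * reclaim threshold are hypothetical; the real callers are the vector
 * Rx/Tx burst routines elsewhere in this driver.
 *
 *	static uint16_t
 *	example_xmit_burst(struct hns3_tx_queue *txq,
 *			   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 *	{
 *		if (txq->tx_bd_ready < txq->tx_rs_thresh)
 *			hns3_tx_free_buffers(txq);
 *		...
 *		return nb_pkts;
 *	}
 */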
#endif /* HNS3_RXTX_VEC_H */