xref: /dpdk/drivers/net/hns3/hns3_rxtx.c (revision d78c76dbeffbd2994d77236c403281b34612e024)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <bus_pci_driver.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_geneve.h>
#include <rte_vxlan.h>
#include <ethdev_driver.h>
#include <rte_io.h>
#include <rte_net.h>
#include <rte_malloc.h>
#if defined(RTE_ARCH_ARM64)
#include <rte_cpuflags.h>
#include <rte_vect.h>
#endif

#include "hns3_common.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_mp.h"
#include "hns3_rxtx.h"

#define HNS3_CFG_DESC_NUM(num)	((num) / 8 - 1)
#define HNS3_RX_RING_PREFETCTH_MASK	3

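/*
 * Free all mbufs still held by an Rx queue. When the vector Rx path is in
 * use (rx_rearm_nb != 0), only the descriptors between next_to_use and
 * rx_rearm_start still own mbufs; the entries inside the rearm window are
 * merely cleared so that a later release cannot double-free them.
 */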
static void
hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
{
	uint16_t i;

	/* Note: Fake rx queue will not enter here */
	if (rxq->sw_ring == NULL)
		return;

	if (rxq->rx_rearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	} else {
		for (i = rxq->next_to_use;
		     i != rxq->rx_rearm_start;
		     i = (i + 1) % rxq->nb_rx_desc) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
		for (i = 0; i < rxq->rx_rearm_nb; i++)
			rxq->sw_ring[(rxq->rx_rearm_start + i) % rxq->nb_rx_desc].mbuf = NULL;
	}

	for (i = 0; i < rxq->bulk_mbuf_num; i++)
		rte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);
	rxq->bulk_mbuf_num = 0;

	if (rxq->pkt_first_seg) {
		rte_pktmbuf_free(rxq->pkt_first_seg);
		rxq->pkt_first_seg = NULL;
	}
}

static void
hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)
{
	uint16_t i;

	/* Note: Fake tx queue will not enter here */
	if (txq->sw_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
hns3_rx_queue_release(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	if (rxq) {
		hns3_rx_queue_release_mbufs(rxq);
		if (rxq->mz) {
			rte_memzone_free(rxq->mz);
			rxq->mz = NULL;
		}
		if (rxq->sw_ring) {
			rte_free(rxq->sw_ring);
			rxq->sw_ring = NULL;
		}
		rte_free(rxq);
	}
}

static void
hns3_tx_queue_release(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	if (txq) {
		hns3_tx_queue_release_mbufs(txq);
		if (txq->mz) {
			rte_memzone_free(txq->mz);
			txq->mz = NULL;
		}
		if (txq->sw_ring) {
			rte_free(txq->sw_ring);
			txq->sw_ring = NULL;
		}
		if (txq->free) {
			rte_free(txq->free);
			txq->free = NULL;
		}
		rte_free(txq);
	}
}

static void
hns3_rx_queue_release_lock(void *queue)
{
	struct hns3_rx_queue *rxq = queue;
	struct hns3_adapter *hns;

	if (rxq == NULL)
		return;

	hns = rxq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_rx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}

void
hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
	hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
}

static void
hns3_tx_queue_release_lock(void *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_adapter *hns;

	if (txq == NULL)
		return;

	hns = txq->hns;
	rte_spinlock_lock(&hns->hw.lock);
	hns3_tx_queue_release(queue);
	rte_spinlock_unlock(&hns->hw.lock);
}

void
hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
	hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
}

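/*
 * "Fake" queues are the driver's placeholders for the unconfigured
 * direction when the numbers of Rx and Tx queues differ; the hardware
 * appears to manage queues as Rx/Tx pairs, so the spare direction still
 * needs a ring. The helpers below tear those placeholder rings down.
 */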
static void
hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
{
	struct hns3_rx_queue *rxq = queue;
	struct hns3_adapter *hns;
	struct hns3_hw *hw;
	uint16_t idx;

	if (rxq == NULL)
		return;

	hns = rxq->hns;
	hw = &hns->hw;
	idx = rxq->queue_id;
	if (hw->fkq_data.rx_queues[idx]) {
		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
		hw->fkq_data.rx_queues[idx] = NULL;
	}

	/* free fake rx queue arrays */
	if (idx == hw->fkq_data.nb_fake_rx_queues - 1) {
		hw->fkq_data.nb_fake_rx_queues = 0;
		rte_free(hw->fkq_data.rx_queues);
		hw->fkq_data.rx_queues = NULL;
	}
}

static void
hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
{
	struct hns3_tx_queue *txq = queue;
	struct hns3_adapter *hns;
	struct hns3_hw *hw;
	uint16_t idx;

	if (txq == NULL)
		return;

	hns = txq->hns;
	hw = &hns->hw;
	idx = txq->queue_id;
	if (hw->fkq_data.tx_queues[idx]) {
		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
		hw->fkq_data.tx_queues[idx] = NULL;
	}

	/* free fake tx queue arrays */
	if (idx == hw->fkq_data.nb_fake_tx_queues - 1) {
		hw->fkq_data.nb_fake_tx_queues = 0;
		rte_free(hw->fkq_data.tx_queues);
		hw->fkq_data.tx_queues = NULL;
	}
}

static void
hns3_free_rx_queues(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fake_queue_data *fkq_data;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q;
	uint16_t i;

	nb_rx_q = hw->data->nb_rx_queues;
	for (i = 0; i < nb_rx_q; i++) {
		if (dev->data->rx_queues[i]) {
			hns3_rx_queue_release(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
	}

	/* Free fake Rx queues */
	fkq_data = &hw->fkq_data;
	for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
		if (fkq_data->rx_queues[i])
			hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
	}
}

static void
hns3_free_tx_queues(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fake_queue_data *fkq_data;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_tx_q;
	uint16_t i;

	nb_tx_q = hw->data->nb_tx_queues;
	for (i = 0; i < nb_tx_q; i++) {
		if (dev->data->tx_queues[i]) {
			hns3_tx_queue_release(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
	}

	/* Free fake Tx queues */
	fkq_data = &hw->fkq_data;
	for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
		if (fkq_data->tx_queues[i])
			hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
	}
}

void
hns3_free_all_queues(struct rte_eth_dev *dev)
{
	hns3_free_rx_queues(dev);
	hns3_free_tx_queues(dev);
}

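/*
 * Verify that the mbuf data IO address satisfies the hardware's Rx DMA
 * alignment requirement; rx_dma_addr_align is expected to be a power of
 * two, so masking with (align - 1) yields the misaligned remainder.
 */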
static int
hns3_check_rx_dma_addr(struct hns3_hw *hw, uint64_t dma_addr)
{
	uint64_t rem;

	rem = dma_addr & (hw->rx_dma_addr_align - 1);
	if (rem > 0) {
		hns3_err(hw, "The IO address of the beginning of the mbuf data "
			 "must be %u-byte aligned", hw->rx_dma_addr_align);
		return -EINVAL;
	}
	return 0;
}

static int
hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
{
	struct rte_mbuf *mbuf;
	uint64_t dma_addr;
	uint16_t i;
	int ret;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(mbuf == NULL)) {
			hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
				 i);
			hns3_rx_queue_release_mbufs(rxq);
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		rxq->sw_ring[i].mbuf = mbuf;
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxq->rx_ring[i].addr = dma_addr;
		rxq->rx_ring[i].rx.bd_base_info = 0;

		ret = hns3_check_rx_dma_addr(hw, dma_addr);
		if (ret != 0) {
			hns3_rx_queue_release_mbufs(rxq);
			return ret;
		}
	}

	return 0;
}

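/*
 * Map an Rx buffer size to the BD size type programmed into the ring
 * registers; sizes other than 512, 1024 and 4096 fall back to the
 * 2048-byte type.
 */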
static int
hns3_buf_size2type(uint32_t buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

static void
hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq)
{
	uint32_t rx_buf_len = rxq->rx_buf_len;
	uint64_t dma_addr = rxq->rx_ring_phys_addr;

	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG,
		       (uint32_t)(dma_addr >> 32));

	hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG,
		       hns3_buf_size2type(rx_buf_len));
	hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(rxq->nb_rx_desc));
}

static void
hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
{
	uint64_t dma_addr = txq->tx_ring_phys_addr;

	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr);
	hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG,
		       (uint32_t)(dma_addr >> 32));

	hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG,
		       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
}

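/*
 * Propagate the port-based VLAN (PVID) state to every Rx and Tx queue, so
 * the datapath knows whether VLAN tags must be discarded (Rx) or shifted
 * (Tx) in software while PVID is enabled.
 */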
void
hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
{
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	bool pvid_en;
	int i;

	pvid_en = hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE;
	for (i = 0; i < hw->cfg_max_queues; i++) {
		if (i < nb_rx_q) {
			rxq = hw->data->rx_queues[i];
			if (rxq != NULL)
				rxq->pvid_sw_discard_en = pvid_en;
		}
		if (i < nb_tx_q) {
			txq = hw->data->tx_queues[i];
			if (txq != NULL)
				txq->pvid_sw_shift_en = pvid_en;
		}
	}
}

static void
hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
{
	uint32_t reg_offset;
	uint32_t reg;

	reg_offset = queue_type == HNS3_RING_TYPE_TX ?
				   HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
	reg = hns3_read_reg(tqp_base, reg_offset);
	reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_reg(tqp_base, reg_offset, reg);
}

void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint32_t rcb_reg;
	void *tqp_base;
	uint16_t i;

	for (i = 0; i < hw->cfg_max_queues; i++) {
		if (hns3_dev_get_support(hw, INDEP_TXRX)) {
			rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
			txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;

			tqp_base = (void *)((char *)hw->io_base +
					hns3_get_tqp_reg_offset(i));
			/*
			 * If the queue struct is not initialized, the related
			 * HW ring has not been initialized yet. So these
			 * queues should be disabled before enabling the tqps
			 * to avoid a HW exception, since the queues are
			 * enabled by default.
			 */
			if (rxq == NULL)
				hns3_stop_unused_queue(tqp_base,
							HNS3_RING_TYPE_RX);
			if (txq == NULL)
				hns3_stop_unused_queue(tqp_base,
							HNS3_RING_TYPE_TX);
		} else {
			rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
			      hw->fkq_data.rx_queues[i - nb_rx_q];

			tqp_base = rxq->io_base;
		}
		/*
		 * This is the master switch used to control the enabling of
		 * a pair of Tx and Rx queues. Both the Rx and Tx point to
		 * the same register.
		 */
		rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
		if (en)
			rcb_reg |= BIT(HNS3_RING_EN_B);
		else
			rcb_reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
	}
}

static void
hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
{
	struct hns3_hw *hw = &txq->hns->hw;
	uint32_t reg;

	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
		reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
		if (en)
			reg |= BIT(HNS3_RING_EN_B);
		else
			reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
	}
	txq->enabled = en;
}

static void
hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
{
	struct hns3_hw *hw = &rxq->hns->hw;
	uint32_t reg;

	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
		reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
		if (en)
			reg |= BIT(HNS3_RING_EN_B);
		else
			reg &= ~BIT(HNS3_RING_EN_B);
		hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
	}
	rxq->enabled = en;
}

int
hns3_start_all_txqs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tx_queue *txq;
	uint16_t i, j;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = hw->data->tx_queues[i];
		if (!txq) {
			hns3_err(hw, "Tx queue %u not available or setup.", i);
			goto start_txqs_fail;
		}
		/*
		 * Tx queues are enabled by default. Therefore, a Tx queue
		 * needs to be disabled when deferred_start is set. There is
		 * another master switch used to control the enabling of a
		 * pair of Tx and Rx queues, and that master switch is
		 * disabled by default.
		 */
		if (txq->tx_deferred_start)
			hns3_enable_txq(txq, false);
		else
			hns3_enable_txq(txq, true);
	}
	return 0;

start_txqs_fail:
	for (j = 0; j < i; j++) {
		txq = hw->data->tx_queues[j];
		hns3_enable_txq(txq, false);
	}
	return -EINVAL;
}

int
hns3_start_all_rxqs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_rx_queue *rxq;
	uint16_t i, j;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = hw->data->rx_queues[i];
		if (!rxq) {
			hns3_err(hw, "Rx queue %u not available or setup.", i);
			goto start_rxqs_fail;
		}
		/*
		 * Rx queues are enabled by default. Therefore, an Rx queue
		 * needs to be disabled when deferred_start is set. There is
		 * another master switch used to control the enabling of a
		 * pair of Tx and Rx queues, and that master switch is
		 * disabled by default.
		 */
		if (rxq->rx_deferred_start)
			hns3_enable_rxq(rxq, false);
		else
			hns3_enable_rxq(rxq, true);
	}
	return 0;

start_rxqs_fail:
	for (j = 0; j < i; j++) {
		rxq = hw->data->rx_queues[j];
		hns3_enable_rxq(rxq, false);
	}
	return -EINVAL;
}

void
hns3_restore_tqp_enable_state(struct hns3_hw *hw)
{
	struct hns3_rx_queue *rxq;
	struct hns3_tx_queue *txq;
	uint16_t i;

	for (i = 0; i < hw->data->nb_rx_queues; i++) {
		rxq = hw->data->rx_queues[i];
		if (rxq != NULL)
			hns3_enable_rxq(rxq, rxq->enabled);
	}

	for (i = 0; i < hw->data->nb_tx_queues; i++) {
		txq = hw->data->tx_queues[i];
		if (txq != NULL)
			hns3_enable_txq(txq, txq->enabled);
	}
}

void
hns3_stop_all_txqs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tx_queue *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = hw->data->tx_queues[i];
		if (!txq)
			continue;
		hns3_enable_txq(txq, false);
	}
}

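/*
 * Enable or disable a TQP via a firmware command
 * (HNS3_OPC_CFG_COM_TQP_QUEUE). This acts at the firmware level and is
 * distinct from the per-ring enable bit written by hns3_enable_txq() and
 * hns3_enable_rxq() above.
 */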
static int
hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_cfg_com_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	req->stream_id = 0;
	hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "TQP %s fail, ret = %d", enable ? "enable" : "disable", ret);

	return ret;
}

static int
hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "send tqp reset cmd error, queue_id = %u, ret = %d",
			 queue_id, ret);

	return ret;
}

static int
hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
			  uint8_t *reset_status)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get tqp reset status error, queue_id = %u, ret = %d.",
			 queue_id, ret);
		return ret;
	}
	*reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
	return ret;
}

static int
hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS	200
	uint16_t wait_time = 0;
	uint8_t reset_status;
	int ret;

	/*
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver; all task queue pairs are mapped to the PF
	 * function, so the PF's queue id equals the global queue id in the
	 * PF range.
	 */
	ret = hns3_send_reset_tqp_cmd(hw, queue_id, true);
	if (ret) {
		hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
		return ret;
	}

	do {
		/* Wait for tqp hw reset */
		rte_delay_ms(HNS3_POLL_RESPONE_MS);
		wait_time += HNS3_POLL_RESPONE_MS;
		ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
		if (ret)
			goto tqp_reset_fail;

		if (reset_status)
			break;
	} while (wait_time < HNS3_TQP_RESET_TRY_MS);

	if (!reset_status) {
		ret = -ETIMEDOUT;
		hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
			 queue_id, ret);
		goto tqp_reset_fail;
	}

	ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
	if (ret)
		hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);

	return ret;

tqp_reset_fail:
	hns3_send_reset_tqp_cmd(hw, queue_id, false);
	return ret;
}

static int
hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
	struct hns3_vf_to_pf_msg req;
	int ret;

	hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0);
	memcpy(req.data, &queue_id, sizeof(uint16_t));
	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
	if (ret)
		hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
			 queue_id, ret);
	return ret;
}

static int
hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
{
	struct hns3_reset_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
	req = (struct hns3_reset_cmd *)desc.data;
	hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1);

	/*
	 * The start qid should be the global qid of the first tqp of the
	 * function which should be reset in this port. Since our PF does
	 * not support taking over VFs, we only need to reset function 0,
	 * and its start qid is always 0.
	 */
	req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
	req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
		return ret;
	}

	*reset_status = req->fun_reset_rcb_return_status;
	return 0;
}

static int
hns3pf_reset_all_tqps(struct hns3_hw *hw)
{
#define HNS3_RESET_RCB_NOT_SUPPORT	0U
#define HNS3_RESET_ALL_TQP_SUCCESS	1U
	uint8_t reset_status;
	uint16_t i;
	int ret;

	ret = hns3_reset_rcb_cmd(hw, &reset_status);
	if (ret)
		return ret;

	/*
	 * If the firmware version is low, it may not support the rcb reset,
	 * which resets all the tqps at a time. In this case, we should
	 * reset the tqps one by one.
	 */
	if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
		for (i = 0; i < hw->cfg_max_queues; i++) {
			ret = hns3pf_reset_tqp(hw, i);
			if (ret) {
				hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
					 i, ret);
				return ret;
			}
		}
	} else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
		hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
			 reset_status);
		return -EIO;
	}

	return 0;
}

static int
hns3vf_reset_all_tqps(struct hns3_hw *hw)
{
#define HNS3VF_RESET_ALL_TQP_DONE	1U
	struct hns3_vf_to_pf_msg req;
	uint8_t reset_status;
	int ret;
	uint16_t i;

	hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0);
	ret = hns3vf_mbx_send(hw, &req, true,
			      &reset_status, sizeof(reset_status));
	if (ret) {
		hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
		return ret;
	}

	if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
		return 0;

	/*
	 * If the firmware version or kernel PF version is low, it may not
	 * support the rcb reset, which resets all the tqps at a time. In
	 * this case, we should reset the tqps one by one.
	 */
	for (i = 1; i < hw->cfg_max_queues; i++) {
		ret = hns3vf_reset_tqp(hw, i);
		if (ret)
			return ret;
	}

	return 0;
}

int
hns3_reset_all_tqps(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t i;
	int ret;

	/* Disable all queues before resetting all queues */
	for (i = 0; i < hw->cfg_max_queues; i++) {
		ret = hns3_tqp_enable(hw, i, false);
		if (ret) {
			hns3_err(hw, "fail to disable tqps before tqps reset, ret = %d.",
				 ret);
			return ret;
		}
	}

	if (hns->is_vf)
		return hns3vf_reset_all_tqps(hw);
	else
		return hns3pf_reset_all_tqps(hw);
}

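/*
 * The helpers below use the per-direction reset opcode
 * (HNS3_OPC_RESET_TQP_QUEUE_INDEP), which the INDEP suffix suggests is
 * meant for hardware with independent Tx/Rx queue support: a single Tx or
 * Rx ring can be reset without touching its partner.
 */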
static int
hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
			  enum hns3_ring_type queue_type, bool enable)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	req->queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
	hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
			 "queue_type = %s, ret = %d.", queue_id,
			 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
	return ret;
}

static int
hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
			    enum hns3_ring_type queue_type,
			    uint8_t *reset_status)
{
	struct hns3_reset_tqp_queue_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);

	req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = rte_cpu_to_le_16(queue_id);
	req->queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get queue reset status error, queue_id = %u "
			 "queue_type = %s, ret = %d.", queue_id,
			 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
		return ret;
	}

	*reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
	return ret;
}

static int
hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
		 enum hns3_ring_type queue_type)
{
#define HNS3_QUEUE_RESET_TRY_MS	200
	struct hns3_tx_queue *txq;
	struct hns3_rx_queue *rxq;
	uint32_t reset_wait_times;
	uint32_t max_wait_times;
	uint8_t reset_status;
	int ret;

	if (queue_type == HNS3_RING_TYPE_TX) {
		txq = hw->data->tx_queues[queue_id];
		hns3_enable_txq(txq, false);
	} else {
		rxq = hw->data->rx_queues[queue_id];
		hns3_enable_rxq(rxq, false);
	}

	ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
	if (ret) {
		hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
		return ret;
	}

	reset_wait_times = 0;
	max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
	while (reset_wait_times < max_wait_times) {
		/* Wait for queue hw reset */
		rte_delay_ms(HNS3_POLL_RESPONE_MS);
		ret = hns3_get_queue_reset_status(hw, queue_id,
						queue_type, &reset_status);
		if (ret)
			goto queue_reset_fail;

		if (reset_status)
			break;
		reset_wait_times++;
	}

	if (!reset_status) {
		hns3_err(hw, "reset queue timeout, queue_id = %u, queue_type = %s",
			 queue_id,
			 queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
		ret = -ETIMEDOUT;
		goto queue_reset_fail;
	}

	ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
	if (ret)
		hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);

	return ret;

queue_reset_fail:
	hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
	return ret;
}

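/*
 * Per-queue interrupt registers for vector ids below
 * HNS3_MIN_EXT_TQP_INTR_ID live at HNS3_TQP_INTR_REG_BASE with a fixed
 * per-vector stride; higher ids are addressed from
 * HNS3_TQP_INTR_EXT_REG_BASE using a group offset plus an offset within
 * the group, as computed below.
 */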
uint32_t
hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
{
	uint32_t reg_offset;

	/* An extended offset is needed to configure queues > 64 */
	if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
		reg_offset = HNS3_TQP_INTR_REG_BASE +
			     tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
	else
		reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
			     tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
			     HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
			     tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
			     HNS3_TQP_INTR_LOW_ORDER_OFFSET;

	return reg_offset;
}

void
hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
		       uint8_t gl_idx, uint16_t gl_value)
{
	uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
			     HNS3_TQP_INTR_GL1_REG,
			     HNS3_TQP_INTR_GL2_REG};
	uint32_t addr, value;

	if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
		return;

	addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
	if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
		value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
	else
		value = HNS3_GL_USEC_TO_REG(gl_value);

	hns3_write_dev(hw, addr, value);
}

void
hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
{
	uint32_t addr, value;

	if (rl_value > HNS3_TQP_INTR_RL_MAX)
		return;

	addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
	value = HNS3_RL_USEC_TO_REG(rl_value);
	if (value > 0)
		value |= HNS3_TQP_INTR_RL_ENABLE_MASK;

	hns3_write_dev(hw, addr, value);
}

void
hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
{
	uint32_t addr;

	/*
	 * int_ql_max == 0 means the hardware does not support QL.
	 * Configuring the QL registers is not permitted when QL is
	 * unsupported, so just return.
	 */
	if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
		return;

	addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
	hns3_write_dev(hw, addr, ql_value);

	addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
	hns3_write_dev(hw, addr, ql_value);
}

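/*
 * Unmask (1) or mask (0) the interrupt vector bound to a queue by writing
 * its per-queue interrupt control register.
 */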
static void
hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
{
	uint32_t addr, value;

	addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
	value = en ? 1 : 0;

	hns3_write_dev(hw, addr, value);
}

/*
 * Enable all Rx queue interrupts when in interrupt Rx mode.
 * This API is called before enabling queue Rx & Tx (in normal start or
 * reset recovery scenarios); it is used to fix the hardware Rx queue
 * interrupt enable bit being cleared during FLR.
 */
void
hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t i;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	for (i = 0; i < nb_rx_q; i++)
		hns3_queue_intr_enable(hw, i, en);
}

107502a7b556SHao Chen int
107602a7b556SHao Chen hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
107702a7b556SHao Chen {
107802a7b556SHao Chen 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1079d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
108002a7b556SHao Chen 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
108102a7b556SHao Chen 
108202a7b556SHao Chen 	if (dev->data->dev_conf.intr_conf.rxq == 0)
108302a7b556SHao Chen 		return -ENOTSUP;
108402a7b556SHao Chen 
1085ef2e785cSWei Hu (Xavier) 	hns3_queue_intr_enable(hw, queue_id, true);
108602a7b556SHao Chen 
108702a7b556SHao Chen 	return rte_intr_ack(intr_handle);
108802a7b556SHao Chen }
108902a7b556SHao Chen 
109002a7b556SHao Chen int
109102a7b556SHao Chen hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
109202a7b556SHao Chen {
109302a7b556SHao Chen 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
109402a7b556SHao Chen 
109502a7b556SHao Chen 	if (dev->data->dev_conf.intr_conf.rxq == 0)
109602a7b556SHao Chen 		return -ENOTSUP;
109702a7b556SHao Chen 
1098ef2e785cSWei Hu (Xavier) 	hns3_queue_intr_enable(hw, queue_id, false);
109902a7b556SHao Chen 
110002a7b556SHao Chen 	return 0;
110102a7b556SHao Chen }
110202a7b556SHao Chen 
1103bba63669SWei Hu (Xavier) static int
1104fa29fe45SChengchang Tang hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
1105bba63669SWei Hu (Xavier) {
1106bba63669SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1107bba63669SWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1108bba63669SWei Hu (Xavier) 	int ret;
1109bba63669SWei Hu (Xavier) 
1110bba63669SWei Hu (Xavier) 	PMD_INIT_FUNC_TRACE();
1111bba63669SWei Hu (Xavier) 
1112a951c1edSWei Hu (Xavier) 	rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
1113bba63669SWei Hu (Xavier) 	ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
1114bba63669SWei Hu (Xavier) 	if (ret) {
1115fa29fe45SChengchang Tang 		hns3_err(hw, "failed to alloc mbufs for Rx queue %u, ret = %d.",
1116bba63669SWei Hu (Xavier) 			 idx, ret);
1117bba63669SWei Hu (Xavier) 		return ret;
1118bba63669SWei Hu (Xavier) 	}
1119bba63669SWei Hu (Xavier) 
1120bba63669SWei Hu (Xavier) 	rxq->next_to_use = 0;
1121a3d4f4d2SWei Hu (Xavier) 	rxq->rx_rearm_start = 0;
1122ceabee45SWei Hu (Xavier) 	rxq->rx_free_hold = 0;
1123a3d4f4d2SWei Hu (Xavier) 	rxq->rx_rearm_nb = 0;
1124521ab3e9SWei Hu (Xavier) 	rxq->pkt_first_seg = NULL;
1125521ab3e9SWei Hu (Xavier) 	rxq->pkt_last_seg = NULL;
1126bba63669SWei Hu (Xavier) 	hns3_init_rx_queue_hw(rxq);
1127a3d4f4d2SWei Hu (Xavier) 	hns3_rxq_vec_setup(rxq);
1128bba63669SWei Hu (Xavier) 
1129bba63669SWei Hu (Xavier) 	return 0;
1130bba63669SWei Hu (Xavier) }
1131bba63669SWei Hu (Xavier) 
1132bba63669SWei Hu (Xavier) static void
1133fa29fe45SChengchang Tang hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
1134bba63669SWei Hu (Xavier) {
1135bba63669SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1136a951c1edSWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1137a951c1edSWei Hu (Xavier) 
1138a951c1edSWei Hu (Xavier) 	rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
1139a951c1edSWei Hu (Xavier) 	rxq->next_to_use = 0;
1140ceabee45SWei Hu (Xavier) 	rxq->rx_free_hold = 0;
1141a3d4f4d2SWei Hu (Xavier) 	rxq->rx_rearm_start = 0;
1142a3d4f4d2SWei Hu (Xavier) 	rxq->rx_rearm_nb = 0;
1143a951c1edSWei Hu (Xavier) 	hns3_init_rx_queue_hw(rxq);
1144a951c1edSWei Hu (Xavier) }
1145a951c1edSWei Hu (Xavier) 
1146a951c1edSWei Hu (Xavier) static void
1147fa29fe45SChengchang Tang hns3_init_txq(struct hns3_tx_queue *txq)
1148a951c1edSWei Hu (Xavier) {
1149bba63669SWei Hu (Xavier) 	struct hns3_desc *desc;
115067d01034SHuisong Li 	uint16_t i;
1151bba63669SWei Hu (Xavier) 
1152bba63669SWei Hu (Xavier) 	/* Clear tx bd */
1153bba63669SWei Hu (Xavier) 	desc = txq->tx_ring;
1154bba63669SWei Hu (Xavier) 	for (i = 0; i < txq->nb_tx_desc; i++) {
1155bba63669SWei Hu (Xavier) 		desc->tx.tp_fe_sc_vld_ra_ri = 0;
1156bba63669SWei Hu (Xavier) 		desc++;
1157bba63669SWei Hu (Xavier) 	}
1158bba63669SWei Hu (Xavier) 
1159bba63669SWei Hu (Xavier) 	txq->next_to_use = 0;
1160bba63669SWei Hu (Xavier) 	txq->next_to_clean = 0;
1161eb570862SYisen Zhuang 	txq->tx_bd_ready = txq->nb_tx_desc - 1;
1162bba63669SWei Hu (Xavier) 	hns3_init_tx_queue_hw(txq);
1163bba63669SWei Hu (Xavier) }
1164bba63669SWei Hu (Xavier) 
1165bba63669SWei Hu (Xavier) static void
1166bba63669SWei Hu (Xavier) hns3_init_tx_ring_tc(struct hns3_adapter *hns)
1167bba63669SWei Hu (Xavier) {
1168bba63669SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1169bba63669SWei Hu (Xavier) 	struct hns3_tx_queue *txq;
1170bba63669SWei Hu (Xavier) 	int i, num;
1171bba63669SWei Hu (Xavier) 
1172bba63669SWei Hu (Xavier) 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1173bba63669SWei Hu (Xavier) 		struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i];
117467d01034SHuisong Li 		uint16_t j;
1175bba63669SWei Hu (Xavier) 
1176bba63669SWei Hu (Xavier) 		if (!tc_queue->enable)
1177bba63669SWei Hu (Xavier) 			continue;
1178bba63669SWei Hu (Xavier) 
1179bba63669SWei Hu (Xavier) 		for (j = 0; j < tc_queue->tqp_count; j++) {
1180bba63669SWei Hu (Xavier) 			num = tc_queue->tqp_offset + j;
1181a951c1edSWei Hu (Xavier) 			txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
1182bba63669SWei Hu (Xavier) 			if (txq == NULL)
1183bba63669SWei Hu (Xavier) 				continue;
1184bba63669SWei Hu (Xavier) 
1185bba63669SWei Hu (Xavier) 			hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc);
1186bba63669SWei Hu (Xavier) 		}
1187bba63669SWei Hu (Xavier) 	}
1188bba63669SWei Hu (Xavier) }
1189bba63669SWei Hu (Xavier) 
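/*
 * Worked example of the mapping above (hypothetical TC layout): with two
 * enabled TCs where tc_queue[0] = { .tqp_offset = 0, .tqp_count = 4,
 * .tc = 0 } and tc_queue[1] = { .tqp_offset = 4, .tqp_count = 4, .tc = 1 },
 * the loop writes TC 0 to HNS3_RING_TX_TC_REG of Tx queues 0..3 and TC 1
 * to that of Tx queues 4..7; queues that are not set up (txq == NULL) are
 * skipped.
 */
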
1190a951c1edSWei Hu (Xavier) static int
1191fa29fe45SChengchang Tang hns3_init_rx_queues(struct hns3_adapter *hns)
1192bba63669SWei Hu (Xavier) {
1193bba63669SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1194bba63669SWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1195fa29fe45SChengchang Tang 	uint16_t i, j;
1196bba63669SWei Hu (Xavier) 	int ret;
1197bba63669SWei Hu (Xavier) 
1198bba63669SWei Hu (Xavier) 	/* Initialize RSS for queues */
1199bba63669SWei Hu (Xavier) 	ret = hns3_config_rss(hns);
1200bba63669SWei Hu (Xavier) 	if (ret) {
1201fa29fe45SChengchang Tang 		hns3_err(hw, "failed to configure rss, ret = %d.", ret);
1202bba63669SWei Hu (Xavier) 		return ret;
1203bba63669SWei Hu (Xavier) 	}
1204bba63669SWei Hu (Xavier) 
1205a951c1edSWei Hu (Xavier) 	for (i = 0; i < hw->data->nb_rx_queues; i++) {
1206a951c1edSWei Hu (Xavier) 		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
1207fa29fe45SChengchang Tang 		if (!rxq) {
1208fa29fe45SChengchang Tang 			hns3_err(hw, "Rx queue %u not available or not set up.", i);
1209fa29fe45SChengchang Tang 			goto out;
1210fa29fe45SChengchang Tang 		}
1211fa29fe45SChengchang Tang 
1212fa29fe45SChengchang Tang 		if (rxq->rx_deferred_start)
1213a951c1edSWei Hu (Xavier) 			continue;
1214fa29fe45SChengchang Tang 
1215fa29fe45SChengchang Tang 		ret = hns3_init_rxq(hns, i);
1216a951c1edSWei Hu (Xavier) 		if (ret) {
1217fa29fe45SChengchang Tang 			hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
1218a951c1edSWei Hu (Xavier) 				 ret);
1219a951c1edSWei Hu (Xavier) 			goto out;
1220a951c1edSWei Hu (Xavier) 		}
1221a951c1edSWei Hu (Xavier) 	}
1222a951c1edSWei Hu (Xavier) 
1223fa29fe45SChengchang Tang 	for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
1224fa29fe45SChengchang Tang 		hns3_init_fake_rxq(hns, i);
1225fa29fe45SChengchang Tang 
1226a951c1edSWei Hu (Xavier) 	return 0;
1227a951c1edSWei Hu (Xavier) 
1228a951c1edSWei Hu (Xavier) out:
1229a951c1edSWei Hu (Xavier) 	for (j = 0; j < i; j++) {
1230a951c1edSWei Hu (Xavier) 		rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
1231ae5f21f2SHuisong Li 		if (rxq->rx_deferred_start)
1232ae5f21f2SHuisong Li 			continue;
1233ae5f21f2SHuisong Li 
1234a951c1edSWei Hu (Xavier) 		hns3_rx_queue_release_mbufs(rxq);
1235a951c1edSWei Hu (Xavier) 	}
1236a951c1edSWei Hu (Xavier) 
1237a951c1edSWei Hu (Xavier) 	return ret;
1238a951c1edSWei Hu (Xavier) }
1239a951c1edSWei Hu (Xavier) 
1240fa29fe45SChengchang Tang static int
1241fa29fe45SChengchang Tang hns3_init_tx_queues(struct hns3_adapter *hns)
1242a951c1edSWei Hu (Xavier) {
1243a951c1edSWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1244a951c1edSWei Hu (Xavier) 	struct hns3_tx_queue *txq;
1245fa29fe45SChengchang Tang 	uint16_t i;
1246a951c1edSWei Hu (Xavier) 
1247a951c1edSWei Hu (Xavier) 	for (i = 0; i < hw->data->nb_tx_queues; i++) {
1248a951c1edSWei Hu (Xavier) 		txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
1249fa29fe45SChengchang Tang 		if (!txq) {
1250fa29fe45SChengchang Tang 			hns3_err(hw, "Tx queue %u not available or not set up.", i);
1251fa29fe45SChengchang Tang 			return -EINVAL;
1252fa29fe45SChengchang Tang 		}
1253fa29fe45SChengchang Tang 
1254fa29fe45SChengchang Tang 		if (txq->tx_deferred_start)
1255a951c1edSWei Hu (Xavier) 			continue;
1256fa29fe45SChengchang Tang 		hns3_init_txq(txq);
1257a951c1edSWei Hu (Xavier) 	}
1258a951c1edSWei Hu (Xavier) 
1259a951c1edSWei Hu (Xavier) 	for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
1260a951c1edSWei Hu (Xavier) 		txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
1261fa29fe45SChengchang Tang 		hns3_init_txq(txq);
1262a951c1edSWei Hu (Xavier) 	}
1263a951c1edSWei Hu (Xavier) 	hns3_init_tx_ring_tc(hns);
1264fa29fe45SChengchang Tang 
1265fa29fe45SChengchang Tang 	return 0;
1266a951c1edSWei Hu (Xavier) }
1267a951c1edSWei Hu (Xavier) 
1268c4ae39b2SChengwen Feng /*
1269fa29fe45SChengchang Tang  * Initialize all queues.
1270fa29fe45SChengchang Tang  * Note: only init and set up the queues here; the TQPs are not enabled.
1271c4ae39b2SChengwen Feng  */
1272a951c1edSWei Hu (Xavier) int
1273fa29fe45SChengchang Tang hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
1274a951c1edSWei Hu (Xavier) {
1275a951c1edSWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1276a951c1edSWei Hu (Xavier) 	int ret;
1277a951c1edSWei Hu (Xavier) 
1278bba63669SWei Hu (Xavier) 	if (reset_queue) {
1279fa29fe45SChengchang Tang 		ret = hns3_reset_all_tqps(hns);
1280bba63669SWei Hu (Xavier) 		if (ret) {
1281fa29fe45SChengchang Tang 			hns3_err(hw, "failed to reset all queues, ret = %d.",
1282fa29fe45SChengchang Tang 				 ret);
1283bba63669SWei Hu (Xavier) 			return ret;
1284bba63669SWei Hu (Xavier) 		}
1285bba63669SWei Hu (Xavier) 	}
1286bba63669SWei Hu (Xavier) 
1287fa29fe45SChengchang Tang 	ret = hns3_init_rx_queues(hns);
1288bba63669SWei Hu (Xavier) 	if (ret) {
1289fa29fe45SChengchang Tang 		hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
1290bba63669SWei Hu (Xavier) 		return ret;
1291bba63669SWei Hu (Xavier) 	}
1292bba63669SWei Hu (Xavier) 
1293fa29fe45SChengchang Tang 	ret = hns3_init_tx_queues(hns);
1294fa29fe45SChengchang Tang 	if (ret) {
1295fa29fe45SChengchang Tang 		hns3_dev_release_mbufs(hns);
1296fa29fe45SChengchang Tang 		hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
1297a951c1edSWei Hu (Xavier) 	}
1298a951c1edSWei Hu (Xavier) 
1299fa29fe45SChengchang Tang 	return ret;
1300fa29fe45SChengchang Tang }
1301fa29fe45SChengchang Tang 
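/*
 * Ordering sketch (an assumption drawn from the note above, not a verbatim
 * copy of the start path): a normal start or reset-recovery flow first
 * initializes the rings and only afterwards enables the TQPs, e.g.:
 *
 *	ret = hns3_init_queues(hns, reset_queue);
 *	if (ret)
 *		return ret;
 *	hns3_start_tqps(&hns->hw);
 */
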
1302fa29fe45SChengchang Tang void
1303fa29fe45SChengchang Tang hns3_start_tqps(struct hns3_hw *hw)
1304bba63669SWei Hu (Xavier) {
1305fa29fe45SChengchang Tang 	struct hns3_tx_queue *txq;
1306fa29fe45SChengchang Tang 	struct hns3_rx_queue *rxq;
1307fa29fe45SChengchang Tang 	uint16_t i;
1308fa29fe45SChengchang Tang 
1309fa29fe45SChengchang Tang 	hns3_enable_all_queues(hw, true);
1310fa29fe45SChengchang Tang 
1311fa29fe45SChengchang Tang 	for (i = 0; i < hw->data->nb_tx_queues; i++) {
1312084d0cdbSMorten Brørup 		__rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
1313fa29fe45SChengchang Tang 		txq = hw->data->tx_queues[i];
1314fa29fe45SChengchang Tang 		if (txq->enabled)
1315fa29fe45SChengchang Tang 			hw->data->tx_queue_state[i] =
1316fa29fe45SChengchang Tang 				RTE_ETH_QUEUE_STATE_STARTED;
1317fa29fe45SChengchang Tang 	}
1318fa29fe45SChengchang Tang 
1319fa29fe45SChengchang Tang 	for (i = 0; i < hw->data->nb_rx_queues; i++) {
1320084d0cdbSMorten Brørup 		__rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
1321fa29fe45SChengchang Tang 		rxq = hw->data->rx_queues[i];
1322fa29fe45SChengchang Tang 		if (rxq->enabled)
1323fa29fe45SChengchang Tang 			hw->data->rx_queue_state[i] =
1324fa29fe45SChengchang Tang 				RTE_ETH_QUEUE_STATE_STARTED;
1325fa29fe45SChengchang Tang 	}
1326fa29fe45SChengchang Tang }
1327fa29fe45SChengchang Tang 
1328fa29fe45SChengchang Tang void
1329fa29fe45SChengchang Tang hns3_stop_tqps(struct hns3_hw *hw)
1330fa29fe45SChengchang Tang {
1331fa29fe45SChengchang Tang 	uint16_t i;
1332bba63669SWei Hu (Xavier) 
1333bba63669SWei Hu (Xavier) 	hns3_enable_all_queues(hw, false);
1334fa29fe45SChengchang Tang 
1335fa29fe45SChengchang Tang 	for (i = 0; i < hw->data->nb_tx_queues; i++)
1336fa29fe45SChengchang Tang 		hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1337fa29fe45SChengchang Tang 
1338fa29fe45SChengchang Tang 	for (i = 0; i < hw->data->nb_rx_queues; i++)
1339fa29fe45SChengchang Tang 		hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1340bba63669SWei Hu (Xavier) }
1341bba63669SWei Hu (Xavier) 
1342a3d4f4d2SWei Hu (Xavier) /*
1343a3d4f4d2SWei Hu (Xavier)  * Iterate over all Rx queues and call the callback() function for each
1344a3d4f4d2SWei Hu (Xavier)  * one.
1345a3d4f4d2SWei Hu (Xavier)  *
1346a3d4f4d2SWei Hu (Xavier)  * @param[in] dev
1347a3d4f4d2SWei Hu (Xavier)  *   The target eth dev.
1348a3d4f4d2SWei Hu (Xavier)  * @param[in] callback
1349a3d4f4d2SWei Hu (Xavier)  *   The function to call for each queue.
1350a3d4f4d2SWei Hu (Xavier)  *   If the callback returns nonzero, the iteration stops and that value
1351a3d4f4d2SWei Hu (Xavier)  *   is returned.
1352a3d4f4d2SWei Hu (Xavier)  * @param[in] arg
1353a3d4f4d2SWei Hu (Xavier)  *   The argument to pass to the callback function.
1354a3d4f4d2SWei Hu (Xavier)  *
1355a3d4f4d2SWei Hu (Xavier)  * @return
1356a3d4f4d2SWei Hu (Xavier)  *   0 on success, -EINVAL if the Rx queues are not set up, or the
1356a3d4f4d2SWei Hu (Xavier)  *   callback's nonzero return value.
1356a3d4f4d2SWei Hu (Xavier)  */
1357a3d4f4d2SWei Hu (Xavier) int
1358a3d4f4d2SWei Hu (Xavier) hns3_rxq_iterate(struct rte_eth_dev *dev,
1359a3d4f4d2SWei Hu (Xavier) 		 int (*callback)(struct hns3_rx_queue *, void *), void *arg)
1360a3d4f4d2SWei Hu (Xavier) {
1361a3d4f4d2SWei Hu (Xavier) 	uint32_t i;
1362a3d4f4d2SWei Hu (Xavier) 	int ret;
1363a3d4f4d2SWei Hu (Xavier) 
1364a3d4f4d2SWei Hu (Xavier) 	if (dev->data->rx_queues == NULL)
1365a3d4f4d2SWei Hu (Xavier) 		return -EINVAL;
1366a3d4f4d2SWei Hu (Xavier) 
1367a3d4f4d2SWei Hu (Xavier) 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1368a3d4f4d2SWei Hu (Xavier) 		ret = callback(dev->data->rx_queues[i], arg);
1369a3d4f4d2SWei Hu (Xavier) 		if (ret != 0)
1370a3d4f4d2SWei Hu (Xavier) 			return ret;
1371a3d4f4d2SWei Hu (Xavier) 	}
1372a3d4f4d2SWei Hu (Xavier) 
1373a3d4f4d2SWei Hu (Xavier) 	return 0;
1374a3d4f4d2SWei Hu (Xavier) }
1375a3d4f4d2SWei Hu (Xavier) 
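/*
 * Illustrative sketch (hypothetical callback, not part of the driver):
 * counting the Rx queues via hns3_rxq_iterate(). Returning nonzero from
 * the callback stops the iteration early and propagates that value.
 *
 *	static int
 *	count_rxq_cb(struct hns3_rx_queue *rxq, void *arg)
 *	{
 *		uint16_t *cnt = arg;
 *
 *		if (rxq == NULL)
 *			return -EINVAL;
 *		(*cnt)++;
 *		return 0;
 *	}
 *
 *	uint16_t cnt = 0;
 *	int ret = hns3_rxq_iterate(dev, count_rxq_cb, &cnt);
 */
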
1376a951c1edSWei Hu (Xavier) static void*
1377a951c1edSWei Hu (Xavier) hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
1378a951c1edSWei Hu (Xavier) 			    struct hns3_queue_info *q_info)
1379a951c1edSWei Hu (Xavier) {
1380a951c1edSWei Hu (Xavier) 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1381a951c1edSWei Hu (Xavier) 	const struct rte_memzone *rx_mz;
1382a951c1edSWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1383a951c1edSWei Hu (Xavier) 	unsigned int rx_desc;
1384a951c1edSWei Hu (Xavier) 
1385a951c1edSWei Hu (Xavier) 	rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
1386a951c1edSWei Hu (Xavier) 				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1387a951c1edSWei Hu (Xavier) 	if (rxq == NULL) {
13882427c27eSHongbo Zheng 		hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
1389a951c1edSWei Hu (Xavier) 			 q_info->idx);
1390a951c1edSWei Hu (Xavier) 		return NULL;
1391a951c1edSWei Hu (Xavier) 	}
1392a951c1edSWei Hu (Xavier) 
1393a951c1edSWei Hu (Xavier) 	/* Allocate rx ring hardware descriptors. */
1394a951c1edSWei Hu (Xavier) 	rxq->queue_id = q_info->idx;
1395a951c1edSWei Hu (Xavier) 	rxq->nb_rx_desc = q_info->nb_desc;
1396a3d4f4d2SWei Hu (Xavier) 
1397a3d4f4d2SWei Hu (Xavier) 	/*
1398a3d4f4d2SWei Hu (Xavier) 	 * Allocate a little more memory because the Rx vector functions
1399a3d4f4d2SWei Hu (Xavier) 	 * don't check boundaries each time.
1400a3d4f4d2SWei Hu (Xavier) 	 */
1401a3d4f4d2SWei Hu (Xavier) 	rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1402a3d4f4d2SWei Hu (Xavier) 			sizeof(struct hns3_desc);
1403a951c1edSWei Hu (Xavier) 	rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1404a951c1edSWei Hu (Xavier) 					 rx_desc, HNS3_RING_BASE_ALIGN,
1405a951c1edSWei Hu (Xavier) 					 q_info->socket_id);
1406a951c1edSWei Hu (Xavier) 	if (rx_mz == NULL) {
14072427c27eSHongbo Zheng 		hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
1408a951c1edSWei Hu (Xavier) 			 q_info->idx);
1409a951c1edSWei Hu (Xavier) 		hns3_rx_queue_release(rxq);
1410a951c1edSWei Hu (Xavier) 		return NULL;
1411a951c1edSWei Hu (Xavier) 	}
1412a951c1edSWei Hu (Xavier) 	rxq->mz = rx_mz;
1413a951c1edSWei Hu (Xavier) 	rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
1414a951c1edSWei Hu (Xavier) 	rxq->rx_ring_phys_addr = rx_mz->iova;
1415a951c1edSWei Hu (Xavier) 
1416a951c1edSWei Hu (Xavier) 	return rxq;
1417a951c1edSWei Hu (Xavier) }
1418a951c1edSWei Hu (Xavier) 
1419a951c1edSWei Hu (Xavier) static int
1420a951c1edSWei Hu (Xavier) hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1421a951c1edSWei Hu (Xavier) 			 uint16_t nb_desc, unsigned int socket_id)
1422a951c1edSWei Hu (Xavier) {
1423a951c1edSWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
1424a951c1edSWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1425a951c1edSWei Hu (Xavier) 	struct hns3_queue_info q_info;
1426a951c1edSWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1427a951c1edSWei Hu (Xavier) 	uint16_t nb_rx_q;
1428a951c1edSWei Hu (Xavier) 
1429a951c1edSWei Hu (Xavier) 	if (hw->fkq_data.rx_queues[idx]) {
1430a951c1edSWei Hu (Xavier) 		hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
1431a951c1edSWei Hu (Xavier) 		hw->fkq_data.rx_queues[idx] = NULL;
1432a951c1edSWei Hu (Xavier) 	}
1433a951c1edSWei Hu (Xavier) 
1434a951c1edSWei Hu (Xavier) 	q_info.idx = idx;
1435a951c1edSWei Hu (Xavier) 	q_info.socket_id = socket_id;
1436a951c1edSWei Hu (Xavier) 	q_info.nb_desc = nb_desc;
1437a951c1edSWei Hu (Xavier) 	q_info.type = "hns3 fake RX queue";
1438a951c1edSWei Hu (Xavier) 	q_info.ring_name = "rx_fake_ring";
1439a951c1edSWei Hu (Xavier) 	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1440a951c1edSWei Hu (Xavier) 	if (rxq == NULL) {
14412427c27eSHongbo Zheng 		hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
1442a951c1edSWei Hu (Xavier) 		return -ENOMEM;
1443a951c1edSWei Hu (Xavier) 	}
1444a951c1edSWei Hu (Xavier) 
1445a951c1edSWei Hu (Xavier) 	/* No need to allocate sw_ring, because upper-layer applications don't use it */
1446a951c1edSWei Hu (Xavier) 	rxq->sw_ring = NULL;
1447a951c1edSWei Hu (Xavier) 
1448a951c1edSWei Hu (Xavier) 	rxq->hns = hns;
1449a951c1edSWei Hu (Xavier) 	rxq->rx_deferred_start = false;
1450a951c1edSWei Hu (Xavier) 	rxq->port_id = dev->data->port_id;
1451a951c1edSWei Hu (Xavier) 	rxq->configured = true;
1452a951c1edSWei Hu (Xavier) 	nb_rx_q = dev->data->nb_rx_queues;
1453a951c1edSWei Hu (Xavier) 	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1454a951c1edSWei Hu (Xavier) 				(nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
1455dfac40d9SWei Hu (Xavier) 	rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
1456a951c1edSWei Hu (Xavier) 
1457a951c1edSWei Hu (Xavier) 	rte_spinlock_lock(&hw->lock);
1458a951c1edSWei Hu (Xavier) 	hw->fkq_data.rx_queues[idx] = rxq;
1459a951c1edSWei Hu (Xavier) 	rte_spinlock_unlock(&hw->lock);
1460a951c1edSWei Hu (Xavier) 
1461a951c1edSWei Hu (Xavier) 	return 0;
1462a951c1edSWei Hu (Xavier) }
1463a951c1edSWei Hu (Xavier) 
1464a951c1edSWei Hu (Xavier) static void*
1465a951c1edSWei Hu (Xavier) hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
1466a951c1edSWei Hu (Xavier) 			    struct hns3_queue_info *q_info)
1467a951c1edSWei Hu (Xavier) {
1468a951c1edSWei Hu (Xavier) 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1469a951c1edSWei Hu (Xavier) 	const struct rte_memzone *tx_mz;
1470a951c1edSWei Hu (Xavier) 	struct hns3_tx_queue *txq;
1471a951c1edSWei Hu (Xavier) 	struct hns3_desc *desc;
1472a951c1edSWei Hu (Xavier) 	unsigned int tx_desc;
147367d01034SHuisong Li 	uint16_t i;
1474a951c1edSWei Hu (Xavier) 
1475a951c1edSWei Hu (Xavier) 	txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
1476a951c1edSWei Hu (Xavier) 				 RTE_CACHE_LINE_SIZE, q_info->socket_id);
1477a951c1edSWei Hu (Xavier) 	if (txq == NULL) {
14782427c27eSHongbo Zheng 		hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
1479a951c1edSWei Hu (Xavier) 			 q_info->idx);
1480a951c1edSWei Hu (Xavier) 		return NULL;
1481a951c1edSWei Hu (Xavier) 	}
1482a951c1edSWei Hu (Xavier) 
1483a951c1edSWei Hu (Xavier) 	/* Allocate tx ring hardware descriptors. */
1484a951c1edSWei Hu (Xavier) 	txq->queue_id = q_info->idx;
1485a951c1edSWei Hu (Xavier) 	txq->nb_tx_desc = q_info->nb_desc;
1486a951c1edSWei Hu (Xavier) 	tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
1487a951c1edSWei Hu (Xavier) 	tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
1488a951c1edSWei Hu (Xavier) 					 tx_desc, HNS3_RING_BASE_ALIGN,
1489a951c1edSWei Hu (Xavier) 					 q_info->socket_id);
1490a951c1edSWei Hu (Xavier) 	if (tx_mz == NULL) {
14912427c27eSHongbo Zheng 		hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
1492a951c1edSWei Hu (Xavier) 			 q_info->idx);
1493a951c1edSWei Hu (Xavier) 		hns3_tx_queue_release(txq);
1494a951c1edSWei Hu (Xavier) 		return NULL;
1495a951c1edSWei Hu (Xavier) 	}
1496a951c1edSWei Hu (Xavier) 	txq->mz = tx_mz;
1497a951c1edSWei Hu (Xavier) 	txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
1498a951c1edSWei Hu (Xavier) 	txq->tx_ring_phys_addr = tx_mz->iova;
1499a951c1edSWei Hu (Xavier) 
1500a951c1edSWei Hu (Xavier) 	/* Clear tx bd */
1501a951c1edSWei Hu (Xavier) 	desc = txq->tx_ring;
1502a951c1edSWei Hu (Xavier) 	for (i = 0; i < txq->nb_tx_desc; i++) {
1503a951c1edSWei Hu (Xavier) 		desc->tx.tp_fe_sc_vld_ra_ri = 0;
1504a951c1edSWei Hu (Xavier) 		desc++;
1505a951c1edSWei Hu (Xavier) 	}
1506a951c1edSWei Hu (Xavier) 
1507a951c1edSWei Hu (Xavier) 	return txq;
1508a951c1edSWei Hu (Xavier) }
1509a951c1edSWei Hu (Xavier) 
1510a951c1edSWei Hu (Xavier) static int
1511a951c1edSWei Hu (Xavier) hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
1512a951c1edSWei Hu (Xavier) 			 uint16_t nb_desc, unsigned int socket_id)
1513a951c1edSWei Hu (Xavier) {
1514a951c1edSWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
1515a951c1edSWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1516a951c1edSWei Hu (Xavier) 	struct hns3_queue_info q_info;
1517a951c1edSWei Hu (Xavier) 	struct hns3_tx_queue *txq;
1518a951c1edSWei Hu (Xavier) 	uint16_t nb_tx_q;
1519a951c1edSWei Hu (Xavier) 
1520a951c1edSWei Hu (Xavier) 	if (hw->fkq_data.tx_queues[idx] != NULL) {
1521a951c1edSWei Hu (Xavier) 		hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
1522a951c1edSWei Hu (Xavier) 		hw->fkq_data.tx_queues[idx] = NULL;
1523a951c1edSWei Hu (Xavier) 	}
1524a951c1edSWei Hu (Xavier) 
1525a951c1edSWei Hu (Xavier) 	q_info.idx = idx;
1526a951c1edSWei Hu (Xavier) 	q_info.socket_id = socket_id;
1527a951c1edSWei Hu (Xavier) 	q_info.nb_desc = nb_desc;
1528a951c1edSWei Hu (Xavier) 	q_info.type = "hns3 fake TX queue";
1529a951c1edSWei Hu (Xavier) 	q_info.ring_name = "tx_fake_ring";
1530a951c1edSWei Hu (Xavier) 	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
1531a951c1edSWei Hu (Xavier) 	if (txq == NULL) {
15322427c27eSHongbo Zheng 		hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
1533a951c1edSWei Hu (Xavier) 		return -ENOMEM;
1534a951c1edSWei Hu (Xavier) 	}
1535a951c1edSWei Hu (Xavier) 
1536a951c1edSWei Hu (Xavier) 	/* No need to allocate sw_ring, because upper-layer applications don't use it */
1537a951c1edSWei Hu (Xavier) 	txq->sw_ring = NULL;
1538e31f123dSWei Hu (Xavier) 	txq->free = NULL;
1539a951c1edSWei Hu (Xavier) 
1540a951c1edSWei Hu (Xavier) 	txq->hns = hns;
1541a951c1edSWei Hu (Xavier) 	txq->tx_deferred_start = false;
1542a951c1edSWei Hu (Xavier) 	txq->port_id = dev->data->port_id;
1543a951c1edSWei Hu (Xavier) 	txq->configured = true;
1544a951c1edSWei Hu (Xavier) 	nb_tx_q = dev->data->nb_tx_queues;
1545a951c1edSWei Hu (Xavier) 	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
1546a951c1edSWei Hu (Xavier) 				(nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
1547a951c1edSWei Hu (Xavier) 
1548a951c1edSWei Hu (Xavier) 	rte_spinlock_lock(&hw->lock);
1549a951c1edSWei Hu (Xavier) 	hw->fkq_data.tx_queues[idx] = txq;
1550a951c1edSWei Hu (Xavier) 	rte_spinlock_unlock(&hw->lock);
1551a951c1edSWei Hu (Xavier) 
1552a951c1edSWei Hu (Xavier) 	return 0;
1553a951c1edSWei Hu (Xavier) }
1554a951c1edSWei Hu (Xavier) 
1555a951c1edSWei Hu (Xavier) static int
1556a951c1edSWei Hu (Xavier) hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1557a951c1edSWei Hu (Xavier) {
1558a951c1edSWei Hu (Xavier) 	uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
1559a951c1edSWei Hu (Xavier) 	void **rxq;
1560708ecc07SHuisong Li 	uint16_t i;
1561a951c1edSWei Hu (Xavier) 
1562a951c1edSWei Hu (Xavier) 	if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
1563a951c1edSWei Hu (Xavier) 		/* first time configuration */
1564a951c1edSWei Hu (Xavier) 		uint32_t size;
1565a951c1edSWei Hu (Xavier) 		size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
1566a951c1edSWei Hu (Xavier) 		hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
1567a951c1edSWei Hu (Xavier) 						     RTE_CACHE_LINE_SIZE);
1568a951c1edSWei Hu (Xavier) 		if (hw->fkq_data.rx_queues == NULL) {
1569a951c1edSWei Hu (Xavier) 			hw->fkq_data.nb_fake_rx_queues = 0;
1570a951c1edSWei Hu (Xavier) 			return -ENOMEM;
1571a951c1edSWei Hu (Xavier) 		}
1572a951c1edSWei Hu (Xavier) 	} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
1573a951c1edSWei Hu (Xavier) 		/* re-configure */
1574a951c1edSWei Hu (Xavier) 		rxq = hw->fkq_data.rx_queues;
1575a951c1edSWei Hu (Xavier) 		for (i = nb_queues; i < old_nb_queues; i++)
15767483341aSXueming Li 			hns3_rx_queue_release_lock(rxq[i]);
1577a951c1edSWei Hu (Xavier) 
1578a951c1edSWei Hu (Xavier) 		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
1579a951c1edSWei Hu (Xavier) 				  RTE_CACHE_LINE_SIZE);
1580a951c1edSWei Hu (Xavier) 		if (rxq == NULL)
1581a951c1edSWei Hu (Xavier) 			return -ENOMEM;
1582a951c1edSWei Hu (Xavier) 		if (nb_queues > old_nb_queues) {
1583a951c1edSWei Hu (Xavier) 			uint16_t new_qs = nb_queues - old_nb_queues;
1584a951c1edSWei Hu (Xavier) 			memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
1585a951c1edSWei Hu (Xavier) 		}
1586a951c1edSWei Hu (Xavier) 
1587a951c1edSWei Hu (Xavier) 		hw->fkq_data.rx_queues = rxq;
1588a951c1edSWei Hu (Xavier) 	} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
1589a951c1edSWei Hu (Xavier) 		rxq = hw->fkq_data.rx_queues;
1590a951c1edSWei Hu (Xavier) 		for (i = nb_queues; i < old_nb_queues; i++)
15917483341aSXueming Li 			hns3_rx_queue_release_lock(rxq[i]);
1592a951c1edSWei Hu (Xavier) 
1593a951c1edSWei Hu (Xavier) 		rte_free(hw->fkq_data.rx_queues);
1594a951c1edSWei Hu (Xavier) 		hw->fkq_data.rx_queues = NULL;
1595a951c1edSWei Hu (Xavier) 	}
1596a951c1edSWei Hu (Xavier) 
1597a951c1edSWei Hu (Xavier) 	hw->fkq_data.nb_fake_rx_queues = nb_queues;
1598a951c1edSWei Hu (Xavier) 
1599a951c1edSWei Hu (Xavier) 	return 0;
1600a951c1edSWei Hu (Xavier) }
1601a951c1edSWei Hu (Xavier) 
1602a951c1edSWei Hu (Xavier) static int
1603a951c1edSWei Hu (Xavier) hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
1604a951c1edSWei Hu (Xavier) {
1605a951c1edSWei Hu (Xavier) 	uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
1606a951c1edSWei Hu (Xavier) 	void **txq;
1607708ecc07SHuisong Li 	uint16_t i;
1608a951c1edSWei Hu (Xavier) 
1609a951c1edSWei Hu (Xavier) 	if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
1610a951c1edSWei Hu (Xavier) 		/* first time configuration */
1611a951c1edSWei Hu (Xavier) 		uint32_t size;
1612a951c1edSWei Hu (Xavier) 		size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
1613a951c1edSWei Hu (Xavier) 		hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
1614a951c1edSWei Hu (Xavier) 						     RTE_CACHE_LINE_SIZE);
1615a951c1edSWei Hu (Xavier) 		if (hw->fkq_data.tx_queues == NULL) {
1616a951c1edSWei Hu (Xavier) 			hw->fkq_data.nb_fake_tx_queues = 0;
1617a951c1edSWei Hu (Xavier) 			return -ENOMEM;
1618a951c1edSWei Hu (Xavier) 		}
1619a951c1edSWei Hu (Xavier) 	} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
1620a951c1edSWei Hu (Xavier) 		/* re-configure */
1621a951c1edSWei Hu (Xavier) 		txq = hw->fkq_data.tx_queues;
1622a951c1edSWei Hu (Xavier) 		for (i = nb_queues; i < old_nb_queues; i++)
16237483341aSXueming Li 			hns3_tx_queue_release_lock(txq[i]);
1624a951c1edSWei Hu (Xavier) 		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1625a951c1edSWei Hu (Xavier) 				  RTE_CACHE_LINE_SIZE);
1626a951c1edSWei Hu (Xavier) 		if (txq == NULL)
1627a951c1edSWei Hu (Xavier) 			return -ENOMEM;
1628a951c1edSWei Hu (Xavier) 		if (nb_queues > old_nb_queues) {
1629a951c1edSWei Hu (Xavier) 			uint16_t new_qs = nb_queues - old_nb_queues;
1630a951c1edSWei Hu (Xavier) 			memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
1631a951c1edSWei Hu (Xavier) 		}
1632a951c1edSWei Hu (Xavier) 
1633a951c1edSWei Hu (Xavier) 		hw->fkq_data.tx_queues = txq;
1634a951c1edSWei Hu (Xavier) 	} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
1635a951c1edSWei Hu (Xavier) 		txq = hw->fkq_data.tx_queues;
1636a951c1edSWei Hu (Xavier) 		for (i = nb_queues; i < old_nb_queues; i++)
16377483341aSXueming Li 			hns3_tx_queue_release_lock(txq[i]);
1638a951c1edSWei Hu (Xavier) 
1639a951c1edSWei Hu (Xavier) 		rte_free(hw->fkq_data.tx_queues);
1640a951c1edSWei Hu (Xavier) 		hw->fkq_data.tx_queues = NULL;
1641a951c1edSWei Hu (Xavier) 	}
1642a951c1edSWei Hu (Xavier) 	hw->fkq_data.nb_fake_tx_queues = nb_queues;
1643a951c1edSWei Hu (Xavier) 
1644a951c1edSWei Hu (Xavier) 	return 0;
1645a951c1edSWei Hu (Xavier) }
1646a951c1edSWei Hu (Xavier) 
1647a951c1edSWei Hu (Xavier) int
1648a951c1edSWei Hu (Xavier) hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
1649a951c1edSWei Hu (Xavier) 			      uint16_t nb_tx_q)
1650a951c1edSWei Hu (Xavier) {
1651a951c1edSWei Hu (Xavier) 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1652a951c1edSWei Hu (Xavier) 	uint16_t rx_need_add_nb_q;
1653a951c1edSWei Hu (Xavier) 	uint16_t tx_need_add_nb_q;
1654a951c1edSWei Hu (Xavier) 	uint16_t port_id;
1655a951c1edSWei Hu (Xavier) 	uint16_t q;
1656a951c1edSWei Hu (Xavier) 	int ret;
1657a951c1edSWei Hu (Xavier) 
1658efcaa81eSChengchang Tang 	if (hns3_dev_get_support(hw, INDEP_TXRX))
1659a2ddaac1SHuisong Li 		return 0;
1660a2ddaac1SHuisong Li 
1661a951c1edSWei Hu (Xavier) 	/* Setup new number of fake RX/TX queues and reconfigure device. */
1662a951c1edSWei Hu (Xavier) 	rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
1663a951c1edSWei Hu (Xavier) 	tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
1664a951c1edSWei Hu (Xavier) 	ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
1665a951c1edSWei Hu (Xavier) 	if (ret) {
1666a951c1edSWei Hu (Xavier) 		hns3_err(hw, "Failed to configure fake rx queues: %d", ret);
1667fa29fe45SChengchang Tang 		return ret;
1668a951c1edSWei Hu (Xavier) 	}
1669a951c1edSWei Hu (Xavier) 
1670a951c1edSWei Hu (Xavier) 	ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
1671a951c1edSWei Hu (Xavier) 	if (ret) {
167294e9574fSDengdui Huang 		hns3_err(hw, "Failed to configure fake tx queues: %d", ret);
1673a951c1edSWei Hu (Xavier) 		goto cfg_fake_tx_q_fail;
1674a951c1edSWei Hu (Xavier) 	}
1675a951c1edSWei Hu (Xavier) 
1676a951c1edSWei Hu (Xavier) 	/* Allocate and set up fake RX queue per Ethernet port. */
1677a951c1edSWei Hu (Xavier) 	port_id = hw->data->port_id;
1678a951c1edSWei Hu (Xavier) 	for (q = 0; q < rx_need_add_nb_q; q++) {
1679a951c1edSWei Hu (Xavier) 		ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1680a951c1edSWei Hu (Xavier) 					       rte_eth_dev_socket_id(port_id));
1681a951c1edSWei Hu (Xavier) 		if (ret)
1682a951c1edSWei Hu (Xavier) 			goto setup_fake_rx_q_fail;
1683a951c1edSWei Hu (Xavier) 	}
1684a951c1edSWei Hu (Xavier) 
1685a951c1edSWei Hu (Xavier) 	/* Allocate and set up fake TX queue per Ethernet port. */
1686a951c1edSWei Hu (Xavier) 	for (q = 0; q < tx_need_add_nb_q; q++) {
1687a951c1edSWei Hu (Xavier) 		ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
1688a951c1edSWei Hu (Xavier) 					       rte_eth_dev_socket_id(port_id));
1689a951c1edSWei Hu (Xavier) 		if (ret)
1690a951c1edSWei Hu (Xavier) 			goto setup_fake_tx_q_fail;
1691a951c1edSWei Hu (Xavier) 	}
1692a951c1edSWei Hu (Xavier) 
1693a951c1edSWei Hu (Xavier) 	return 0;
1694a951c1edSWei Hu (Xavier) 
1695a951c1edSWei Hu (Xavier) setup_fake_tx_q_fail:
1696a951c1edSWei Hu (Xavier) setup_fake_rx_q_fail:
1697a951c1edSWei Hu (Xavier) 	(void)hns3_fake_tx_queue_config(hw, 0);
1698a951c1edSWei Hu (Xavier) cfg_fake_tx_q_fail:
1699a951c1edSWei Hu (Xavier) 	(void)hns3_fake_rx_queue_config(hw, 0);
1700a951c1edSWei Hu (Xavier) 
1701a951c1edSWei Hu (Xavier) 	return ret;
1702a951c1edSWei Hu (Xavier) }
1703a951c1edSWei Hu (Xavier) 
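/*
 * Worked example (hypothetical queue counts): with hw->cfg_max_queues = 16,
 * nb_rx_q = 12 and nb_tx_q = 8, the function above creates
 *	rx_need_add_nb_q = 16 - 12 = 4 fake Rx queues,
 *	tx_need_add_nb_q = 16 - 8  = 8 fake Tx queues,
 * so the hardware still sees cfg_max_queues queue pairs. On devices with
 * independent Tx/Rx queue support (INDEP_TXRX) no fake queues are needed
 * and the function returns immediately.
 */
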
1704bba63669SWei Hu (Xavier) void
1705bba63669SWei Hu (Xavier) hns3_dev_release_mbufs(struct hns3_adapter *hns)
1706bba63669SWei Hu (Xavier) {
1707bba63669SWei Hu (Xavier) 	struct rte_eth_dev_data *dev_data = hns->hw.data;
1708bba63669SWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1709bba63669SWei Hu (Xavier) 	struct hns3_tx_queue *txq;
171067d01034SHuisong Li 	uint16_t i;
1711bba63669SWei Hu (Xavier) 
1712bba63669SWei Hu (Xavier) 	if (dev_data->rx_queues)
1713bba63669SWei Hu (Xavier) 		for (i = 0; i < dev_data->nb_rx_queues; i++) {
1714bba63669SWei Hu (Xavier) 			rxq = dev_data->rx_queues[i];
1715fa29fe45SChengchang Tang 			if (rxq == NULL)
1716bba63669SWei Hu (Xavier) 				continue;
1717bba63669SWei Hu (Xavier) 			hns3_rx_queue_release_mbufs(rxq);
1718bba63669SWei Hu (Xavier) 		}
1719bba63669SWei Hu (Xavier) 
1720bba63669SWei Hu (Xavier) 	if (dev_data->tx_queues)
1721bba63669SWei Hu (Xavier) 		for (i = 0; i < dev_data->nb_tx_queues; i++) {
1722bba63669SWei Hu (Xavier) 			txq = dev_data->tx_queues[i];
1723fa29fe45SChengchang Tang 			if (txq == NULL)
1724bba63669SWei Hu (Xavier) 				continue;
1725bba63669SWei Hu (Xavier) 			hns3_tx_queue_release_mbufs(txq);
1726bba63669SWei Hu (Xavier) 		}
1727bba63669SWei Hu (Xavier) }
1728bba63669SWei Hu (Xavier) 
1729dfac40d9SWei Hu (Xavier) static int
1730dfac40d9SWei Hu (Xavier) hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
1731dfac40d9SWei Hu (Xavier) {
1732dfac40d9SWei Hu (Xavier) 	uint16_t vld_buf_size;
1733dfac40d9SWei Hu (Xavier) 	uint16_t num_hw_specs;
1734dfac40d9SWei Hu (Xavier) 	uint16_t i;
1735dfac40d9SWei Hu (Xavier) 
1736dfac40d9SWei Hu (Xavier) 	/*
1737dfac40d9SWei Hu (Xavier) 	 * The hns3 network engine supports only four typical buffer size
1738dfac40d9SWei Hu (Xavier) 	 * specifications. The buffer size affects the max packet length and
1739dfac40d9SWei Hu (Xavier) 	 * the max number of segments when HW GRO is enabled on the receive
1740dfac40d9SWei Hu (Xavier) 	 * side. The relationship between them is as follows:
1741dfac40d9SWei Hu (Xavier) 	 *      rx_buf_size     |  max_gro_pkt_len  |  max_gro_nb_seg
1742dfac40d9SWei Hu (Xavier) 	 * ---------------------|-------------------|----------------
1743dfac40d9SWei Hu (Xavier) 	 * HNS3_4K_BD_BUF_SIZE  |        60KB       |       15
1744dfac40d9SWei Hu (Xavier) 	 * HNS3_2K_BD_BUF_SIZE  |        62KB       |       31
1745dfac40d9SWei Hu (Xavier) 	 * HNS3_1K_BD_BUF_SIZE  |        63KB       |       63
1746dfac40d9SWei Hu (Xavier) 	 * HNS3_512_BD_BUF_SIZE |      31.5KB       |       63
1747dfac40d9SWei Hu (Xavier) 	 */
1748dfac40d9SWei Hu (Xavier) 	static const uint16_t hw_rx_buf_size[] = {
1749dfac40d9SWei Hu (Xavier) 		HNS3_4K_BD_BUF_SIZE,
1750dfac40d9SWei Hu (Xavier) 		HNS3_2K_BD_BUF_SIZE,
1751dfac40d9SWei Hu (Xavier) 		HNS3_1K_BD_BUF_SIZE,
1752dfac40d9SWei Hu (Xavier) 		HNS3_512_BD_BUF_SIZE
1753dfac40d9SWei Hu (Xavier) 	};
1754dfac40d9SWei Hu (Xavier) 
1755dfac40d9SWei Hu (Xavier) 	vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
1756dfac40d9SWei Hu (Xavier) 			RTE_PKTMBUF_HEADROOM);
1757dfac40d9SWei Hu (Xavier) 	if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
1758dfac40d9SWei Hu (Xavier) 		return -EINVAL;
1759dfac40d9SWei Hu (Xavier) 
1760dfac40d9SWei Hu (Xavier) 	num_hw_specs = RTE_DIM(hw_rx_buf_size);
1761dfac40d9SWei Hu (Xavier) 	for (i = 0; i < num_hw_specs; i++) {
1762dfac40d9SWei Hu (Xavier) 		if (vld_buf_size >= hw_rx_buf_size[i]) {
1763dfac40d9SWei Hu (Xavier) 			*rx_buf_len = hw_rx_buf_size[i];
1764dfac40d9SWei Hu (Xavier) 			break;
1765dfac40d9SWei Hu (Xavier) 		}
1766dfac40d9SWei Hu (Xavier) 	}
1767dfac40d9SWei Hu (Xavier) 	return 0;
1768dfac40d9SWei Hu (Xavier) }
1769dfac40d9SWei Hu (Xavier) 
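/*
 * Worked example (hypothetical mempool geometry, and assuming
 * HNS3_2K_BD_BUF_SIZE is 2048 bytes as its name suggests): a pool created
 * with a data room of 2048 + RTE_PKTMBUF_HEADROOM bytes yields
 *	vld_buf_size = (2048 + RTE_PKTMBUF_HEADROOM) - RTE_PKTMBUF_HEADROOM
 *		     = 2048,
 * so the scan selects HNS3_2K_BD_BUF_SIZE, the largest specification not
 * exceeding the usable room:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 4096,
 *			256, 0, 2048 + RTE_PKTMBUF_HEADROOM, SOCKET_ID_ANY);
 *	uint16_t buf_len;
 *	int ret = hns3_rx_buf_len_calc(mp, &buf_len);
 */
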
1770521ab3e9SWei Hu (Xavier) static int
1771fa29fe45SChengchang Tang hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
1772fa29fe45SChengchang Tang 				uint16_t nb_desc)
1773fa29fe45SChengchang Tang {
1774fa29fe45SChengchang Tang 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1775fa29fe45SChengchang Tang 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
17761bb4a528SFerruh Yigit 	uint32_t frame_size = dev->data->mtu + HNS3_ETH_OVERHEAD;
1777fa29fe45SChengchang Tang 	uint16_t min_vec_bds;
1778fa29fe45SChengchang Tang 
1779fa29fe45SChengchang Tang 	/*
1780fa29fe45SChengchang Tang 	 * The HNS3 hardware network engine enables scattered Rx by default.
1781fa29fe45SChengchang Tang 	 * If the driver does not work in scattered mode, packets larger than
17821bb4a528SFerruh Yigit 	 * buf_size but smaller than the frame size would be spread across
1783fa29fe45SChengchang Tang 	 * multiple BDs, a situation the driver cannot handle.
1784fa29fe45SChengchang Tang 	 */
17851bb4a528SFerruh Yigit 	if (!hw->data->scattered_rx && frame_size > buf_size) {
17861bb4a528SFerruh Yigit 		hns3_err(hw, "frame size is not allowed to be set greater "
1787fa29fe45SChengchang Tang 			     "than rx_buf_len if scattered is off.");
1788fa29fe45SChengchang Tang 		return -EINVAL;
1789fa29fe45SChengchang Tang 	}
1790fa29fe45SChengchang Tang 
17915125731fSChengwen Feng 	if (pkt_burst == hns3_recv_pkts_vec ||
17925125731fSChengwen Feng 	    pkt_burst == hns3_recv_pkts_vec_sve) {
1793fa29fe45SChengchang Tang 		min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
1794fa29fe45SChengchang Tang 			      HNS3_DEFAULT_RX_BURST;
1795fa29fe45SChengchang Tang 		if (nb_desc < min_vec_bds ||
1796fa29fe45SChengchang Tang 		    nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
1797fa29fe45SChengchang Tang 			hns3_err(hw, "if Rx burst mode is vector, "
1798fa29fe45SChengchang Tang 				 "the number of descriptors must be no less "
1799fa29fe45SChengchang Tang 				 "than the min vector bds:%u, and be divisible "
1800fa29fe45SChengchang Tang 				 "by the rxq rearm thresh:%u.",
1801fa29fe45SChengchang Tang 				 min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
1802fa29fe45SChengchang Tang 			return -EINVAL;
1803fa29fe45SChengchang Tang 		}
1804fa29fe45SChengchang Tang 	}
1805fa29fe45SChengchang Tang 	return 0;
1806fa29fe45SChengchang Tang }
1807fa29fe45SChengchang Tang 
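/*
 * Worked example (assuming, hypothetically, a rearm threshold of 64 and a
 * default Rx burst of 32, so min_vec_bds = 96): with a vector Rx burst
 * function selected, nb_desc = 128 passes the check above (128 >= 96 and
 * 128 % 64 == 0), while nb_desc = 96 is rejected because 96 % 64 != 0.
 */
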
1808fa29fe45SChengchang Tang static int
1809521ab3e9SWei Hu (Xavier) hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
1810521ab3e9SWei Hu (Xavier) 			 struct rte_mempool *mp, uint16_t nb_desc,
1811521ab3e9SWei Hu (Xavier) 			 uint16_t *buf_size)
1812521ab3e9SWei Hu (Xavier) {
1813fa29fe45SChengchang Tang 	int ret;
1814fa29fe45SChengchang Tang 
1815521ab3e9SWei Hu (Xavier) 	if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
1816521ab3e9SWei Hu (Xavier) 	    nb_desc % HNS3_ALIGN_RING_DESC) {
1817521ab3e9SWei Hu (Xavier) 		hns3_err(hw, "Number (%u) of rx descriptors is invalid",
1818521ab3e9SWei Hu (Xavier) 			 nb_desc);
1819521ab3e9SWei Hu (Xavier) 		return -EINVAL;
1820521ab3e9SWei Hu (Xavier) 	}
1821521ab3e9SWei Hu (Xavier) 
1822c1f0cd3aSDengdui Huang 	if (conf->rx_free_thresh >= nb_desc) {
1823c1f0cd3aSDengdui Huang 		hns3_err(hw, "rx_free_thresh (%u) must be less than %u",
1824c1f0cd3aSDengdui Huang 			 conf->rx_free_thresh, nb_desc);
1825c1f0cd3aSDengdui Huang 		return -EINVAL;
1826c1f0cd3aSDengdui Huang 	}
1827c1f0cd3aSDengdui Huang 
1828521ab3e9SWei Hu (Xavier) 	if (conf->rx_drop_en == 0)
1829521ab3e9SWei Hu (Xavier) 		hns3_warn(hw, "if no descriptors are available, packets are "
1830521ab3e9SWei Hu (Xavier) 			  "always dropped and rx_drop_en (1) is fixed on");
1831521ab3e9SWei Hu (Xavier) 
1832521ab3e9SWei Hu (Xavier) 	if (hns3_rx_buf_len_calc(mp, buf_size)) {
1833521ab3e9SWei Hu (Xavier) 		hns3_err(hw, "rxq mbufs' data room size (%u) is not enough! "
1834521ab3e9SWei Hu (Xavier) 				"minimal data room size (%u).",
1835521ab3e9SWei Hu (Xavier) 				rte_pktmbuf_data_room_size(mp),
1836521ab3e9SWei Hu (Xavier) 				HNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);
1837521ab3e9SWei Hu (Xavier) 		return -EINVAL;
1838521ab3e9SWei Hu (Xavier) 	}
1839521ab3e9SWei Hu (Xavier) 
1840fa29fe45SChengchang Tang 	if (hw->data->dev_started) {
1841fa29fe45SChengchang Tang 		ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
1842fa29fe45SChengchang Tang 		if (ret) {
1843fa29fe45SChengchang Tang 			hns3_err(hw, "Rx queue runtime setup failed.");
1844fa29fe45SChengchang Tang 			return ret;
1845fa29fe45SChengchang Tang 		}
1846fa29fe45SChengchang Tang 	}
1847fa29fe45SChengchang Tang 
1848521ab3e9SWei Hu (Xavier) 	return 0;
1849521ab3e9SWei Hu (Xavier) }
1850521ab3e9SWei Hu (Xavier) 
185176d79456SWei Hu (Xavier) uint32_t
185276d79456SWei Hu (Xavier) hns3_get_tqp_reg_offset(uint16_t queue_id)
185376d79456SWei Hu (Xavier) {
185476d79456SWei Hu (Xavier) 	uint32_t reg_offset;
185576d79456SWei Hu (Xavier) 
185676d79456SWei Hu (Xavier) 	/* Queues with id >= HNS3_MIN_EXTEND_QUEUE_ID need an extended register offset */
185776d79456SWei Hu (Xavier) 	if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
185876d79456SWei Hu (Xavier) 		reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
185976d79456SWei Hu (Xavier) 	else
186076d79456SWei Hu (Xavier) 		reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
186176d79456SWei Hu (Xavier) 			     (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
186276d79456SWei Hu (Xavier) 			     HNS3_TQP_REG_SIZE;
186376d79456SWei Hu (Xavier) 
186476d79456SWei Hu (Xavier) 	return reg_offset;
186576d79456SWei Hu (Xavier) }
186676d79456SWei Hu (Xavier) 
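/*
 * Worked examples of the mapping above, written symbolically with the
 * macros used in the function:
 *	queue 0                        -> HNS3_TQP_REG_OFFSET
 *	queue 1                        -> HNS3_TQP_REG_OFFSET + HNS3_TQP_REG_SIZE
 *	queue HNS3_MIN_EXTEND_QUEUE_ID -> HNS3_TQP_REG_OFFSET +
 *	                                  HNS3_TQP_EXT_REG_OFFSET
 * and each further extended queue adds another HNS3_TQP_REG_SIZE.
 */
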
1867bba63669SWei Hu (Xavier) int
1868bba63669SWei Hu (Xavier) hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
1869bba63669SWei Hu (Xavier) 		    unsigned int socket_id, const struct rte_eth_rxconf *conf,
1870bba63669SWei Hu (Xavier) 		    struct rte_mempool *mp)
1871bba63669SWei Hu (Xavier) {
1872bba63669SWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
1873bba63669SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1874a951c1edSWei Hu (Xavier) 	struct hns3_queue_info q_info;
1875bba63669SWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1876dfac40d9SWei Hu (Xavier) 	uint16_t rx_buf_size;
1877bba63669SWei Hu (Xavier) 	int rx_entry_len;
1878521ab3e9SWei Hu (Xavier) 	int ret;
1879bba63669SWei Hu (Xavier) 
1880521ab3e9SWei Hu (Xavier) 	ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
1881521ab3e9SWei Hu (Xavier) 	if (ret)
1882521ab3e9SWei Hu (Xavier) 		return ret;
1883a02f1461SWei Hu (Xavier) 
1884bba63669SWei Hu (Xavier) 	if (dev->data->rx_queues[idx]) {
1885bba63669SWei Hu (Xavier) 		hns3_rx_queue_release(dev->data->rx_queues[idx]);
1886bba63669SWei Hu (Xavier) 		dev->data->rx_queues[idx] = NULL;
1887bba63669SWei Hu (Xavier) 	}
1888bba63669SWei Hu (Xavier) 
1889a951c1edSWei Hu (Xavier) 	q_info.idx = idx;
1890a951c1edSWei Hu (Xavier) 	q_info.socket_id = socket_id;
1891a951c1edSWei Hu (Xavier) 	q_info.nb_desc = nb_desc;
1892a951c1edSWei Hu (Xavier) 	q_info.type = "hns3 RX queue";
1893a951c1edSWei Hu (Xavier) 	q_info.ring_name = "rx_ring";
1894dfac40d9SWei Hu (Xavier) 
1895a951c1edSWei Hu (Xavier) 	rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
1896bba63669SWei Hu (Xavier) 	if (rxq == NULL) {
1897a951c1edSWei Hu (Xavier) 		hns3_err(hw,
1898a951c1edSWei Hu (Xavier) 			 "Failed to alloc mem and reserve DMA mem for rx ring!");
1899bba63669SWei Hu (Xavier) 		return -ENOMEM;
1900bba63669SWei Hu (Xavier) 	}
1901bba63669SWei Hu (Xavier) 
1902bba63669SWei Hu (Xavier) 	rxq->hns = hns;
1903521ab3e9SWei Hu (Xavier) 	rxq->ptype_tbl = &hns->ptype_tbl;
1904bba63669SWei Hu (Xavier) 	rxq->mb_pool = mp;
1905ceabee45SWei Hu (Xavier) 	rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
1906ceabee45SWei Hu (Xavier) 		conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
1907fa29fe45SChengchang Tang 
1908bba63669SWei Hu (Xavier) 	rxq->rx_deferred_start = conf->rx_deferred_start;
1909efcaa81eSChengchang Tang 	if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
1910fa29fe45SChengchang Tang 		hns3_warn(hw, "deferred start is not supported.");
1911fa29fe45SChengchang Tang 		rxq->rx_deferred_start = false;
1912fa29fe45SChengchang Tang 	}
1913bba63669SWei Hu (Xavier) 
1914a3d4f4d2SWei Hu (Xavier) 	rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
1915a3d4f4d2SWei Hu (Xavier) 			sizeof(struct hns3_entry);
1916bba63669SWei Hu (Xavier) 	rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len,
1917bba63669SWei Hu (Xavier) 					  RTE_CACHE_LINE_SIZE, socket_id);
1918bba63669SWei Hu (Xavier) 	if (rxq->sw_ring == NULL) {
1919bba63669SWei Hu (Xavier) 		hns3_err(hw, "Failed to allocate memory for rx sw ring!");
1920bba63669SWei Hu (Xavier) 		hns3_rx_queue_release(rxq);
1921bba63669SWei Hu (Xavier) 		return -ENOMEM;
1922bba63669SWei Hu (Xavier) 	}
1923bba63669SWei Hu (Xavier) 
1924bba63669SWei Hu (Xavier) 	rxq->next_to_use = 0;
1925ceabee45SWei Hu (Xavier) 	rxq->rx_free_hold = 0;
1926a3d4f4d2SWei Hu (Xavier) 	rxq->rx_rearm_start = 0;
1927a3d4f4d2SWei Hu (Xavier) 	rxq->rx_rearm_nb = 0;
1928bba63669SWei Hu (Xavier) 	rxq->pkt_first_seg = NULL;
1929bba63669SWei Hu (Xavier) 	rxq->pkt_last_seg = NULL;
1930bba63669SWei Hu (Xavier) 	rxq->port_id = dev->data->port_id;
1931992b24a1SWei Hu (Xavier) 	/*
1932992b24a1SWei Hu (Xavier) 	 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
1933992b24a1SWei Hu (Xavier) 	 * the pvid_sw_discard_en in the queue struct should not be changed,
1934f8dbaebbSSean Morrissey 	 * because PVID-related operations do not need to be processed by PMD.
1935f8dbaebbSSean Morrissey 	 * For hns3 VF devices, whether PVID needs to be processed depends on
1936992b24a1SWei Hu (Xavier) 	 * the configuration of the PF kernel-mode netdev driver. The related
1937992b24a1SWei Hu (Xavier) 	 * PF configuration is delivered through the mailbox and finally
19387be78d02SJosh Soref 	 * reflected in port_base_vlan_cfg.
1939992b24a1SWei Hu (Xavier) 	 */
1940992b24a1SWei Hu (Xavier) 	if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1941992b24a1SWei Hu (Xavier) 		rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
1942992b24a1SWei Hu (Xavier) 						HNS3_PORT_BASE_VLAN_ENABLE;
1943992b24a1SWei Hu (Xavier) 	else
1944992b24a1SWei Hu (Xavier) 		rxq->pvid_sw_discard_en = false;
1945efcaa81eSChengchang Tang 	rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
1946bba63669SWei Hu (Xavier) 	rxq->configured = true;
194776d79456SWei Hu (Xavier) 	rxq->io_base = (void *)((char *)hw->io_base +
194876d79456SWei Hu (Xavier) 					hns3_get_tqp_reg_offset(idx));
1949323df894SWei Hu (Xavier) 	rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
1950323df894SWei Hu (Xavier) 			   HNS3_RING_RX_HEAD_REG);
1951dfac40d9SWei Hu (Xavier) 	rxq->rx_buf_len = rx_buf_size;
195286c551d1SHuisong Li 	memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats));
19539b77f1feSHuisong Li 	memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
19549b77f1feSHuisong Li 	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
1955bba63669SWei Hu (Xavier) 
19568973d7c4SMin Hu (Connor) 	/* The CRC length set here is used to amend the packet length */
1957295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
19588973d7c4SMin Hu (Connor) 		rxq->crc_len = RTE_ETHER_CRC_LEN;
19598973d7c4SMin Hu (Connor) 	else
19608973d7c4SMin Hu (Connor) 		rxq->crc_len = 0;
19618973d7c4SMin Hu (Connor) 
1962521ab3e9SWei Hu (Xavier) 	rxq->bulk_mbuf_num = 0;
1963521ab3e9SWei Hu (Xavier) 
1964bba63669SWei Hu (Xavier) 	rte_spinlock_lock(&hw->lock);
1965bba63669SWei Hu (Xavier) 	dev->data->rx_queues[idx] = rxq;
1966bba63669SWei Hu (Xavier) 	rte_spinlock_unlock(&hw->lock);
1967bba63669SWei Hu (Xavier) 
1968bba63669SWei Hu (Xavier) 	return 0;
1969bba63669SWei Hu (Xavier) }
1970bba63669SWei Hu (Xavier) 
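/*
 * Illustrative sketch (not part of the driver): an application-side setup
 * call that reaches hns3_rx_queue_setup() through the ethdev layer. The
 * descriptor count and free threshold are hypothetical but satisfy
 * hns3_rx_queue_conf_check(); `mp` is a previously created mbuf pool.
 *
 *	struct rte_eth_rxconf rxconf = {
 *		.rx_free_thresh = 32,
 *		.rx_drop_en = 1,
 *		.rx_deferred_start = 0,
 *	};
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), &rxconf, mp);
 */
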
1971521ab3e9SWei Hu (Xavier) void
1972521ab3e9SWei Hu (Xavier) hns3_rx_scattered_reset(struct rte_eth_dev *dev)
1973bba63669SWei Hu (Xavier) {
1974521ab3e9SWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
1975521ab3e9SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1976bba63669SWei Hu (Xavier) 
1977521ab3e9SWei Hu (Xavier) 	hw->rx_buf_len = 0;
1978521ab3e9SWei Hu (Xavier) 	dev->data->scattered_rx = false;
1979521ab3e9SWei Hu (Xavier) }
1980bba63669SWei Hu (Xavier) 
1981521ab3e9SWei Hu (Xavier) void
1982521ab3e9SWei Hu (Xavier) hns3_rx_scattered_calc(struct rte_eth_dev *dev)
1983521ab3e9SWei Hu (Xavier) {
1984521ab3e9SWei Hu (Xavier) 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1985521ab3e9SWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
1986521ab3e9SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
1987521ab3e9SWei Hu (Xavier) 	struct hns3_rx_queue *rxq;
1988521ab3e9SWei Hu (Xavier) 	uint32_t queue_id;
1989bba63669SWei Hu (Xavier) 
1990521ab3e9SWei Hu (Xavier) 	if (dev->data->rx_queues == NULL)
1991521ab3e9SWei Hu (Xavier) 		return;
1992bba63669SWei Hu (Xavier) 
1993521ab3e9SWei Hu (Xavier) 	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
1994521ab3e9SWei Hu (Xavier) 		rxq = dev->data->rx_queues[queue_id];
1995521ab3e9SWei Hu (Xavier) 		if (hw->rx_buf_len == 0)
1996521ab3e9SWei Hu (Xavier) 			hw->rx_buf_len = rxq->rx_buf_len;
1997bba63669SWei Hu (Xavier) 		else
1998521ab3e9SWei Hu (Xavier) 			hw->rx_buf_len = RTE_MIN(hw->rx_buf_len,
1999521ab3e9SWei Hu (Xavier) 						 rxq->rx_buf_len);
2000521ab3e9SWei Hu (Xavier) 	}
2001521ab3e9SWei Hu (Xavier) 
2002295968d1SFerruh Yigit 	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
20031bb4a528SFerruh Yigit 	    dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
2004521ab3e9SWei Hu (Xavier) 		dev->data->scattered_rx = true;
2005bba63669SWei Hu (Xavier) }
2006bba63669SWei Hu (Xavier) 
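/*
 * Worked example (hypothetical values, assuming HNS3_ETH_OVERHEAD is a few
 * tens of bytes): if the smallest rx_buf_len over all queues is 2048, an
 * MTU of 1500 keeps scattered_rx off (1500 + overhead <= 2048), while a
 * jumbo MTU of 9000 turns it on; setting RTE_ETH_RX_OFFLOAD_SCATTER in
 * rxmode.offloads enables it regardless of the MTU.
 */
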
2007bba63669SWei Hu (Xavier) const uint32_t *
2008ba6a168aSSivaramakrishnan Venkat hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
2009bba63669SWei Hu (Xavier) {
2010bba63669SWei Hu (Xavier) 	static const uint32_t ptypes[] = {
2011bba63669SWei Hu (Xavier) 		RTE_PTYPE_L2_ETHER,
2012bba63669SWei Hu (Xavier) 		RTE_PTYPE_L2_ETHER_LLDP,
2013bba63669SWei Hu (Xavier) 		RTE_PTYPE_L2_ETHER_ARP,
2014bba63669SWei Hu (Xavier) 		RTE_PTYPE_L3_IPV4,
2015bba63669SWei Hu (Xavier) 		RTE_PTYPE_L3_IPV4_EXT,
2016bba63669SWei Hu (Xavier) 		RTE_PTYPE_L3_IPV6,
2017bba63669SWei Hu (Xavier) 		RTE_PTYPE_L3_IPV6_EXT,
2018bba63669SWei Hu (Xavier) 		RTE_PTYPE_L4_IGMP,
2019bba63669SWei Hu (Xavier) 		RTE_PTYPE_L4_ICMP,
2020bba63669SWei Hu (Xavier) 		RTE_PTYPE_L4_SCTP,
2021bba63669SWei Hu (Xavier) 		RTE_PTYPE_L4_TCP,
2022bba63669SWei Hu (Xavier) 		RTE_PTYPE_L4_UDP,
2023bba63669SWei Hu (Xavier) 		RTE_PTYPE_TUNNEL_GRE,
20240e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L2_ETHER,
20250e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L3_IPV4,
20260e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L3_IPV6,
20270e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L3_IPV4_EXT,
20280e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L3_IPV6_EXT,
20290e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L4_UDP,
20300e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L4_TCP,
20310e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L4_SCTP,
20320e98d5e6SChengchang Tang 		RTE_PTYPE_INNER_L4_ICMP,
2033904ee370SHuisong Li 		RTE_PTYPE_TUNNEL_GRENAT,
20340e98d5e6SChengchang Tang 		RTE_PTYPE_TUNNEL_NVGRE,
2035bba63669SWei Hu (Xavier) 	};
20369d2cca54SChengwen Feng 	static const uint32_t adv_layout_ptypes[] = {
20379d2cca54SChengwen Feng 		RTE_PTYPE_L2_ETHER,
20389d2cca54SChengwen Feng 		RTE_PTYPE_L2_ETHER_TIMESYNC,
20399d2cca54SChengwen Feng 		RTE_PTYPE_L2_ETHER_LLDP,
20409d2cca54SChengwen Feng 		RTE_PTYPE_L2_ETHER_ARP,
20419d2cca54SChengwen Feng 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
20429d2cca54SChengwen Feng 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
20439d2cca54SChengwen Feng 		RTE_PTYPE_L4_FRAG,
20449d2cca54SChengwen Feng 		RTE_PTYPE_L4_NONFRAG,
20459d2cca54SChengwen Feng 		RTE_PTYPE_L4_UDP,
20469d2cca54SChengwen Feng 		RTE_PTYPE_L4_TCP,
20479d2cca54SChengwen Feng 		RTE_PTYPE_L4_SCTP,
20489d2cca54SChengwen Feng 		RTE_PTYPE_L4_IGMP,
20499d2cca54SChengwen Feng 		RTE_PTYPE_L4_ICMP,
20509d2cca54SChengwen Feng 		RTE_PTYPE_TUNNEL_GRE,
20519d2cca54SChengwen Feng 		RTE_PTYPE_TUNNEL_GRENAT,
20529d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L2_ETHER,
20539d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
20549d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
20559d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L4_FRAG,
20579d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L4_NONFRAG,
20589d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L4_UDP,
20599d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L4_TCP,
20609d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L4_SCTP,
20619d2cca54SChengwen Feng 		RTE_PTYPE_INNER_L4_ICMP,
20629d2cca54SChengwen Feng 	};
20639d2cca54SChengwen Feng 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2064bba63669SWei Hu (Xavier) 
2065aa5baf47SChengwen Feng 	if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
2066a3d4f4d2SWei Hu (Xavier) 	    dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
2067952ebaccSWei Hu (Xavier) 	    dev->rx_pkt_burst == hns3_recv_pkts_vec ||
20689d2cca54SChengwen Feng 	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
2069ba6a168aSSivaramakrishnan Venkat 		if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT)) {
2070ba6a168aSSivaramakrishnan Venkat 			*no_of_elements = RTE_DIM(adv_layout_ptypes);
20719d2cca54SChengwen Feng 			return adv_layout_ptypes;
2072ba6a168aSSivaramakrishnan Venkat 		} else {
2073ba6a168aSSivaramakrishnan Venkat 			*no_of_elements = RTE_DIM(ptypes);
2074bba63669SWei Hu (Xavier) 			return ptypes;
20759d2cca54SChengwen Feng 		}
2076ba6a168aSSivaramakrishnan Venkat 	}
2077bba63669SWei Hu (Xavier) 
2078bba63669SWei Hu (Xavier) 	return NULL;
2079bba63669SWei Hu (Xavier) }
2080bba63669SWei Hu (Xavier) 
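/*
 * Illustrative sketch (not part of the driver): querying the tables above
 * from an application via the ethdev API, which lands in the handler
 * above. `port_id` is assumed to be a valid hns3 port.
 *
 *	uint32_t ptypes[64];
 *	char name[128];
 *	int i, num;
 *
 *	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *					       ptypes, (int)RTE_DIM(ptypes));
 *	for (i = 0; i < num; i++) {
 *		rte_get_ptype_name(ptypes[i], name, sizeof(name));
 *		printf("%s\n", name);
 *	}
 */
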
20810e98d5e6SChengchang Tang static void
20820e98d5e6SChengchang Tang hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
20830e98d5e6SChengchang Tang {
20841f303606SChengwen Feng 	tbl->l3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
20851f303606SChengwen Feng 	tbl->l3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
20861f303606SChengwen Feng 	tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
20871f303606SChengwen Feng 	tbl->l3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
20881f303606SChengwen Feng 	tbl->l3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
20891f303606SChengwen Feng 	tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
20900e98d5e6SChengchang Tang 
20910e98d5e6SChengchang Tang 	tbl->l4table[0] = RTE_PTYPE_L4_UDP;
20920e98d5e6SChengchang Tang 	tbl->l4table[1] = RTE_PTYPE_L4_TCP;
20930e98d5e6SChengchang Tang 	tbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;
20940e98d5e6SChengchang Tang 	tbl->l4table[3] = RTE_PTYPE_L4_SCTP;
20950e98d5e6SChengchang Tang 	tbl->l4table[4] = RTE_PTYPE_L4_IGMP;
20960e98d5e6SChengchang Tang 	tbl->l4table[5] = RTE_PTYPE_L4_ICMP;
20970e98d5e6SChengchang Tang }
20980e98d5e6SChengchang Tang 
20990e98d5e6SChengchang Tang static void
21000e98d5e6SChengchang Tang hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
21010e98d5e6SChengchang Tang {
21021f303606SChengwen Feng 	tbl->inner_l3table[0] = RTE_PTYPE_INNER_L2_ETHER |
21031f303606SChengwen Feng 				RTE_PTYPE_INNER_L3_IPV4;
21041f303606SChengwen Feng 	tbl->inner_l3table[1] = RTE_PTYPE_INNER_L2_ETHER |
21051f303606SChengwen Feng 				RTE_PTYPE_INNER_L3_IPV6;
21060e98d5e6SChengchang Tang 	/* There is no ptype for inner ARP/RARP */
21070e98d5e6SChengchang Tang 	tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
21080e98d5e6SChengchang Tang 	tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
21091f303606SChengwen Feng 	tbl->inner_l3table[4] = RTE_PTYPE_INNER_L2_ETHER |
21101f303606SChengwen Feng 				RTE_PTYPE_INNER_L3_IPV4_EXT;
21111f303606SChengwen Feng 	tbl->inner_l3table[5] = RTE_PTYPE_INNER_L2_ETHER |
21121f303606SChengwen Feng 				RTE_PTYPE_INNER_L3_IPV6_EXT;
21130e98d5e6SChengchang Tang 
21140e98d5e6SChengchang Tang 	tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
21150e98d5e6SChengchang Tang 	tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
21160e98d5e6SChengchang Tang 	/* There is no ptype for inner GRE */
21170e98d5e6SChengchang Tang 	tbl->inner_l4table[2] = RTE_PTYPE_UNKNOWN;
21180e98d5e6SChengchang Tang 	tbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;
21190e98d5e6SChengchang Tang 	/* There is no ptype for inner IGMP */
21200e98d5e6SChengchang Tang 	tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
21210e98d5e6SChengchang Tang 	tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
21220e98d5e6SChengchang Tang 
21231f303606SChengwen Feng 	tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
21241f303606SChengwen Feng 	tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
21250e98d5e6SChengchang Tang 	tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
21260e98d5e6SChengchang Tang 	tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
21271f303606SChengwen Feng 	tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
21281f303606SChengwen Feng 	tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
21290e98d5e6SChengchang Tang 
21300e98d5e6SChengchang Tang 	tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
2131904ee370SHuisong Li 	tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_GRENAT;
21320e98d5e6SChengchang Tang 	tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
21330e98d5e6SChengchang Tang }
21340e98d5e6SChengchang Tang 
2135fb5e9069SChengwen Feng static void
2136fb5e9069SChengwen Feng hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
2137fb5e9069SChengwen Feng {
2138fb5e9069SChengwen Feng 	uint32_t *ptype = tbl->ptype;
2139fb5e9069SChengwen Feng 
2140fb5e9069SChengwen Feng 	/* Non-tunnel L2 */
2141fb5e9069SChengwen Feng 	ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
2142fb5e9069SChengwen Feng 	ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
2143fb5e9069SChengwen Feng 	ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
2144fb5e9069SChengwen Feng 
2145fb5e9069SChengwen Feng 	/* Non-tunnel IPv4 */
2146fb5e9069SChengwen Feng 	ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2147fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_FRAG;
2148fb5e9069SChengwen Feng 	ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2149fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_NONFRAG;
2150fb5e9069SChengwen Feng 	ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2151fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_UDP;
2152fb5e9069SChengwen Feng 	ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2153fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_TCP;
215459dc4604SChengwen Feng 	ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
215559dc4604SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRE;
2156fb5e9069SChengwen Feng 	ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2157fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_SCTP;
2158fb5e9069SChengwen Feng 	ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2159fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_IGMP;
2160fb5e9069SChengwen Feng 	ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2161fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_ICMP;
2162fb5e9069SChengwen Feng 	/* The next ptype is PTP over IPv4 + UDP */
2163fb5e9069SChengwen Feng 	ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2164fb5e9069SChengwen Feng 		    RTE_PTYPE_L4_UDP;
2165fb5e9069SChengwen Feng 
2166fb5e9069SChengwen Feng 	/* IPv4 --> GRE/Teredo/VXLAN */
2167fb5e9069SChengwen Feng 	ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2168fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT;
2169fb5e9069SChengwen Feng 	/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2170fb5e9069SChengwen Feng 	ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2171fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2172fb5e9069SChengwen Feng 
2173fb5e9069SChengwen Feng 	/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2174fb5e9069SChengwen Feng 	ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2175fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2176fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2177fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_FRAG;
2178fb5e9069SChengwen Feng 	ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2179fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2180fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2181fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_NONFRAG;
2182fb5e9069SChengwen Feng 	ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2183fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2184fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2185fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_UDP;
2186fb5e9069SChengwen Feng 	ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2187fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2188fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2189fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_TCP;
2190fb5e9069SChengwen Feng 	ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2191fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2192fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2193fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_SCTP;
2194fb5e9069SChengwen Feng 	/* The next ptype's inner L4 is IGMP */
2195fb5e9069SChengwen Feng 	ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2196fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2197fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2198fb5e9069SChengwen Feng 	ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2199fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2200fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2201fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_ICMP;
2202fb5e9069SChengwen Feng 
2203fb5e9069SChengwen Feng 	/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2204fb5e9069SChengwen Feng 	ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2205fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2206fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2207fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_FRAG;
2208fb5e9069SChengwen Feng 	ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2209fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2210fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2211fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_NONFRAG;
2212fb5e9069SChengwen Feng 	ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2213fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2214fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2215fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_UDP;
2216fb5e9069SChengwen Feng 	ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2217fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2218fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2219fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_TCP;
2220fb5e9069SChengwen Feng 	ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2221fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2222fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2223fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_SCTP;
2224fb5e9069SChengwen Feng 	/* The next ptype's inner L4 is IGMP */
2225fb5e9069SChengwen Feng 	ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2226fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2227fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2228fb5e9069SChengwen Feng 	ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2229fb5e9069SChengwen Feng 		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2230fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2231fb5e9069SChengwen Feng 		    RTE_PTYPE_INNER_L4_ICMP;
2232fb5e9069SChengwen Feng 
2233fb5e9069SChengwen Feng 	/* Non-tunnel IPv6 */
2234fb5e9069SChengwen Feng 	ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2235fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_FRAG;
2236fb5e9069SChengwen Feng 	ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2237fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_NONFRAG;
2238fb5e9069SChengwen Feng 	ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2239fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_UDP;
2240fb5e9069SChengwen Feng 	ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2241fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_TCP;
224259dc4604SChengwen Feng 	ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
224359dc4604SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRE;
2244fb5e9069SChengwen Feng 	ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2245fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_SCTP;
2246fb5e9069SChengwen Feng 	ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2247fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_IGMP;
2248fb5e9069SChengwen Feng 	ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2249fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_ICMP;
2250fb5e9069SChengwen Feng 	/* Special for PTP over IPv6 + UDP */
2251fb5e9069SChengwen Feng 	ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2252fb5e9069SChengwen Feng 		     RTE_PTYPE_L4_UDP;
2253fb5e9069SChengwen Feng 
2254fb5e9069SChengwen Feng 	/* IPv6 --> GRE/Teredo/VXLAN */
2255fb5e9069SChengwen Feng 	ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2256fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT;
2257fb5e9069SChengwen Feng 	/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2258fb5e9069SChengwen Feng 	ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2259fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
2260fb5e9069SChengwen Feng 
2261fb5e9069SChengwen Feng 	/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2262fb5e9069SChengwen Feng 	ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2263fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2264fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2265fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_FRAG;
2266fb5e9069SChengwen Feng 	ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2267fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2268fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2269fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_NONFRAG;
2270fb5e9069SChengwen Feng 	ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2271fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2272fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2273fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_UDP;
2274fb5e9069SChengwen Feng 	ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2275fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2276fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2277fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_TCP;
2278fb5e9069SChengwen Feng 	ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2279fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2280fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2281fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_SCTP;
2282fb5e9069SChengwen Feng 	/* The next ptype's inner L4 is IGMP */
2283fb5e9069SChengwen Feng 	ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2284fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2285fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
2286fb5e9069SChengwen Feng 	ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2287fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2288fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2289fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_ICMP;
2290fb5e9069SChengwen Feng 
2291fb5e9069SChengwen Feng 	/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2292fb5e9069SChengwen Feng 	ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2293fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2294fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2295fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_FRAG;
2296fb5e9069SChengwen Feng 	ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2297fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2298fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2299fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_NONFRAG;
2300fb5e9069SChengwen Feng 	ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2301fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2302fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2303fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_UDP;
2304fb5e9069SChengwen Feng 	ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2305fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2306fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2307fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_TCP;
2308fb5e9069SChengwen Feng 	ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2309fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2310fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2311fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_SCTP;
2312fb5e9069SChengwen Feng 	/* The next ptype's inner L4 is IGMP */
2313fb5e9069SChengwen Feng 	ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2314fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2315fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
2316fb5e9069SChengwen Feng 	ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2317fb5e9069SChengwen Feng 		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2318fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2319fb5e9069SChengwen Feng 		     RTE_PTYPE_INNER_L4_ICMP;
2320fb5e9069SChengwen Feng }
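
/*
 * Note: with the advanced RXD layout the hardware reports a single ptype
 * index, so tbl->ptype[] above is indexed directly instead of combining
 * the per-layer l3/l4/ol tables initialized earlier.
 */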
2321fb5e9069SChengwen Feng 
2322521ab3e9SWei Hu (Xavier) void
2323521ab3e9SWei Hu (Xavier) hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
2324bba63669SWei Hu (Xavier) {
2325521ab3e9SWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
2326521ab3e9SWei Hu (Xavier) 	struct hns3_ptype_table *tbl = &hns->ptype_tbl;
2327bba63669SWei Hu (Xavier) 
2328521ab3e9SWei Hu (Xavier) 	memset(tbl, 0, sizeof(*tbl));
2329bba63669SWei Hu (Xavier) 
23300e98d5e6SChengchang Tang 	hns3_init_non_tunnel_ptype_tbl(tbl);
23310e98d5e6SChengchang Tang 	hns3_init_tunnel_ptype_tbl(tbl);
2332fb5e9069SChengwen Feng 	hns3_init_adv_layout_ptype(tbl);
2333bba63669SWei Hu (Xavier) }
2334bba63669SWei Hu (Xavier) 
2335e28bc147SWei Hu (Xavier) static inline void
23368c744977SChengchang Tang hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
2337e28bc147SWei Hu (Xavier) 		     uint32_t l234_info, const struct hns3_desc *rxd)
2338e28bc147SWei Hu (Xavier) {
2339e28bc147SWei Hu (Xavier) #define HNS3_STRP_STATUS_NUM		0x4
2340e28bc147SWei Hu (Xavier) 
2341e28bc147SWei Hu (Xavier) #define HNS3_NO_STRP_VLAN_VLD		0x0
2342e28bc147SWei Hu (Xavier) #define HNS3_INNER_STRP_VLAN_VLD	0x1
2343e28bc147SWei Hu (Xavier) #define HNS3_OUTER_STRP_VLAN_VLD	0x2
2344e28bc147SWei Hu (Xavier) 	uint32_t strip_status;
2345e28bc147SWei Hu (Xavier) 	uint32_t report_mode;
2346e28bc147SWei Hu (Xavier) 
2347e28bc147SWei Hu (Xavier) 	/*
2348e28bc147SWei Hu (Xavier) 	 * Due to a hardware limitation, the VLAN tag is always written into
2349e28bc147SWei Hu (Xavier) 	 * the Rx descriptor when the tag is stripped from the packet, so the
2350e28bc147SWei Hu (Xavier) 	 * driver needs to determine which tag to report to the mbuf according
2351e28bc147SWei Hu (Xavier) 	 * to the PVID configuration and the VLAN strip status.
2352e28bc147SWei Hu (Xavier) 	 */
2353e28bc147SWei Hu (Xavier) 	static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
2354e28bc147SWei Hu (Xavier) 		{
2355e28bc147SWei Hu (Xavier) 			HNS3_NO_STRP_VLAN_VLD,
2356e28bc147SWei Hu (Xavier) 			HNS3_OUTER_STRP_VLAN_VLD,
2357e28bc147SWei Hu (Xavier) 			HNS3_INNER_STRP_VLAN_VLD,
2358e28bc147SWei Hu (Xavier) 			HNS3_OUTER_STRP_VLAN_VLD
2359e28bc147SWei Hu (Xavier) 		},
2360e28bc147SWei Hu (Xavier) 		{
2361e28bc147SWei Hu (Xavier) 			HNS3_NO_STRP_VLAN_VLD,
2362e28bc147SWei Hu (Xavier) 			HNS3_NO_STRP_VLAN_VLD,
2363e28bc147SWei Hu (Xavier) 			HNS3_NO_STRP_VLAN_VLD,
2364e28bc147SWei Hu (Xavier) 			HNS3_INNER_STRP_VLAN_VLD
2365e28bc147SWei Hu (Xavier) 		}
2366e28bc147SWei Hu (Xavier) 	};
2367e28bc147SWei Hu (Xavier) 	strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
2368e28bc147SWei Hu (Xavier) 				      HNS3_RXD_STRP_TAGP_S);
2369992b24a1SWei Hu (Xavier) 	report_mode = report_type[rxq->pvid_sw_discard_en][strip_status];
2370e28bc147SWei Hu (Xavier) 	switch (report_mode) {
2371e28bc147SWei Hu (Xavier) 	case HNS3_NO_STRP_VLAN_VLD:
2372e28bc147SWei Hu (Xavier) 		mb->vlan_tci = 0;
2373e28bc147SWei Hu (Xavier) 		return;
2374e28bc147SWei Hu (Xavier) 	case HNS3_INNER_STRP_VLAN_VLD:
2375daa02b5cSOlivier Matz 		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
2376e28bc147SWei Hu (Xavier) 		mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
2377e28bc147SWei Hu (Xavier) 		return;
2378e28bc147SWei Hu (Xavier) 	case HNS3_OUTER_STRP_VLAN_VLD:
2379daa02b5cSOlivier Matz 		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
2380e28bc147SWei Hu (Xavier) 		mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
2381e28bc147SWei Hu (Xavier) 		return;
2382e7882247SChengchang Tang 	default:
2383e7882247SChengchang Tang 		mb->vlan_tci = 0;
2384e7882247SChengchang Tang 		return;
2385e28bc147SWei Hu (Xavier) 	}
2386e28bc147SWei Hu (Xavier) }
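
/*
 * Worked example for the report_type table above: with
 * pvid_sw_discard_en == 1 and strip_status == HNS3_OUTER_STRP_VLAN_VLD,
 * report_type[1][2] yields HNS3_NO_STRP_VLAN_VLD, i.e. the stripped
 * outer tag (the PVID in this mode) is deliberately not reported to the
 * mbuf.
 */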
2387e28bc147SWei Hu (Xavier) 
23888973d7c4SMin Hu (Connor) static inline void
23898973d7c4SMin Hu (Connor) recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
23908973d7c4SMin Hu (Connor) 		    struct rte_mbuf *rxm, struct hns3_rx_queue *rxq,
23918973d7c4SMin Hu (Connor) 		    uint16_t data_len)
23928973d7c4SMin Hu (Connor) {
23938973d7c4SMin Hu (Connor) 	uint8_t crc_len = rxq->crc_len;
23948973d7c4SMin Hu (Connor) 
23958973d7c4SMin Hu (Connor) 	if (data_len <= crc_len) {
23968973d7c4SMin Hu (Connor) 		rte_pktmbuf_free_seg(rxm);
23978973d7c4SMin Hu (Connor) 		first_seg->nb_segs--;
23988973d7c4SMin Hu (Connor) 		last_seg->data_len = (uint16_t)(last_seg->data_len -
23998973d7c4SMin Hu (Connor) 			(crc_len - data_len));
24008973d7c4SMin Hu (Connor) 		last_seg->next = NULL;
24018973d7c4SMin Hu (Connor) 	} else
24028973d7c4SMin Hu (Connor) 		rxm->data_len = (uint16_t)(data_len - crc_len);
24038973d7c4SMin Hu (Connor) }
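
/*
 * Numeric example for recalculate_data_len() with a 4-byte CRC: if the
 * last segment carries only 2 bytes, they are pure CRC, so that mbuf is
 * freed and the remaining 2 CRC bytes are trimmed from the previous
 * segment's data_len; if the last segment carries, say, 60 bytes, only
 * its own data_len is reduced by 4.
 */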
24048973d7c4SMin Hu (Connor) 
2405521ab3e9SWei Hu (Xavier) static inline struct rte_mbuf *
2406521ab3e9SWei Hu (Xavier) hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)
2407521ab3e9SWei Hu (Xavier) {
2408521ab3e9SWei Hu (Xavier) 	int ret;
2409521ab3e9SWei Hu (Xavier) 
2410521ab3e9SWei Hu (Xavier) 	if (likely(rxq->bulk_mbuf_num > 0))
2411521ab3e9SWei Hu (Xavier) 		return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2412521ab3e9SWei Hu (Xavier) 
2413521ab3e9SWei Hu (Xavier) 	ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,
2414521ab3e9SWei Hu (Xavier) 				   HNS3_BULK_ALLOC_MBUF_NUM);
2415521ab3e9SWei Hu (Xavier) 	if (likely(ret == 0)) {
2416521ab3e9SWei Hu (Xavier) 		rxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;
2417521ab3e9SWei Hu (Xavier) 		return rxq->bulk_mbuf[--rxq->bulk_mbuf_num];
2418521ab3e9SWei Hu (Xavier) 	} else
2419521ab3e9SWei Hu (Xavier) 		return rte_mbuf_raw_alloc(rxq->mb_pool);
2420521ab3e9SWei Hu (Xavier) }
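
/*
 * Design note on hns3_rx_alloc_buffer(): the bulk get amortizes the
 * mempool access cost over HNS3_BULK_ALLOC_MBUF_NUM mbufs, and on
 * failure (e.g. a nearly empty pool) it falls back to a single raw
 * allocation so the Rx path can still make progress.
 */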
2421521ab3e9SWei Hu (Xavier) 
24223ca3dcd6SMin Hu (Connor) static void
242338b539d9SMin Hu (Connor) hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
24243ca3dcd6SMin Hu (Connor) 			     uint64_t timestamp)
242538b539d9SMin Hu (Connor) {
242638b539d9SMin Hu (Connor) 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
242738b539d9SMin Hu (Connor) 
2428e7141041SDengdui Huang 	mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
242938b539d9SMin Hu (Connor) 	if (hns3_timestamp_rx_dynflag > 0) {
243038b539d9SMin Hu (Connor) 		*RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
243138b539d9SMin Hu (Connor) 			rte_mbuf_timestamp_t *) = timestamp;
243238b539d9SMin Hu (Connor) 		mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
243338b539d9SMin Hu (Connor) 	}
243438b539d9SMin Hu (Connor) 
243538b539d9SMin Hu (Connor) 	pf->rx_timestamp = timestamp;
243638b539d9SMin Hu (Connor) }
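
/*
 * Consumer-side sketch for the timestamp written above (assuming the
 * dynamic field and flag were registered via
 * rte_mbuf_dyn_rx_timestamp_register(), as done elsewhere in the PMD):
 *
 *	if (mbuf->ol_flags & hns3_timestamp_rx_dynflag)
 *		ts = *RTE_MBUF_DYNFIELD(mbuf,
 *					hns3_timestamp_dynfield_offset,
 *					rte_mbuf_timestamp_t *);
 */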
243738b539d9SMin Hu (Connor) 
2438bba63669SWei Hu (Xavier) uint16_t
2439aa5baf47SChengwen Feng hns3_recv_pkts_simple(void *rx_queue,
2440aa5baf47SChengwen Feng 		      struct rte_mbuf **rx_pkts,
2441aa5baf47SChengwen Feng 		      uint16_t nb_pkts)
2442bba63669SWei Hu (Xavier) {
24438162238bSChengwen Feng 	volatile struct hns3_desc *rx_ring;  /* RX ring (desc) */
24448162238bSChengwen Feng 	volatile struct hns3_desc *rxdp;     /* pointer of the current desc */
2445bba63669SWei Hu (Xavier) 	struct hns3_rx_queue *rxq;      /* RX queue */
2446bba63669SWei Hu (Xavier) 	struct hns3_entry *sw_ring;
2447bba63669SWei Hu (Xavier) 	struct hns3_entry *rxe;
2448521ab3e9SWei Hu (Xavier) 	struct hns3_desc rxd;
2449521ab3e9SWei Hu (Xavier) 	struct rte_mbuf *nmb;           /* pointer of the new mbuf */
2450521ab3e9SWei Hu (Xavier) 	struct rte_mbuf *rxm;
2451521ab3e9SWei Hu (Xavier) 	uint32_t bd_base_info;
2452521ab3e9SWei Hu (Xavier) 	uint32_t l234_info;
2453521ab3e9SWei Hu (Xavier) 	uint32_t ol_info;
2454521ab3e9SWei Hu (Xavier) 	uint64_t dma_addr;
2455521ab3e9SWei Hu (Xavier) 	uint16_t nb_rx_bd;
2456521ab3e9SWei Hu (Xavier) 	uint16_t nb_rx;
2457521ab3e9SWei Hu (Xavier) 	uint16_t rx_id;
2458521ab3e9SWei Hu (Xavier) 	int ret;
2459521ab3e9SWei Hu (Xavier) 
2460521ab3e9SWei Hu (Xavier) 	nb_rx = 0;
2461521ab3e9SWei Hu (Xavier) 	nb_rx_bd = 0;
2462521ab3e9SWei Hu (Xavier) 	rxq = rx_queue;
2463521ab3e9SWei Hu (Xavier) 	rx_ring = rxq->rx_ring;
2464521ab3e9SWei Hu (Xavier) 	sw_ring = rxq->sw_ring;
2465521ab3e9SWei Hu (Xavier) 	rx_id = rxq->next_to_use;
2466521ab3e9SWei Hu (Xavier) 
2467521ab3e9SWei Hu (Xavier) 	while (nb_rx < nb_pkts) {
2468521ab3e9SWei Hu (Xavier) 		rxdp = &rx_ring[rx_id];
2469521ab3e9SWei Hu (Xavier) 		bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2470521ab3e9SWei Hu (Xavier) 		if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2471521ab3e9SWei Hu (Xavier) 			break;
2472521ab3e9SWei Hu (Xavier) 
2473521ab3e9SWei Hu (Xavier) 		rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
2474521ab3e9SWei Hu (Xavier) 			   (1u << HNS3_RXD_VLD_B)];
2475521ab3e9SWei Hu (Xavier) 
2476521ab3e9SWei Hu (Xavier) 		nmb = hns3_rx_alloc_buffer(rxq);
2477521ab3e9SWei Hu (Xavier) 		if (unlikely(nmb == NULL)) {
2478b5978613SDongdong Liu 			rte_eth_devices[rxq->port_id].data->
2479b5978613SDongdong Liu 				rx_mbuf_alloc_failed++;
2480521ab3e9SWei Hu (Xavier) 			break;
2481521ab3e9SWei Hu (Xavier) 		}
2482521ab3e9SWei Hu (Xavier) 
2483521ab3e9SWei Hu (Xavier) 		nb_rx_bd++;
2484521ab3e9SWei Hu (Xavier) 		rxe = &sw_ring[rx_id];
2485521ab3e9SWei Hu (Xavier) 		rx_id++;
2486521ab3e9SWei Hu (Xavier) 		if (unlikely(rx_id == rxq->nb_rx_desc))
2487521ab3e9SWei Hu (Xavier) 			rx_id = 0;
2488521ab3e9SWei Hu (Xavier) 
2489521ab3e9SWei Hu (Xavier) 		rte_prefetch0(sw_ring[rx_id].mbuf);
2490521ab3e9SWei Hu (Xavier) 		if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2491521ab3e9SWei Hu (Xavier) 			rte_prefetch0(&rx_ring[rx_id]);
2492521ab3e9SWei Hu (Xavier) 			rte_prefetch0(&sw_ring[rx_id]);
2493521ab3e9SWei Hu (Xavier) 		}
2494521ab3e9SWei Hu (Xavier) 
2495521ab3e9SWei Hu (Xavier) 		rxm = rxe->mbuf;
249638b539d9SMin Hu (Connor) 		rxm->ol_flags = 0;
2497521ab3e9SWei Hu (Xavier) 		rxe->mbuf = nmb;
2498521ab3e9SWei Hu (Xavier) 
249938b539d9SMin Hu (Connor) 		if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
25003ca3dcd6SMin Hu (Connor) 			hns3_rx_ptp_timestamp_handle(rxq, rxm,
25013ca3dcd6SMin Hu (Connor) 				rte_le_to_cpu_64(rxdp->timestamp));
250238b539d9SMin Hu (Connor) 
2503521ab3e9SWei Hu (Xavier) 		dma_addr = rte_mbuf_data_iova_default(nmb);
2504521ab3e9SWei Hu (Xavier) 		rxdp->addr = rte_cpu_to_le_64(dma_addr);
2505521ab3e9SWei Hu (Xavier) 		rxdp->rx.bd_base_info = 0;
2506521ab3e9SWei Hu (Xavier) 
2507521ab3e9SWei Hu (Xavier) 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2508521ab3e9SWei Hu (Xavier) 		rxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -
2509521ab3e9SWei Hu (Xavier) 				rxq->crc_len;
2510521ab3e9SWei Hu (Xavier) 		rxm->data_len = rxm->pkt_len;
2511521ab3e9SWei Hu (Xavier) 		rxm->port = rxq->port_id;
2512521ab3e9SWei Hu (Xavier) 		rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2513daa02b5cSOlivier Matz 		rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
2514521ab3e9SWei Hu (Xavier) 		if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2515521ab3e9SWei Hu (Xavier) 			rxm->hash.fdir.hi =
2516521ab3e9SWei Hu (Xavier) 				rte_le_to_cpu_16(rxd.rx.fd_id);
2517daa02b5cSOlivier Matz 			rxm->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2518521ab3e9SWei Hu (Xavier) 		}
2519521ab3e9SWei Hu (Xavier) 		rxm->nb_segs = 1;
2520521ab3e9SWei Hu (Xavier) 		rxm->next = NULL;
2521521ab3e9SWei Hu (Xavier) 
2522521ab3e9SWei Hu (Xavier) 		/* Load the remaining descriptor data and extract necessary fields */
2523521ab3e9SWei Hu (Xavier) 		l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2524521ab3e9SWei Hu (Xavier) 		ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2525bd739929SChengwen Feng 		ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
2526521ab3e9SWei Hu (Xavier) 		if (unlikely(ret))
2527521ab3e9SWei Hu (Xavier) 			goto pkt_err;
2528521ab3e9SWei Hu (Xavier) 
2529521ab3e9SWei Hu (Xavier) 		rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
2530521ab3e9SWei Hu (Xavier) 
253138b539d9SMin Hu (Connor) 		if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2532daa02b5cSOlivier Matz 			rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
253338b539d9SMin Hu (Connor) 
2534521ab3e9SWei Hu (Xavier) 		hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
2535521ab3e9SWei Hu (Xavier) 
2536fdcd6a3eSMin Hu (Connor) 		/* Increment bytes counter */
2537fdcd6a3eSMin Hu (Connor) 		rxq->basic_stats.bytes += rxm->pkt_len;
2538fdcd6a3eSMin Hu (Connor) 
2539521ab3e9SWei Hu (Xavier) 		rx_pkts[nb_rx++] = rxm;
2540521ab3e9SWei Hu (Xavier) 		continue;
2541521ab3e9SWei Hu (Xavier) pkt_err:
2542521ab3e9SWei Hu (Xavier) 		rte_pktmbuf_free(rxm);
2543521ab3e9SWei Hu (Xavier) 	}
2544521ab3e9SWei Hu (Xavier) 
2545521ab3e9SWei Hu (Xavier) 	rxq->next_to_use = rx_id;
2546521ab3e9SWei Hu (Xavier) 	rxq->rx_free_hold += nb_rx_bd;
2547521ab3e9SWei Hu (Xavier) 	if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2548521ab3e9SWei Hu (Xavier) 		hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2549521ab3e9SWei Hu (Xavier) 		rxq->rx_free_hold = 0;
2550521ab3e9SWei Hu (Xavier) 	}
2551521ab3e9SWei Hu (Xavier) 
2552521ab3e9SWei Hu (Xavier) 	return nb_rx;
2553521ab3e9SWei Hu (Xavier) }
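
/*
 * Application-side usage sketch: the burst functions in this file are
 * installed as dev->rx_pkt_burst and reached through the generic ethdev
 * entry point; port_id/queue_id below are assumed to be configured.
 */
static inline void
example_poll_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);	/* process, then free */
}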
2554521ab3e9SWei Hu (Xavier) 
2555521ab3e9SWei Hu (Xavier) uint16_t
2556521ab3e9SWei Hu (Xavier) hns3_recv_scattered_pkts(void *rx_queue,
2557521ab3e9SWei Hu (Xavier) 			 struct rte_mbuf **rx_pkts,
2558521ab3e9SWei Hu (Xavier) 			 uint16_t nb_pkts)
2559521ab3e9SWei Hu (Xavier) {
2560521ab3e9SWei Hu (Xavier) 	volatile struct hns3_desc *rx_ring;  /* RX ring (desc) */
2561521ab3e9SWei Hu (Xavier) 	volatile struct hns3_desc *rxdp;     /* pointer of the current desc */
2562521ab3e9SWei Hu (Xavier) 	struct hns3_rx_queue *rxq;      /* RX queue */
2563521ab3e9SWei Hu (Xavier) 	struct hns3_entry *sw_ring;
2564521ab3e9SWei Hu (Xavier) 	struct hns3_entry *rxe;
2565bba63669SWei Hu (Xavier) 	struct rte_mbuf *first_seg;
2566bba63669SWei Hu (Xavier) 	struct rte_mbuf *last_seg;
25678162238bSChengwen Feng 	struct hns3_desc rxd;
2568bba63669SWei Hu (Xavier) 	struct rte_mbuf *nmb;           /* pointer of the new mbuf */
2569bba63669SWei Hu (Xavier) 	struct rte_mbuf *rxm;
2570bba63669SWei Hu (Xavier) 	struct rte_eth_dev *dev;
2571bba63669SWei Hu (Xavier) 	uint32_t bd_base_info;
25723ca3dcd6SMin Hu (Connor) 	uint64_t timestamp;
2573bba63669SWei Hu (Xavier) 	uint32_t l234_info;
25741f295c40SWei Hu (Xavier) 	uint32_t gro_size;
2575bba63669SWei Hu (Xavier) 	uint32_t ol_info;
2576bba63669SWei Hu (Xavier) 	uint64_t dma_addr;
2577bba63669SWei Hu (Xavier) 	uint16_t nb_rx_bd;
2578bba63669SWei Hu (Xavier) 	uint16_t nb_rx;
2579bba63669SWei Hu (Xavier) 	uint16_t rx_id;
2580bba63669SWei Hu (Xavier) 	int ret;
2581bba63669SWei Hu (Xavier) 
2582bba63669SWei Hu (Xavier) 	nb_rx = 0;
2583bba63669SWei Hu (Xavier) 	nb_rx_bd = 0;
2584bba63669SWei Hu (Xavier) 	rxq = rx_queue;
2585bba63669SWei Hu (Xavier) 
2586ceabee45SWei Hu (Xavier) 	rx_id = rxq->next_to_use;
2587bba63669SWei Hu (Xavier) 	rx_ring = rxq->rx_ring;
2588ceabee45SWei Hu (Xavier) 	sw_ring = rxq->sw_ring;
2589bba63669SWei Hu (Xavier) 	first_seg = rxq->pkt_first_seg;
2590bba63669SWei Hu (Xavier) 	last_seg = rxq->pkt_last_seg;
2591bba63669SWei Hu (Xavier) 
25925cf7a75bSWei Hu (Xavier) 	while (nb_rx < nb_pkts) {
2593bba63669SWei Hu (Xavier) 		rxdp = &rx_ring[rx_id];
2594bba63669SWei Hu (Xavier) 		bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
2595521ab3e9SWei Hu (Xavier) 		if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2596bba63669SWei Hu (Xavier) 			break;
2597521ab3e9SWei Hu (Xavier) 
25988162238bSChengwen Feng 		/*
25998162238bSChengwen Feng 		 * The interaction between software and hardware when receiving a
26008162238bSChengwen Feng 		 * new packet in the hns3 network engine is as follows:
26018162238bSChengwen Feng 		 * 1. The hardware engine first writes the packet content to the
26028162238bSChengwen Feng 		 *    memory pointed to by the 'addr' field of the Rx Buffer
26038162238bSChengwen Feng 		 *    Descriptor, then fills the result of parsing the packet,
26048162238bSChengwen Feng 		 *    including the valid field, into the Rx Buffer Descriptor in
26058162238bSChengwen Feng 		 *    one write operation.
26068162238bSChengwen Feng 		 * 2. The driver reads the Rx BD's valid field in a loop to check
26078162238bSChengwen Feng 		 *    whether the BD is valid; once it is, the driver assigns a new
26088162238bSChengwen Feng 		 *    address to the addr field, clears the valid field, gets the
26098162238bSChengwen Feng 		 *    other information of the packet by parsing the Rx BD's
26108162238bSChengwen Feng 		 *    remaining fields, and finally writes back the number of Rx BDs
26118162238bSChengwen Feng 		 *    processed by the driver to the HNS3_RING_RX_HEAD_REG register
26128162238bSChengwen Feng 		 *    to inform the hardware.
26138162238bSChengwen Feng 		 * In the above process the ordering is critical: we must make sure
26148162238bSChengwen Feng 		 * the CPU reads the Rx BD's other fields only after the Rx BD is
26158162238bSChengwen Feng 		 * known to be valid.
26168162238bSChengwen Feng 		 *
26178162238bSChengwen Feng 		 * There are two types of re-ordering: compiler re-ordering and
26188162238bSChengwen Feng 		 * CPU re-ordering under the ARMv8 architecture.
26198162238bSChengwen Feng 		 * 1. We use volatile to deal with compiler re-ordering, which is
26208162238bSChengwen Feng 		 *    why rx_ring/rxdp are defined volatile.
26218162238bSChengwen Feng 		 * 2. A memory barrier is the usual way to deal with CPU
26228162238bSChengwen Feng 		 *    re-ordering, but its cost is high.
26238162238bSChengwen Feng 		 *
26248162238bSChengwen Feng 		 * To avoid the high cost of a memory barrier, we instead rely on
26258162238bSChengwen Feng 		 * the data dependency ordering guaranteed by the ARMv8
26268162238bSChengwen Feng 		 * architecture, for example:
26278162238bSChengwen Feng 		 *      instr01: load A
26288162238bSChengwen Feng 		 *      instr02: load B <- A
26298162238bSChengwen Feng 		 * here instr02 always executes after instr01.
26308162238bSChengwen Feng 		 *
26318162238bSChengwen Feng 		 * To construct the data dependency ordering, we use the
26328162238bSChengwen Feng 		 * following assignment:
26338162238bSChengwen Feng 		 *      rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
26348162238bSChengwen Feng 		 *                 (1u<<HNS3_RXD_VLD_B)]
26358162238bSChengwen Feng 		 * Using the gcc compiler under the ARMv8 architecture, the related
26368162238bSChengwen Feng 		 * assembly code looks as follows:
26378162238bSChengwen Feng 		 * note: (1u << HNS3_RXD_VLD_B) equals 0x10
26388162238bSChengwen Feng 		 *      instr01: ldr w26, [x22, #28]  --read bd_base_info
26398162238bSChengwen Feng 		 *      instr02: and w0, w26, #0x10   --calc bd_base_info & 0x10
26408162238bSChengwen Feng 		 *      instr03: sub w0, w0, #0x10    --calc (bd_base_info &
26418162238bSChengwen Feng 		 *                                            0x10) - 0x10
26428162238bSChengwen Feng 		 *      instr04: add x0, x22, x0, lsl #5 --calc copy source addr
26438162238bSChengwen Feng 		 *      instr05: ldp x2, x3, [x0]
26448162238bSChengwen Feng 		 *      instr06: stp x2, x3, [x29, #256] --copy BD's [0 ~ 15]B
26458162238bSChengwen Feng 		 *      instr07: ldp x4, x5, [x0, #16]
26468162238bSChengwen Feng 		 *      instr08: stp x4, x5, [x29, #272] --copy BD's [16 ~ 31]B
26478162238bSChengwen Feng 		 * instr05~08 depend on x0's value, x0 depends on w26's value, and
26488162238bSChengwen Feng 		 * w26 is the bd_base_info; this forms the data dependency
26498162238bSChengwen Feng 		 * ordering.
26508162238bSChengwen Feng 		 * note: if the BD is valid, (bd_base_info & (1u<<HNS3_RXD_VLD_B)) -
26518162238bSChengwen Feng 		 *       (1u<<HNS3_RXD_VLD_B) is always zero, so the
26528162238bSChengwen Feng 		 *       assignment is correct.
26538162238bSChengwen Feng 		 *
26548162238bSChengwen Feng 		 * So we use data dependency ordering instead of a memory barrier
26558162238bSChengwen Feng 		 * to improve receive performance.
26568162238bSChengwen Feng 		 */
26578162238bSChengwen Feng 		rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -
26588162238bSChengwen Feng 			   (1u << HNS3_RXD_VLD_B)];
2659e5e6ffc3SDengdui Huang 		RX_BD_LOG(&rxq->hns->hw, DEBUG, &rxd);
2660bba63669SWei Hu (Xavier) 
2661521ab3e9SWei Hu (Xavier) 		nmb = hns3_rx_alloc_buffer(rxq);
2662bba63669SWei Hu (Xavier) 		if (unlikely(nmb == NULL)) {
26638c744977SChengchang Tang 			dev = &rte_eth_devices[rxq->port_id];
2664bba63669SWei Hu (Xavier) 			dev->data->rx_mbuf_alloc_failed++;
2665bba63669SWei Hu (Xavier) 			break;
2666bba63669SWei Hu (Xavier) 		}
2667bba63669SWei Hu (Xavier) 
2668bba63669SWei Hu (Xavier) 		nb_rx_bd++;
2669bba63669SWei Hu (Xavier) 		rxe = &sw_ring[rx_id];
2670bba63669SWei Hu (Xavier) 		rx_id++;
26715cf7a75bSWei Hu (Xavier) 		if (unlikely(rx_id == rxq->nb_rx_desc))
2672bba63669SWei Hu (Xavier) 			rx_id = 0;
2673bba63669SWei Hu (Xavier) 
2674bba63669SWei Hu (Xavier) 		rte_prefetch0(sw_ring[rx_id].mbuf);
2675521ab3e9SWei Hu (Xavier) 		if ((rx_id & HNS3_RX_RING_PREFETCTH_MASK) == 0) {
2676bba63669SWei Hu (Xavier) 			rte_prefetch0(&rx_ring[rx_id]);
2677bba63669SWei Hu (Xavier) 			rte_prefetch0(&sw_ring[rx_id]);
2678bba63669SWei Hu (Xavier) 		}
2679bba63669SWei Hu (Xavier) 
2680bba63669SWei Hu (Xavier) 		rxm = rxe->mbuf;
2681bba63669SWei Hu (Xavier) 		rxe->mbuf = nmb;
2682bba63669SWei Hu (Xavier) 
26833ca3dcd6SMin Hu (Connor) 		if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
26843ca3dcd6SMin Hu (Connor) 			timestamp = rte_le_to_cpu_64(rxdp->timestamp);
26853ca3dcd6SMin Hu (Connor) 
2686bba63669SWei Hu (Xavier) 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2687bba63669SWei Hu (Xavier) 		rxdp->rx.bd_base_info = 0;
26888162238bSChengwen Feng 		rxdp->addr = dma_addr;
2689bba63669SWei Hu (Xavier) 
2690bba63669SWei Hu (Xavier) 		if (first_seg == NULL) {
2691bba63669SWei Hu (Xavier) 			first_seg = rxm;
2692bba63669SWei Hu (Xavier) 			first_seg->nb_segs = 1;
2693bba63669SWei Hu (Xavier) 		} else {
2694bba63669SWei Hu (Xavier) 			first_seg->nb_segs++;
2695bba63669SWei Hu (Xavier) 			last_seg->next = rxm;
2696bba63669SWei Hu (Xavier) 		}
2697bba63669SWei Hu (Xavier) 
2698bba63669SWei Hu (Xavier) 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2699521ab3e9SWei Hu (Xavier) 		rxm->data_len = rte_le_to_cpu_16(rxd.rx.size);
2700bba63669SWei Hu (Xavier) 
2701521ab3e9SWei Hu (Xavier) 		if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2702bba63669SWei Hu (Xavier) 			last_seg = rxm;
2703521ab3e9SWei Hu (Xavier) 			rxm->next = NULL;
2704bba63669SWei Hu (Xavier) 			continue;
2705bba63669SWei Hu (Xavier) 		}
2706bba63669SWei Hu (Xavier) 
2707bd8f90f0SDengdui Huang 		first_seg->ol_flags = 0;
27084801f040SMin Hu (Connor) 		if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
27093ca3dcd6SMin Hu (Connor) 			hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp);
27104801f040SMin Hu (Connor) 
27118973d7c4SMin Hu (Connor) 		/*
27128973d7c4SMin Hu (Connor) 		 * The last buffer of the received packet: the packet length from
27138973d7c4SMin Hu (Connor) 		 * the buffer descriptor may contain the CRC length, which must be
27148973d7c4SMin Hu (Connor) 		 * subtracted from it, and likewise from the data length.
27158973d7c4SMin Hu (Connor) 		 */
2716521ab3e9SWei Hu (Xavier) 		first_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);
27178973d7c4SMin Hu (Connor) 
27188973d7c4SMin Hu (Connor) 		/*
27198973d7c4SMin Hu (Connor) 		 * This is the last buffer of the received packet. If the CRC
27208973d7c4SMin Hu (Connor) 		 * is not stripped by the hardware:
27218973d7c4SMin Hu (Connor) 		 *  - Subtract the CRC length from the total packet length.
27228973d7c4SMin Hu (Connor) 		 *  - If the last buffer only contains the whole CRC or a part
27238973d7c4SMin Hu (Connor) 		 *  of it, free the mbuf associated to the last buffer. If part
27248973d7c4SMin Hu (Connor) 		 *  of the CRC is also contained in the previous mbuf, subtract
27258973d7c4SMin Hu (Connor) 		 *  the length of that CRC part from the data length of the
27268973d7c4SMin Hu (Connor) 		 *  previous mbuf.
27278973d7c4SMin Hu (Connor) 		 */
27288973d7c4SMin Hu (Connor) 		rxm->next = NULL;
27298973d7c4SMin Hu (Connor) 		if (unlikely(rxq->crc_len > 0)) {
27308973d7c4SMin Hu (Connor) 			first_seg->pkt_len -= rxq->crc_len;
27318973d7c4SMin Hu (Connor) 			recalculate_data_len(first_seg, last_seg, rxm, rxq,
2732521ab3e9SWei Hu (Xavier) 				rxm->data_len);
27338973d7c4SMin Hu (Connor) 		}
27348973d7c4SMin Hu (Connor) 
2735bba63669SWei Hu (Xavier) 		first_seg->port = rxq->port_id;
27368162238bSChengwen Feng 		first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
2737bd8f90f0SDengdui Huang 		first_seg->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
2738521ab3e9SWei Hu (Xavier) 		if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
2739bba63669SWei Hu (Xavier) 			first_seg->hash.fdir.hi =
2740521ab3e9SWei Hu (Xavier) 				rte_le_to_cpu_16(rxd.rx.fd_id);
2741daa02b5cSOlivier Matz 			first_seg->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2742bba63669SWei Hu (Xavier) 		}
2743bba63669SWei Hu (Xavier) 
27441f295c40SWei Hu (Xavier) 		gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
27451f295c40SWei Hu (Xavier) 					  HNS3_RXD_GRO_SIZE_S);
27461f295c40SWei Hu (Xavier) 		if (gro_size != 0) {
2747daa02b5cSOlivier Matz 			first_seg->ol_flags |= RTE_MBUF_F_RX_LRO;
27481f295c40SWei Hu (Xavier) 			first_seg->tso_segsz = gro_size;
27491f295c40SWei Hu (Xavier) 		}
27501f295c40SWei Hu (Xavier) 
2751521ab3e9SWei Hu (Xavier) 		l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
2752521ab3e9SWei Hu (Xavier) 		ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
2753bba63669SWei Hu (Xavier) 		ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
2754bd739929SChengwen Feng 					 l234_info);
2755bba63669SWei Hu (Xavier) 		if (unlikely(ret))
2756bba63669SWei Hu (Xavier) 			goto pkt_err;
2757bba63669SWei Hu (Xavier) 
2758521ab3e9SWei Hu (Xavier) 		first_seg->packet_type = hns3_rx_calc_ptype(rxq,
2759521ab3e9SWei Hu (Xavier) 						l234_info, ol_info);
2760bba63669SWei Hu (Xavier) 
27614801f040SMin Hu (Connor) 		if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
2762daa02b5cSOlivier Matz 			rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
27634801f040SMin Hu (Connor) 
27648c744977SChengchang Tang 		hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
2765bba63669SWei Hu (Xavier) 
2766fdcd6a3eSMin Hu (Connor) 		/* Increment bytes counter */
2767fdcd6a3eSMin Hu (Connor) 		rxq->basic_stats.bytes += first_seg->pkt_len;
2768fdcd6a3eSMin Hu (Connor) 
2769bba63669SWei Hu (Xavier) 		rx_pkts[nb_rx++] = first_seg;
2770bba63669SWei Hu (Xavier) 		first_seg = NULL;
2771bba63669SWei Hu (Xavier) 		continue;
2772bba63669SWei Hu (Xavier) pkt_err:
2773bba63669SWei Hu (Xavier) 		rte_pktmbuf_free(first_seg);
2774bba63669SWei Hu (Xavier) 		first_seg = NULL;
2775bba63669SWei Hu (Xavier) 	}
2776bba63669SWei Hu (Xavier) 
2777ceabee45SWei Hu (Xavier) 	rxq->next_to_use = rx_id;
2778bba63669SWei Hu (Xavier) 	rxq->pkt_first_seg = first_seg;
2779bba63669SWei Hu (Xavier) 	rxq->pkt_last_seg = last_seg;
2780ffd0ec01SWei Hu (Xavier) 
2781ceabee45SWei Hu (Xavier) 	rxq->rx_free_hold += nb_rx_bd;
2782ceabee45SWei Hu (Xavier) 	if (rxq->rx_free_hold > rxq->rx_free_thresh) {
2783323df894SWei Hu (Xavier) 		hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
2784ceabee45SWei Hu (Xavier) 		rxq->rx_free_hold = 0;
2785ffd0ec01SWei Hu (Xavier) 	}
2786bba63669SWei Hu (Xavier) 
2787bba63669SWei Hu (Xavier) 	return nb_rx;
2788bba63669SWei Hu (Xavier) }
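
/*
 * For contrast with the data dependency ordering documented in
 * hns3_recv_scattered_pkts(), a portable barrier-based descriptor read
 * would look like the sketch below; it is not used here because the
 * rte_io_rmb() per descriptor is comparatively expensive on ARM64.
 */
static inline bool
example_read_bd_with_barrier(volatile struct hns3_desc *rxdp,
			     struct hns3_desc *rxd)
{
	uint32_t bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);

	if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
		return false;
	rte_io_rmb();	/* order the valid-bit load before the BD copy */
	*rxd = *(const struct hns3_desc *)(uintptr_t)rxdp;
	return true;
}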
2789bba63669SWei Hu (Xavier) 
2790a3d4f4d2SWei Hu (Xavier) void __rte_weak
2791a3d4f4d2SWei Hu (Xavier) hns3_rxq_vec_setup(__rte_unused struct hns3_rx_queue *rxq)
2792a3d4f4d2SWei Hu (Xavier) {
2793a3d4f4d2SWei Hu (Xavier) }
2794a3d4f4d2SWei Hu (Xavier) 
2795a3d4f4d2SWei Hu (Xavier) int __rte_weak
2796a3d4f4d2SWei Hu (Xavier) hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
2797a3d4f4d2SWei Hu (Xavier) {
2798a3d4f4d2SWei Hu (Xavier) 	return -ENOTSUP;
2799a3d4f4d2SWei Hu (Xavier) }
2800a3d4f4d2SWei Hu (Xavier) 
2801a3d4f4d2SWei Hu (Xavier) uint16_t __rte_weak
280282c2ca6dSMin Hu (Connor) hns3_recv_pkts_vec(__rte_unused void *rx_queue,
2803f0c243a6SChengwen Feng 		   __rte_unused struct rte_mbuf **rx_pkts,
2804a3d4f4d2SWei Hu (Xavier) 		   __rte_unused uint16_t nb_pkts)
2805a3d4f4d2SWei Hu (Xavier) {
2806a3d4f4d2SWei Hu (Xavier) 	return 0;
2807a3d4f4d2SWei Hu (Xavier) }
2808a3d4f4d2SWei Hu (Xavier) 
2809952ebaccSWei Hu (Xavier) uint16_t __rte_weak
281082c2ca6dSMin Hu (Connor) hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue,
2811f0c243a6SChengwen Feng 		       __rte_unused struct rte_mbuf **rx_pkts,
2812952ebaccSWei Hu (Xavier) 		       __rte_unused uint16_t nb_pkts)
2813952ebaccSWei Hu (Xavier) {
2814952ebaccSWei Hu (Xavier) 	return 0;
2815952ebaccSWei Hu (Xavier) }
2816952ebaccSWei Hu (Xavier) 
2817bba63669SWei Hu (Xavier) int
2818521ab3e9SWei Hu (Xavier) hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2819521ab3e9SWei Hu (Xavier) 		       struct rte_eth_burst_mode *mode)
2820521ab3e9SWei Hu (Xavier) {
2821521ab3e9SWei Hu (Xavier) 	static const struct {
2822521ab3e9SWei Hu (Xavier) 		eth_rx_burst_t pkt_burst;
2823521ab3e9SWei Hu (Xavier) 		const char *info;
2824521ab3e9SWei Hu (Xavier) 	} burst_infos[] = {
2825aa5baf47SChengwen Feng 		{ hns3_recv_pkts_simple,	"Scalar Simple" },
2826521ab3e9SWei Hu (Xavier) 		{ hns3_recv_scattered_pkts,	"Scalar Scattered" },
2827a3d4f4d2SWei Hu (Xavier) 		{ hns3_recv_pkts_vec,		"Vector Neon"   },
2828952ebaccSWei Hu (Xavier) 		{ hns3_recv_pkts_vec_sve,	"Vector Sve"    },
282910f91af5SHuisong Li 		{ rte_eth_pkt_burst_dummy,	"Dummy"         },
2830521ab3e9SWei Hu (Xavier) 	};
2831521ab3e9SWei Hu (Xavier) 
2832521ab3e9SWei Hu (Xavier) 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2833521ab3e9SWei Hu (Xavier) 	int ret = -EINVAL;
2834521ab3e9SWei Hu (Xavier) 	unsigned int i;
2835521ab3e9SWei Hu (Xavier) 
2836521ab3e9SWei Hu (Xavier) 	for (i = 0; i < RTE_DIM(burst_infos); i++) {
2837521ab3e9SWei Hu (Xavier) 		if (pkt_burst == burst_infos[i].pkt_burst) {
2838521ab3e9SWei Hu (Xavier) 			snprintf(mode->info, sizeof(mode->info), "%s",
2839521ab3e9SWei Hu (Xavier) 				 burst_infos[i].info);
2840521ab3e9SWei Hu (Xavier) 			ret = 0;
2841521ab3e9SWei Hu (Xavier) 			break;
2842521ab3e9SWei Hu (Xavier) 		}
2843521ab3e9SWei Hu (Xavier) 	}
2844521ab3e9SWei Hu (Xavier) 
2845521ab3e9SWei Hu (Xavier) 	return ret;
2846521ab3e9SWei Hu (Xavier) }
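
/*
 * Usage sketch: an application can query which burst function is active
 * through the generic ethdev API, e.g.:
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */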
2847521ab3e9SWei Hu (Xavier) 
2848952ebaccSWei Hu (Xavier) static bool
2849e40ad6fcSChengwen Feng hns3_get_default_vec_support(void)
2850e40ad6fcSChengwen Feng {
2851e40ad6fcSChengwen Feng #if defined(RTE_ARCH_ARM64)
285286644b3fSChengwen Feng 	if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
285386644b3fSChengwen Feng 		return false;
2854e40ad6fcSChengwen Feng 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
2855e40ad6fcSChengwen Feng 		return true;
2856e40ad6fcSChengwen Feng #endif
2857e40ad6fcSChengwen Feng 	return false;
2858e40ad6fcSChengwen Feng }
2859e40ad6fcSChengwen Feng 
2860e40ad6fcSChengwen Feng static bool
2861e40ad6fcSChengwen Feng hns3_get_sve_support(void)
2862952ebaccSWei Hu (Xavier) {
2863699fa1d4SChengwen Feng #if defined(RTE_HAS_SVE_ACLE)
286486644b3fSChengwen Feng 	if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
286586644b3fSChengwen Feng 		return false;
2866952ebaccSWei Hu (Xavier) 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
2867952ebaccSWei Hu (Xavier) 		return true;
2868952ebaccSWei Hu (Xavier) #endif
2869952ebaccSWei Hu (Xavier) 	return false;
2870952ebaccSWei Hu (Xavier) }
2871952ebaccSWei Hu (Xavier) 
2872521ab3e9SWei Hu (Xavier) static eth_rx_burst_t
2873521ab3e9SWei Hu (Xavier) hns3_get_rx_function(struct rte_eth_dev *dev)
2874521ab3e9SWei Hu (Xavier) {
2875521ab3e9SWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
2876521ab3e9SWei Hu (Xavier) 	uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
2877a124f9e9SChengwen Feng 	bool vec_allowed, sve_allowed, simple_allowed;
2878e40ad6fcSChengwen Feng 	bool vec_support;
2879521ab3e9SWei Hu (Xavier) 
2880e40ad6fcSChengwen Feng 	vec_support = hns3_rx_check_vec_support(dev) == 0;
2881e40ad6fcSChengwen Feng 	vec_allowed = vec_support && hns3_get_default_vec_support();
2882e40ad6fcSChengwen Feng 	sve_allowed = vec_support && hns3_get_sve_support();
28837e2e162eSChengwen Feng 	simple_allowed = !dev->data->scattered_rx &&
2884295968d1SFerruh Yigit 			 (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0;
2885a3d4f4d2SWei Hu (Xavier) 
2886a124f9e9SChengwen Feng 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
2887a124f9e9SChengwen Feng 		return hns3_recv_pkts_vec;
2888a124f9e9SChengwen Feng 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
2889a124f9e9SChengwen Feng 		return hns3_recv_pkts_vec_sve;
2890a124f9e9SChengwen Feng 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
2891aa5baf47SChengwen Feng 		return hns3_recv_pkts_simple;
2892a124f9e9SChengwen Feng 	if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
2893a124f9e9SChengwen Feng 		return hns3_recv_scattered_pkts;
2894a124f9e9SChengwen Feng 
2895a124f9e9SChengwen Feng 	if (vec_allowed)
2896a124f9e9SChengwen Feng 		return hns3_recv_pkts_vec;
2897a124f9e9SChengwen Feng 	if (simple_allowed)
2898aa5baf47SChengwen Feng 		return hns3_recv_pkts_simple;
2899521ab3e9SWei Hu (Xavier) 
2900521ab3e9SWei Hu (Xavier) 	return hns3_recv_scattered_pkts;
2901521ab3e9SWei Hu (Xavier) }
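
/*
 * Selection note for hns3_get_rx_function(): rx_func_hint comes from the
 * driver's devargs (e.g. "-a <BDF>,rx_func_hint=vec" on the EAL command
 * line, assuming the running DPDK version exposes that devarg); with no
 * hint, the fallback order prefers the NEON path, then the simple scalar
 * path, and finally the scattered path, which handles every offload
 * combination.
 */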
29027ef93390SWei Hu (Xavier) 
29037ef93390SWei Hu (Xavier) static int
29047ef93390SWei Hu (Xavier) hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
29057ef93390SWei Hu (Xavier) 			 uint16_t nb_desc, uint16_t *tx_rs_thresh,
29067ef93390SWei Hu (Xavier) 			 uint16_t *tx_free_thresh, uint16_t idx)
29077ef93390SWei Hu (Xavier) {
29087ef93390SWei Hu (Xavier) #define HNS3_TX_RS_FREE_THRESH_GAP	8
29097ef93390SWei Hu (Xavier) 	uint16_t rs_thresh, free_thresh, fast_free_thresh;
29107ef93390SWei Hu (Xavier) 
29117ef93390SWei Hu (Xavier) 	if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
29127ef93390SWei Hu (Xavier) 	    nb_desc % HNS3_ALIGN_RING_DESC) {
29137ef93390SWei Hu (Xavier) 		hns3_err(hw, "number (%u) of tx descriptors is invalid",
29147ef93390SWei Hu (Xavier) 			 nb_desc);
29157ef93390SWei Hu (Xavier) 		return -EINVAL;
29167ef93390SWei Hu (Xavier) 	}
29177ef93390SWei Hu (Xavier) 
29187ef93390SWei Hu (Xavier) 	rs_thresh = (conf->tx_rs_thresh > 0) ?
29197ef93390SWei Hu (Xavier) 			conf->tx_rs_thresh : HNS3_DEFAULT_TX_RS_THRESH;
29207ef93390SWei Hu (Xavier) 	free_thresh = (conf->tx_free_thresh > 0) ?
29217ef93390SWei Hu (Xavier) 			conf->tx_free_thresh : HNS3_DEFAULT_TX_FREE_THRESH;
29227ef93390SWei Hu (Xavier) 	if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
29237ef93390SWei Hu (Xavier) 	    rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
29247ef93390SWei Hu (Xavier) 	    free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
29252427c27eSHongbo Zheng 		hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
29262427c27eSHongbo Zheng 			 "(%u) of tx descriptors for port=%u queue=%u check "
29277ef93390SWei Hu (Xavier) 			 "fail!",
29287ef93390SWei Hu (Xavier) 			 rs_thresh, free_thresh, nb_desc, hw->data->port_id,
29297ef93390SWei Hu (Xavier) 			 idx);
29307ef93390SWei Hu (Xavier) 		return -EINVAL;
29317ef93390SWei Hu (Xavier) 	}
29327ef93390SWei Hu (Xavier) 
29337ef93390SWei Hu (Xavier) 	if (conf->tx_free_thresh == 0) {
29347ef93390SWei Hu (Xavier) 		/* Fast free Tx memory buffer to improve cache hit rate */
29357ef93390SWei Hu (Xavier) 		fast_free_thresh = nb_desc - rs_thresh;
29367ef93390SWei Hu (Xavier) 		if (fast_free_thresh >=
29377ef93390SWei Hu (Xavier) 		    HNS3_TX_FAST_FREE_AHEAD + HNS3_DEFAULT_TX_FREE_THRESH)
29387ef93390SWei Hu (Xavier) 			free_thresh = fast_free_thresh -
29397ef93390SWei Hu (Xavier) 					HNS3_TX_FAST_FREE_AHEAD;
29407ef93390SWei Hu (Xavier) 	}
29417ef93390SWei Hu (Xavier) 
29427ef93390SWei Hu (Xavier) 	*tx_rs_thresh = rs_thresh;
29437ef93390SWei Hu (Xavier) 	*tx_free_thresh = free_thresh;
29447ef93390SWei Hu (Xavier) 	return 0;
29457ef93390SWei Hu (Xavier) }
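
/*
 * Worked example for the checks above (illustrative values): with
 * nb_desc = 1024, tx_rs_thresh = 32 and tx_free_thresh = 32, all
 * constraints hold: 32 + 32 <= 1024, 1024 % 32 == 0, and both
 * thresholds stay below 1024 - HNS3_TX_RS_FREE_THRESH_GAP.
 */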
29467ef93390SWei Hu (Xavier) 
294723e317ddSChengwen Feng static void *
294823e317ddSChengwen Feng hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
294923e317ddSChengwen Feng {
295023e317ddSChengwen Feng #define HNS3_TX_PUSH_TQP_REGION_SIZE		0x10000
295123e317ddSChengwen Feng #define HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET	64
295223e317ddSChengwen Feng #define HNS3_TX_PUSH_PCI_BAR_INDEX		4
295323e317ddSChengwen Feng 
295423e317ddSChengwen Feng 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
295523e317ddSChengwen Feng 	uint8_t bar_id = HNS3_TX_PUSH_PCI_BAR_INDEX;
295623e317ddSChengwen Feng 
295723e317ddSChengwen Feng 	/*
295823e317ddSChengwen Feng 	 * If the device supports Tx push then its PCIe bar45 must exist,
295923e317ddSChengwen Feng 	 * and the DPDK framework maps bar45 by default in the PCI probe stage.
296023e317ddSChengwen Feng 	 *
296123e317ddSChengwen Feng 	 * In bar45, the first half is for RoCE (RDMA over Converged
296223e317ddSChengwen Feng 	 * Ethernet) and the second half is for the NIC; every TQP occupies 64KB.
296323e317ddSChengwen Feng 	 *
296423e317ddSChengwen Feng 	 * The quick doorbell is located at a 64B offset in the TQP region.
296523e317ddSChengwen Feng 	 */
296623e317ddSChengwen Feng 	return (char *)pci_dev->mem_resource[bar_id].addr +
296723e317ddSChengwen Feng 			(pci_dev->mem_resource[bar_id].len >> 1) +
296823e317ddSChengwen Feng 			HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
296923e317ddSChengwen Feng 			HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
297023e317ddSChengwen Feng }
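
/*
 * Address arithmetic example (illustrative numbers): with an 8GB bar45,
 * the NIC half starts at bar_base + 4GB, so queue 3's quick doorbell is
 * bar_base + 4GB + 3 * HNS3_TX_PUSH_TQP_REGION_SIZE +
 * HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET.
 */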
297123e317ddSChengwen Feng 
297223e317ddSChengwen Feng void
297323e317ddSChengwen Feng hns3_tx_push_init(struct rte_eth_dev *dev)
297423e317ddSChengwen Feng {
297523e317ddSChengwen Feng 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
297623e317ddSChengwen Feng 	volatile uint32_t *reg;
297723e317ddSChengwen Feng 	uint32_t val;
297823e317ddSChengwen Feng 
2979efcaa81eSChengchang Tang 	if (!hns3_dev_get_support(hw, TX_PUSH))
298023e317ddSChengwen Feng 		return;
298123e317ddSChengwen Feng 
298223e317ddSChengwen Feng 	reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
298323e317ddSChengwen Feng 	/*
298423e317ddSChengwen Feng 	 * Because bar45 is about 8GB in size, taking the page fault on the
298523e317ddSChengwen Feng 	 * Tx path may be slow when working with vfio-pci, so do one read
298623e317ddSChengwen Feng 	 * here to make the kernel set up the page table mapping for bar45
298723e317ddSChengwen Feng 	 * in the init stage.
298823e317ddSChengwen Feng 	 * Note: bar45 is readable, but the result is all 1s.
298923e317ddSChengwen Feng 	 */
299023e317ddSChengwen Feng 	val = *reg;
299123e317ddSChengwen Feng 	RTE_SET_USED(val);
299223e317ddSChengwen Feng }
299323e317ddSChengwen Feng 
299423e317ddSChengwen Feng static void
299523e317ddSChengwen Feng hns3_tx_push_queue_init(struct rte_eth_dev *dev,
299623e317ddSChengwen Feng 			uint16_t queue_id,
299723e317ddSChengwen Feng 			struct hns3_tx_queue *txq)
299823e317ddSChengwen Feng {
299923e317ddSChengwen Feng 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3000efcaa81eSChengchang Tang 	if (!hns3_dev_get_support(hw, TX_PUSH)) {
300123e317ddSChengwen Feng 		txq->tx_push_enable = false;
300223e317ddSChengwen Feng 		return;
300323e317ddSChengwen Feng 	}
300423e317ddSChengwen Feng 
300523e317ddSChengwen Feng 	txq->io_tail_reg = (volatile void *)hns3_tx_push_get_queue_tail_reg(dev,
300623e317ddSChengwen Feng 						queue_id);
300723e317ddSChengwen Feng 	txq->tx_push_enable = true;
300823e317ddSChengwen Feng }
300923e317ddSChengwen Feng 
3010521ab3e9SWei Hu (Xavier) int
3011bba63669SWei Hu (Xavier) hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
3012bba63669SWei Hu (Xavier) 		    unsigned int socket_id, const struct rte_eth_txconf *conf)
3013bba63669SWei Hu (Xavier) {
3014bba63669SWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
30157ef93390SWei Hu (Xavier) 	uint16_t tx_rs_thresh, tx_free_thresh;
3016bba63669SWei Hu (Xavier) 	struct hns3_hw *hw = &hns->hw;
3017a951c1edSWei Hu (Xavier) 	struct hns3_queue_info q_info;
3018bba63669SWei Hu (Xavier) 	struct hns3_tx_queue *txq;
3019bba63669SWei Hu (Xavier) 	int tx_entry_len;
30207ef93390SWei Hu (Xavier) 	int ret;
3021bba63669SWei Hu (Xavier) 
30227ef93390SWei Hu (Xavier) 	ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
30237ef93390SWei Hu (Xavier) 				       &tx_rs_thresh, &tx_free_thresh, idx);
30247ef93390SWei Hu (Xavier) 	if (ret)
30257ef93390SWei Hu (Xavier) 		return ret;
3026bba63669SWei Hu (Xavier) 
3027bba63669SWei Hu (Xavier) 	if (dev->data->tx_queues[idx] != NULL) {
3028bba63669SWei Hu (Xavier) 		hns3_tx_queue_release(dev->data->tx_queues[idx]);
3029bba63669SWei Hu (Xavier) 		dev->data->tx_queues[idx] = NULL;
3030bba63669SWei Hu (Xavier) 	}
3031bba63669SWei Hu (Xavier) 
3032a951c1edSWei Hu (Xavier) 	q_info.idx = idx;
3033a951c1edSWei Hu (Xavier) 	q_info.socket_id = socket_id;
3034a951c1edSWei Hu (Xavier) 	q_info.nb_desc = nb_desc;
3035a951c1edSWei Hu (Xavier) 	q_info.type = "hns3 TX queue";
3036a951c1edSWei Hu (Xavier) 	q_info.ring_name = "tx_ring";
3037a951c1edSWei Hu (Xavier) 	txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
3038bba63669SWei Hu (Xavier) 	if (txq == NULL) {
3039a951c1edSWei Hu (Xavier) 		hns3_err(hw,
3040a951c1edSWei Hu (Xavier) 			 "Failed to alloc mem and reserve DMA mem for tx ring!");
3041bba63669SWei Hu (Xavier) 		return -ENOMEM;
3042bba63669SWei Hu (Xavier) 	}
3043bba63669SWei Hu (Xavier) 
3044bba63669SWei Hu (Xavier) 	txq->tx_deferred_start = conf->tx_deferred_start;
3045efcaa81eSChengchang Tang 	if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
3046fa29fe45SChengchang Tang 		hns3_warn(hw, "deferred start is not supported.");
3047fa29fe45SChengchang Tang 		txq->tx_deferred_start = false;
3048fa29fe45SChengchang Tang 	}
3049fa29fe45SChengchang Tang 
3050bba63669SWei Hu (Xavier) 	tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
3051bba63669SWei Hu (Xavier) 	txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
3052bba63669SWei Hu (Xavier) 					  RTE_CACHE_LINE_SIZE, socket_id);
3053bba63669SWei Hu (Xavier) 	if (txq->sw_ring == NULL) {
3054bba63669SWei Hu (Xavier) 		hns3_err(hw, "Failed to allocate memory for tx sw ring!");
3055bba63669SWei Hu (Xavier) 		hns3_tx_queue_release(txq);
3056bba63669SWei Hu (Xavier) 		return -ENOMEM;
3057bba63669SWei Hu (Xavier) 	}
3058bba63669SWei Hu (Xavier) 
3059bba63669SWei Hu (Xavier) 	txq->hns = hns;
3060bba63669SWei Hu (Xavier) 	txq->next_to_use = 0;
3061bba63669SWei Hu (Xavier) 	txq->next_to_clean = 0;
3062eb570862SYisen Zhuang 	txq->tx_bd_ready = txq->nb_tx_desc - 1;
30637ef93390SWei Hu (Xavier) 	txq->tx_free_thresh = tx_free_thresh;
30647ef93390SWei Hu (Xavier) 	txq->tx_rs_thresh = tx_rs_thresh;
3065e31f123dSWei Hu (Xavier) 	txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
3066e31f123dSWei Hu (Xavier) 				sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
3067e31f123dSWei Hu (Xavier) 				RTE_CACHE_LINE_SIZE, socket_id);
3068e31f123dSWei Hu (Xavier) 	if (!txq->free) {
3069e31f123dSWei Hu (Xavier) 		hns3_err(hw, "failed to allocate tx mbuf free array!");
3070e31f123dSWei Hu (Xavier) 		hns3_tx_queue_release(txq);
3071e31f123dSWei Hu (Xavier) 		return -ENOMEM;
3072e31f123dSWei Hu (Xavier) 	}
3073e31f123dSWei Hu (Xavier) 
3074bba63669SWei Hu (Xavier) 	txq->port_id = dev->data->port_id;
3075992b24a1SWei Hu (Xavier) 	/*
3076992b24a1SWei Hu (Xavier) 	 * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE,
3077992b24a1SWei Hu (Xavier) 	 * the pvid_sw_shift_en in the queue struct should not be changed,
3078f8dbaebbSSean Morrissey 	 * because PVID-related operations do not need to be processed by PMD.
3079f8dbaebbSSean Morrissey 	 * For the hns3 VF device, whether PVID needs to be processed depends
3080992b24a1SWei Hu (Xavier) 	 * on the configuration of the PF kernel mode netdev driver. And the
3081992b24a1SWei Hu (Xavier) 	 * related PF configuration is delivered through the mailbox and finally
30827be78d02SJosh Soref 	 * reflected in port_base_vlan_cfg.
3083992b24a1SWei Hu (Xavier) 	 */
3084992b24a1SWei Hu (Xavier) 	if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
3085992b24a1SWei Hu (Xavier) 		txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
3086992b24a1SWei Hu (Xavier) 					HNS3_PORT_BASE_VLAN_ENABLE;
3087992b24a1SWei Hu (Xavier) 	else
3088992b24a1SWei Hu (Xavier) 		txq->pvid_sw_shift_en = false;
30896393fc0bSDongdong Liu 
30906393fc0bSDongdong Liu 	if (hns3_dev_get_support(hw, SIMPLE_BD))
30916393fc0bSDongdong Liu 		txq->simple_bd_enable = true;
30926393fc0bSDongdong Liu 
3093da17b003SHongbo Zheng 	txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
3094bba63669SWei Hu (Xavier) 	txq->configured = true;
309576d79456SWei Hu (Xavier) 	txq->io_base = (void *)((char *)hw->io_base +
309676d79456SWei Hu (Xavier) 						hns3_get_tqp_reg_offset(idx));
30977ef93390SWei Hu (Xavier) 	txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
30987ef93390SWei Hu (Xavier) 					     HNS3_RING_TX_TAIL_REG);
3099395b5e08SWei Hu (Xavier) 	txq->min_tx_pkt_len = hw->min_tx_pkt_len;
3100dd1e4611SWei Hu (Xavier) 	txq->tso_mode = hw->tso_mode;
31018f01e2f8SChengchang Tang 	txq->udp_cksum_mode = hw->udp_cksum_mode;
31023cc817c1SChengwen Feng 	txq->mbuf_fast_free_en = !!(dev->data->dev_conf.txmode.offloads &
31033cc817c1SChengwen Feng 				    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
310486c551d1SHuisong Li 	memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
31059b77f1feSHuisong Li 	memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
31069b77f1feSHuisong Li 
310723e317ddSChengwen Feng 	/*
310823e317ddSChengwen Feng 	 * Call hns3_tx_push_queue_init after the io_tail_reg field has been
310923e317ddSChengwen Feng 	 * assigned, because it may overwrite that field.
311023e317ddSChengwen Feng 	 */
311123e317ddSChengwen Feng 	hns3_tx_push_queue_init(dev, idx, txq);
311223e317ddSChengwen Feng 
3113bba63669SWei Hu (Xavier) 	rte_spinlock_lock(&hw->lock);
3114bba63669SWei Hu (Xavier) 	dev->data->tx_queues[idx] = txq;
3115bba63669SWei Hu (Xavier) 	rte_spinlock_unlock(&hw->lock);
3116bba63669SWei Hu (Xavier) 
3117bba63669SWei Hu (Xavier) 	return 0;
3118bba63669SWei Hu (Xavier) }
3119bba63669SWei Hu (Xavier) 
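/*
 * Usage sketch (illustrative only, not driver code): an application reaches
 * hns3_tx_queue_setup() above through the generic ethdev API. The port id,
 * queue id 0, ring size 1024 and tx_free_thresh 32 are assumed example
 * values.
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.tx_free_thresh = 32;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				   &txconf) != 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue 0 setup failed");
 */
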
312060560096SChengwen Feng static void
3121bba63669SWei Hu (Xavier) hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
3122bba63669SWei Hu (Xavier) {
3123bba63669SWei Hu (Xavier) 	uint16_t tx_next_clean = txq->next_to_clean;
3124bba63669SWei Hu (Xavier) 	uint16_t tx_next_use   = txq->next_to_use;
312560560096SChengwen Feng 	uint16_t tx_bd_ready   = txq->tx_bd_ready;
312660560096SChengwen Feng 	uint16_t tx_bd_max     = txq->nb_tx_desc;
312760560096SChengwen Feng 	struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean];
3128bba63669SWei Hu (Xavier) 	struct hns3_desc *desc = &txq->tx_ring[tx_next_clean];
312960560096SChengwen Feng 	struct rte_mbuf *mbuf;
3130bba63669SWei Hu (Xavier) 
313160560096SChengwen Feng 	while ((!(desc->tx.tp_fe_sc_vld_ra_ri &
313260560096SChengwen Feng 		rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&
313360560096SChengwen Feng 		tx_next_use != tx_next_clean) {
313460560096SChengwen Feng 		mbuf = tx_bak_pkt->mbuf;
313560560096SChengwen Feng 		if (mbuf) {
313660560096SChengwen Feng 			rte_pktmbuf_free_seg(mbuf);
313760560096SChengwen Feng 			tx_bak_pkt->mbuf = NULL;
3138bba63669SWei Hu (Xavier) 		}
3139bba63669SWei Hu (Xavier) 
314060560096SChengwen Feng 		desc++;
314160560096SChengwen Feng 		tx_bak_pkt++;
314260560096SChengwen Feng 		tx_next_clean++;
314360560096SChengwen Feng 		tx_bd_ready++;
314460560096SChengwen Feng 
314560560096SChengwen Feng 		if (tx_next_clean >= tx_bd_max) {
314660560096SChengwen Feng 			tx_next_clean = 0;
314760560096SChengwen Feng 			desc = txq->tx_ring;
314860560096SChengwen Feng 			tx_bak_pkt = txq->sw_ring;
314960560096SChengwen Feng 		}
3150bba63669SWei Hu (Xavier) 	}
3151bba63669SWei Hu (Xavier) 
315260560096SChengwen Feng 	txq->next_to_clean = tx_next_clean;
315360560096SChengwen Feng 	txq->tx_bd_ready   = tx_bd_ready;
3154bba63669SWei Hu (Xavier) }
3155bba63669SWei Hu (Xavier) 
31561f295c40SWei Hu (Xavier) int
31571f295c40SWei Hu (Xavier) hns3_config_gro(struct hns3_hw *hw, bool en)
31581f295c40SWei Hu (Xavier) {
31591f295c40SWei Hu (Xavier) 	struct hns3_cfg_gro_status_cmd *req;
31601f295c40SWei Hu (Xavier) 	struct hns3_cmd_desc desc;
31611f295c40SWei Hu (Xavier) 	int ret;
31621f295c40SWei Hu (Xavier) 
3163a4b2c681SHuisong Li 	if (!hns3_dev_get_support(hw, GRO))
3164a4b2c681SHuisong Li 		return 0;
3165a4b2c681SHuisong Li 
31661f295c40SWei Hu (Xavier) 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
31671f295c40SWei Hu (Xavier) 	req = (struct hns3_cfg_gro_status_cmd *)desc.data;
31681f295c40SWei Hu (Xavier) 
31691f295c40SWei Hu (Xavier) 	req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
31701f295c40SWei Hu (Xavier) 
31711f295c40SWei Hu (Xavier) 	ret = hns3_cmd_send(hw, &desc, 1);
31721f295c40SWei Hu (Xavier) 	if (ret)
31731f295c40SWei Hu (Xavier) 		hns3_err(hw, "%s hardware GRO failed, ret = %d",
31741f295c40SWei Hu (Xavier) 			 en ? "enable" : "disable", ret);
31751f295c40SWei Hu (Xavier) 
31761f295c40SWei Hu (Xavier) 	return ret;
31771f295c40SWei Hu (Xavier) }
31781f295c40SWei Hu (Xavier) 
31791f295c40SWei Hu (Xavier) int
31801f295c40SWei Hu (Xavier) hns3_restore_gro_conf(struct hns3_hw *hw)
31811f295c40SWei Hu (Xavier) {
31821f295c40SWei Hu (Xavier) 	uint64_t offloads;
31831f295c40SWei Hu (Xavier) 	bool gro_en;
31841f295c40SWei Hu (Xavier) 	int ret;
31851f295c40SWei Hu (Xavier) 
31861f295c40SWei Hu (Xavier) 	offloads = hw->data->dev_conf.rxmode.offloads;
3187295968d1SFerruh Yigit 	gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
31881f295c40SWei Hu (Xavier) 	ret = hns3_config_gro(hw, gro_en);
31891f295c40SWei Hu (Xavier) 	if (ret)
31901f295c40SWei Hu (Xavier) 		hns3_err(hw, "restore hardware GRO to %s failed, ret = %d",
31911f295c40SWei Hu (Xavier) 			 gro_en ? "enabled" : "disabled", ret);
31921f295c40SWei Hu (Xavier) 
31931f295c40SWei Hu (Xavier) 	return ret;
31941f295c40SWei Hu (Xavier) }
31951f295c40SWei Hu (Xavier) 
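/*
 * Usage sketch (illustrative only): hardware GRO follows the generic LRO Rx
 * offload flag that hns3_restore_gro_conf() reads back above, so an
 * application enables it at configure time roughly as below. The port id and
 * the single Rx/Tx queue counts are assumed example values.
 *
 *	struct rte_eth_conf port_conf = {0};
 *
 *	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 */
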
3196b68259f7SHongbo Zheng static inline bool
3197b68259f7SHongbo Zheng hns3_pkt_is_tso(struct rte_mbuf *m)
3198b68259f7SHongbo Zheng {
3199daa02b5cSOlivier Matz 	return (m->tso_segsz != 0 && m->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
3200b68259f7SHongbo Zheng }
3201b68259f7SHongbo Zheng 
32026dca716cSHongbo Zheng static void
3203fb6eb900SChengchang Tang hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
32046dca716cSHongbo Zheng {
3205b68259f7SHongbo Zheng 	if (!hns3_pkt_is_tso(rxm))
32066dca716cSHongbo Zheng 		return;
32076dca716cSHongbo Zheng 
32086dca716cSHongbo Zheng 	if (paylen <= rxm->tso_segsz)
32096dca716cSHongbo Zheng 		return;
32106dca716cSHongbo Zheng 
3211fb6eb900SChengchang Tang 	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
32126393fc0bSDongdong Liu 	desc->tx.ckst_mss |= rte_cpu_to_le_16(rxm->tso_segsz);
32136dca716cSHongbo Zheng }
32146dca716cSHongbo Zheng 
3215fc9b57ffSWei Hu (Xavier) static inline void
3216fc9b57ffSWei Hu (Xavier) hns3_fill_per_desc(struct hns3_desc *desc, struct rte_mbuf *rxm)
3217bba63669SWei Hu (Xavier) {
3218fc9b57ffSWei Hu (Xavier) 	desc->addr = rte_mbuf_data_iova(rxm);
3219fc9b57ffSWei Hu (Xavier) 	desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
322038b539d9SMin Hu (Connor) 	desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
3221fc9b57ffSWei Hu (Xavier) }
3222fc9b57ffSWei Hu (Xavier) 
3223fc9b57ffSWei Hu (Xavier) static void
3224fc9b57ffSWei Hu (Xavier) hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
3225fc9b57ffSWei Hu (Xavier) 		     struct rte_mbuf *rxm)
3226fc9b57ffSWei Hu (Xavier) {
32276dca716cSHongbo Zheng 	uint64_t ol_flags = rxm->ol_flags;
3228bba63669SWei Hu (Xavier) 	uint32_t hdr_len;
3229bba63669SWei Hu (Xavier) 	uint32_t paylen;
3230bba63669SWei Hu (Xavier) 
3231bba63669SWei Hu (Xavier) 	hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
3232daa02b5cSOlivier Matz 	hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
3233bba63669SWei Hu (Xavier) 			   rxm->outer_l2_len + rxm->outer_l3_len : 0;
3234bba63669SWei Hu (Xavier) 	paylen = rxm->pkt_len - hdr_len;
3235d0ab89e6SChengchang Tang 	desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
3236fb6eb900SChengchang Tang 	hns3_set_tso(desc, paylen, rxm);
3237bba63669SWei Hu (Xavier) 
3238fc9b57ffSWei Hu (Xavier) 	/*
3239fc9b57ffSWei Hu (Xavier) 	 * Currently, hardware doesn't support more than two layers of VLAN
3240fc9b57ffSWei Hu (Xavier) 	 * offload in the Tx direction on the hns3 network engine. So when the
3241fc9b57ffSWei Hu (Xavier) 	 * number of VLANs in the packet represented by rxm, plus the number of
3242fc9b57ffSWei Hu (Xavier) 	 * VLANs offloaded by hardware such as PVID, exceeds two, the packet will
32437be78d02SJosh Soref 	 * be discarded or its original VLAN will be overwritten by hardware.
3244fc9b57ffSWei Hu (Xavier) 	 * When the PF PVID is enabled by calling the API function named
3245fc9b57ffSWei Hu (Xavier) 	 * rte_eth_dev_set_vlan_pvid, or the VF PVID is enabled by the hns3 PF
3246fc9b57ffSWei Hu (Xavier) 	 * kernel ether driver, the outer VLAN tag will always be the PVID. To
3247fc9b57ffSWei Hu (Xavier) 	 * avoid the VLAN in the Tx descriptor being overwritten by the PVID, it
3248fc9b57ffSWei Hu (Xavier) 	 * should be placed close to the IP header when PVID is enabled.
3249fc9b57ffSWei Hu (Xavier) 	 */
3250daa02b5cSOlivier Matz 	if (!txq->pvid_sw_shift_en &&
3251daa02b5cSOlivier Matz 	    ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
3252fc9b57ffSWei Hu (Xavier) 		desc->tx.ol_type_vlan_len_msec |=
3253fc9b57ffSWei Hu (Xavier) 				rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
3254daa02b5cSOlivier Matz 		if (ol_flags & RTE_MBUF_F_TX_QINQ)
3255bba63669SWei Hu (Xavier) 			desc->tx.outer_vlan_tag =
3256bba63669SWei Hu (Xavier) 					rte_cpu_to_le_16(rxm->vlan_tci_outer);
3257fc9b57ffSWei Hu (Xavier) 		else
3258fc9b57ffSWei Hu (Xavier) 			desc->tx.outer_vlan_tag =
3259fc9b57ffSWei Hu (Xavier) 					rte_cpu_to_le_16(rxm->vlan_tci);
3260bba63669SWei Hu (Xavier) 	}
3261fc9b57ffSWei Hu (Xavier) 
3262daa02b5cSOlivier Matz 	if (ol_flags & RTE_MBUF_F_TX_QINQ ||
3263daa02b5cSOlivier Matz 	    ((ol_flags & RTE_MBUF_F_TX_VLAN) && txq->pvid_sw_shift_en)) {
3264fc9b57ffSWei Hu (Xavier) 		desc->tx.type_cs_vlan_tso_len |=
3265fc9b57ffSWei Hu (Xavier) 					rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
3266fc9b57ffSWei Hu (Xavier) 		desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
3267bba63669SWei Hu (Xavier) 	}
326838b539d9SMin Hu (Connor) 
3269daa02b5cSOlivier Matz 	if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
327038b539d9SMin Hu (Connor) 		desc->tx.tp_fe_sc_vld_ra_ri |=
327138b539d9SMin Hu (Connor) 				rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
3272bba63669SWei Hu (Xavier) }
3273bba63669SWei Hu (Xavier) 
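/*
 * Illustrative sketch: the VLAN handling above consumes standard mbuf Tx
 * metadata. A caller requesting single VLAN insertion would set, with an
 * assumed example tag of 100:
 *
 *	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
 *	m->vlan_tci = 100;
 *
 * For QinQ insertion it would additionally set RTE_MBUF_F_TX_QINQ and
 * vlan_tci_outer. As described above, PVID insertion by hardware counts
 * against the two-layer limit.
 */
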
32740134a5c7SChengchang Tang static inline int
32750134a5c7SChengchang Tang hns3_tx_alloc_mbufs(struct rte_mempool *mb_pool, uint16_t nb_new_buf,
32760134a5c7SChengchang Tang 			struct rte_mbuf **alloc_mbuf)
3277bba63669SWei Hu (Xavier) {
32780134a5c7SChengchang Tang #define MAX_NON_TSO_BD_PER_PKT 18
32790134a5c7SChengchang Tang 	struct rte_mbuf *pkt_segs[MAX_NON_TSO_BD_PER_PKT];
3280bba63669SWei Hu (Xavier) 	uint16_t i;
3281bba63669SWei Hu (Xavier) 
3282bba63669SWei Hu (Xavier) 	/* Allocate enough mbufs */
32830134a5c7SChengchang Tang 	if (rte_mempool_get_bulk(mb_pool, (void **)pkt_segs, nb_new_buf))
3284bba63669SWei Hu (Xavier) 		return -ENOMEM;
3285bba63669SWei Hu (Xavier) 
32860134a5c7SChengchang Tang 	for (i = 0; i < nb_new_buf - 1; i++)
32870134a5c7SChengchang Tang 		pkt_segs[i]->next = pkt_segs[i + 1];
32880134a5c7SChengchang Tang 
32890134a5c7SChengchang Tang 	pkt_segs[nb_new_buf - 1]->next = NULL;
32900134a5c7SChengchang Tang 	pkt_segs[0]->nb_segs = nb_new_buf;
32910134a5c7SChengchang Tang 	*alloc_mbuf = pkt_segs[0];
3292bba63669SWei Hu (Xavier) 
3293bba63669SWei Hu (Xavier) 	return 0;
3294bba63669SWei Hu (Xavier) }
3295bba63669SWei Hu (Xavier) 
32966c44219fSWei Hu (Xavier) static inline void
32976c44219fSWei Hu (Xavier) hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
32986c44219fSWei Hu (Xavier) {
32996c44219fSWei Hu (Xavier) 	new_pkt->ol_flags = old_pkt->ol_flags;
33006c44219fSWei Hu (Xavier) 	new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
33016c44219fSWei Hu (Xavier) 	new_pkt->outer_l2_len = old_pkt->outer_l2_len;
33026c44219fSWei Hu (Xavier) 	new_pkt->outer_l3_len = old_pkt->outer_l3_len;
33036c44219fSWei Hu (Xavier) 	new_pkt->l2_len = old_pkt->l2_len;
33046c44219fSWei Hu (Xavier) 	new_pkt->l3_len = old_pkt->l3_len;
33056c44219fSWei Hu (Xavier) 	new_pkt->l4_len = old_pkt->l4_len;
33066c44219fSWei Hu (Xavier) 	new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
33076c44219fSWei Hu (Xavier) 	new_pkt->vlan_tci = old_pkt->vlan_tci;
33086c44219fSWei Hu (Xavier) }
33096c44219fSWei Hu (Xavier) 
3310bba63669SWei Hu (Xavier) static int
3311da17b003SHongbo Zheng hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
3312da17b003SHongbo Zheng 				  uint8_t max_non_tso_bd_num)
3313bba63669SWei Hu (Xavier) {
3314bba63669SWei Hu (Xavier) 	struct rte_mempool *mb_pool;
3315bba63669SWei Hu (Xavier) 	struct rte_mbuf *new_mbuf;
3316bba63669SWei Hu (Xavier) 	struct rte_mbuf *temp_new;
3317bba63669SWei Hu (Xavier) 	struct rte_mbuf *temp;
3318bba63669SWei Hu (Xavier) 	uint16_t last_buf_len;
3319bba63669SWei Hu (Xavier) 	uint16_t nb_new_buf;
3320bba63669SWei Hu (Xavier) 	uint16_t buf_size;
3321bba63669SWei Hu (Xavier) 	uint16_t buf_len;
3322bba63669SWei Hu (Xavier) 	uint16_t len_s;
3323bba63669SWei Hu (Xavier) 	uint16_t len_d;
3324bba63669SWei Hu (Xavier) 	uint16_t len;
3325bba63669SWei Hu (Xavier) 	int ret;
3326bba63669SWei Hu (Xavier) 	char *s;
3327bba63669SWei Hu (Xavier) 	char *d;
3328bba63669SWei Hu (Xavier) 
3329bba63669SWei Hu (Xavier) 	mb_pool = tx_pkt->pool;
3330bba63669SWei Hu (Xavier) 	buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
33316c44219fSWei Hu (Xavier) 	nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
3332da17b003SHongbo Zheng 	if (nb_new_buf > max_non_tso_bd_num)
33336c44219fSWei Hu (Xavier) 		return -EINVAL;
3334bba63669SWei Hu (Xavier) 
33356c44219fSWei Hu (Xavier) 	last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
3336bba63669SWei Hu (Xavier) 	if (last_buf_len == 0)
3337bba63669SWei Hu (Xavier) 		last_buf_len = buf_size;
3338bba63669SWei Hu (Xavier) 
3339bba63669SWei Hu (Xavier) 	/* Allocate enough mbufs */
33400134a5c7SChengchang Tang 	ret = hns3_tx_alloc_mbufs(mb_pool, nb_new_buf, &new_mbuf);
3341bba63669SWei Hu (Xavier) 	if (ret)
3342bba63669SWei Hu (Xavier) 		return ret;
3343bba63669SWei Hu (Xavier) 
3344bba63669SWei Hu (Xavier) 	/* Copy the original packet content to the new mbufs */
3345bba63669SWei Hu (Xavier) 	temp = tx_pkt;
3346bba63669SWei Hu (Xavier) 	s = rte_pktmbuf_mtod(temp, char *);
33476c44219fSWei Hu (Xavier) 	len_s = rte_pktmbuf_data_len(temp);
3348bba63669SWei Hu (Xavier) 	temp_new = new_mbuf;
33490134a5c7SChengchang Tang 	while (temp != NULL && temp_new != NULL) {
3350bba63669SWei Hu (Xavier) 		d = rte_pktmbuf_mtod(temp_new, char *);
33510134a5c7SChengchang Tang 		buf_len = temp_new->next == NULL ? last_buf_len : buf_size;
3352bba63669SWei Hu (Xavier) 		len_d = buf_len;
3353bba63669SWei Hu (Xavier) 
3354bba63669SWei Hu (Xavier) 		while (len_d) {
3355bba63669SWei Hu (Xavier) 			len = RTE_MIN(len_s, len_d);
3356bba63669SWei Hu (Xavier) 			memcpy(d, s, len);
3357bba63669SWei Hu (Xavier) 			s = s + len;
3358bba63669SWei Hu (Xavier) 			d = d + len;
3359bba63669SWei Hu (Xavier) 			len_d = len_d - len;
3360bba63669SWei Hu (Xavier) 			len_s = len_s - len;
3361bba63669SWei Hu (Xavier) 
3362bba63669SWei Hu (Xavier) 			if (len_s == 0) {
3363bba63669SWei Hu (Xavier) 				temp = temp->next;
3364bba63669SWei Hu (Xavier) 				if (temp == NULL)
3365bba63669SWei Hu (Xavier) 					break;
3366bba63669SWei Hu (Xavier) 				s = rte_pktmbuf_mtod(temp, char *);
33676c44219fSWei Hu (Xavier) 				len_s = rte_pktmbuf_data_len(temp);
3368bba63669SWei Hu (Xavier) 			}
3369bba63669SWei Hu (Xavier) 		}
3370bba63669SWei Hu (Xavier) 
3371bba63669SWei Hu (Xavier) 		temp_new->data_len = buf_len;
3372bba63669SWei Hu (Xavier) 		temp_new = temp_new->next;
3373bba63669SWei Hu (Xavier) 	}
33746c44219fSWei Hu (Xavier) 	hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
3375bba63669SWei Hu (Xavier) 
3376bba63669SWei Hu (Xavier) 	/* free original mbufs */
3377bba63669SWei Hu (Xavier) 	rte_pktmbuf_free(tx_pkt);
3378bba63669SWei Hu (Xavier) 
3379bba63669SWei Hu (Xavier) 	*new_pkt = new_mbuf;
3380bba63669SWei Hu (Xavier) 
3381bba63669SWei Hu (Xavier) 	return 0;
3382bba63669SWei Hu (Xavier) }
3383bba63669SWei Hu (Xavier) 
3384bba63669SWei Hu (Xavier) static void
3385fb6eb900SChengchang Tang hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
3386bba63669SWei Hu (Xavier) {
3387bba63669SWei Hu (Xavier) 	uint32_t tmp = *ol_type_vlan_len_msec;
3388fb6eb900SChengchang Tang 	uint64_t ol_flags = m->ol_flags;
3389bba63669SWei Hu (Xavier) 
3390bba63669SWei Hu (Xavier) 	/* (outer) IP header type */
3391daa02b5cSOlivier Matz 	if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
3392daa02b5cSOlivier Matz 		if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
3393fb6eb900SChengchang Tang 			tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3394bba63669SWei Hu (Xavier) 					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
3395bba63669SWei Hu (Xavier) 		else
3396fb6eb900SChengchang Tang 			tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
3397fb6eb900SChengchang Tang 				HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
3398daa02b5cSOlivier Matz 	} else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
3399fb6eb900SChengchang Tang 		tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
3400bba63669SWei Hu (Xavier) 					HNS3_OL3T_IPV6);
3401bba63669SWei Hu (Xavier) 	}
3402fb6eb900SChengchang Tang 	/* OL3 header size, defined in units of 4 bytes */
3403fb6eb900SChengchang Tang 	tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3404fb6eb900SChengchang Tang 				m->outer_l3_len >> HNS3_L3_LEN_UNIT);
3405bba63669SWei Hu (Xavier) 	*ol_type_vlan_len_msec = tmp;
3406bba63669SWei Hu (Xavier) }
3407bba63669SWei Hu (Xavier) 
3408bba63669SWei Hu (Xavier) static int
3409fb6eb900SChengchang Tang hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
3410fb6eb900SChengchang Tang 			uint32_t *type_cs_vlan_tso_len)
3411bba63669SWei Hu (Xavier) {
3412fb6eb900SChengchang Tang #define HNS3_NVGRE_HLEN 8
3413fb6eb900SChengchang Tang 	uint32_t tmp_outer = *ol_type_vlan_len_msec;
3414fb6eb900SChengchang Tang 	uint32_t tmp_inner = *type_cs_vlan_tso_len;
3415fb6eb900SChengchang Tang 	uint64_t ol_flags = m->ol_flags;
3416fb6eb900SChengchang Tang 	uint16_t inner_l2_len;
3417bba63669SWei Hu (Xavier) 
3418daa02b5cSOlivier Matz 	switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
3419daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
3420daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
3421daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
3422fb6eb900SChengchang Tang 		/* MAC-in-UDP tunnelling packet, including VxLAN and GENEVE */
3423fb6eb900SChengchang Tang 		tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3424fb6eb900SChengchang Tang 				HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
3425bba63669SWei Hu (Xavier) 		/*
3426fb6eb900SChengchang Tang 		 * For a tunnel packet, the inner L2 length of the mbuf is the
3427fb6eb900SChengchang Tang 		 * sum of the outer L4 length, the tunneling header length and
34287be78d02SJosh Soref 		 * the real inner L2 length. But in the hns3 Tx descriptor, the
3429fb6eb900SChengchang Tang 		 * tunneling header length is carried in the outer L4 length
3430fb6eb900SChengchang Tang 		 * field. Therefore, the driver needs to calculate the outer L4
3431fb6eb900SChengchang Tang 		 * length and the inner L2 length.
3432bba63669SWei Hu (Xavier) 		 */
3433fb6eb900SChengchang Tang 		tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3434fb6eb900SChengchang Tang 						HNS3_TXD_L4LEN_S,
3435bba63669SWei Hu (Xavier) 						(uint8_t)RTE_ETHER_VXLAN_HLEN >>
3436bba63669SWei Hu (Xavier) 						HNS3_L4_LEN_UNIT);
3437fb6eb900SChengchang Tang 
3438fb6eb900SChengchang Tang 		inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
3439bba63669SWei Hu (Xavier) 		break;
3440daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TUNNEL_GRE:
3441fb6eb900SChengchang Tang 		tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
3442fb6eb900SChengchang Tang 					HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
3443bba63669SWei Hu (Xavier) 		/*
34447be78d02SJosh Soref 		 * For an NVGRE tunnel packet, the outer L4 header is empty, so
3445fb6eb900SChengchang Tang 		 * only the NVGRE header length is filled into the outer L4 field.
3446bba63669SWei Hu (Xavier) 		 */
3447fb6eb900SChengchang Tang 		tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
3448fb6eb900SChengchang Tang 				HNS3_TXD_L4LEN_S,
3449fb6eb900SChengchang Tang 				(uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
3450fb6eb900SChengchang Tang 
3451fb6eb900SChengchang Tang 		inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
3452bba63669SWei Hu (Xavier) 		break;
3453bba63669SWei Hu (Xavier) 	default:
3454bba63669SWei Hu (Xavier) 		/* For non-UDP/GRE tunneling, drop the tunnel packet */
3455bba63669SWei Hu (Xavier) 		return -EINVAL;
3456bba63669SWei Hu (Xavier) 	}
3457bba63669SWei Hu (Xavier) 
3458fb6eb900SChengchang Tang 	tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3459fb6eb900SChengchang Tang 					inner_l2_len >> HNS3_L2_LEN_UNIT);
3460fb6eb900SChengchang Tang 	/* OL2 header size, defined in units of 2 bytes */
3461fb6eb900SChengchang Tang 	tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
3462fb6eb900SChengchang Tang 					m->outer_l2_len >> HNS3_L2_LEN_UNIT);
3463fb6eb900SChengchang Tang 
3464fb6eb900SChengchang Tang 	*type_cs_vlan_tso_len = tmp_inner;
3465fb6eb900SChengchang Tang 	*ol_type_vlan_len_msec = tmp_outer;
3466bba63669SWei Hu (Xavier) 
3467bba63669SWei Hu (Xavier) 	return 0;
3468bba63669SWei Hu (Xavier) }
3469bba63669SWei Hu (Xavier) 
3470bba63669SWei Hu (Xavier) static int
3471fb6eb900SChengchang Tang hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3472fb6eb900SChengchang Tang 			    uint16_t tx_desc_id)
3473bba63669SWei Hu (Xavier) {
3474bba63669SWei Hu (Xavier) 	struct hns3_desc *tx_ring = txq->tx_ring;
3475bba63669SWei Hu (Xavier) 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
3476d0ab89e6SChengchang Tang 	uint64_t ol_flags = m->ol_flags;
3477fb6eb900SChengchang Tang 	uint32_t tmp_outer = 0;
3478fb6eb900SChengchang Tang 	uint32_t tmp_inner = 0;
3479d0ab89e6SChengchang Tang 	uint32_t tmp_ol4cs;
3480bba63669SWei Hu (Xavier) 	int ret;
3481bba63669SWei Hu (Xavier) 
3482fb6eb900SChengchang Tang 	/*
3483fb6eb900SChengchang Tang 	 * In the mbuf, the tunnel header is counted in the inner L2 header
3484fb6eb900SChengchang Tang 	 * field, but in the hns3 descriptor it is counted in the outer L4,
3485fb6eb900SChengchang Tang 	 * so a conversion between the two is needed. To avoid calculating
3486fb6eb900SChengchang Tang 	 * it more than once, both the outer and the inner L2 header lengths
34877be78d02SJosh Soref 	 * are filled in while the tunnel packet is parsed.
3488fb6eb900SChengchang Tang 	 */
3489daa02b5cSOlivier Matz 	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
3490fb6eb900SChengchang Tang 		/*
3491fb6eb900SChengchang Tang 		 * For non-tunnel packets the tunnel type id is 0, so there is
3492fb6eb900SChengchang Tang 		 * no need to assign a value to it. Only the inner (normal) L2
3493fb6eb900SChengchang Tang 		 * header length is assigned.
3494fb6eb900SChengchang Tang 		 */
3495fb6eb900SChengchang Tang 		tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
3496fb6eb900SChengchang Tang 			       HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
3497fb6eb900SChengchang Tang 	} else {
3498fb6eb900SChengchang Tang 		/*
3499fb6eb900SChengchang Tang 		 * If the outer csum is not offloaded, the outer lengths may be
3500fb6eb900SChengchang Tang 		 * filled with 0, and the length of the outer header is then
3501fb6eb900SChengchang Tang 		 * added to the inner l2_len, which would lead to a cksum error.
3502fb6eb900SChengchang Tang 		 * So the driver has to calculate the header lengths itself.
3503fb6eb900SChengchang Tang 		 */
3504d0ab89e6SChengchang Tang 		if (unlikely(!(ol_flags &
3505daa02b5cSOlivier Matz 			(RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
3506fb6eb900SChengchang Tang 					m->outer_l2_len == 0)) {
3507fb6eb900SChengchang Tang 			struct rte_net_hdr_lens hdr_len;
3508fb6eb900SChengchang Tang 			(void)rte_net_get_ptype(m, &hdr_len,
3509fb6eb900SChengchang Tang 					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
3510fb6eb900SChengchang Tang 			m->outer_l3_len = hdr_len.l3_len;
3511fb6eb900SChengchang Tang 			m->outer_l2_len = hdr_len.l2_len;
3512fb6eb900SChengchang Tang 			m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
3513fb6eb900SChengchang Tang 		}
3514fb6eb900SChengchang Tang 		hns3_parse_outer_params(m, &tmp_outer);
3515fb6eb900SChengchang Tang 		ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
3516bba63669SWei Hu (Xavier) 		if (ret)
3517bba63669SWei Hu (Xavier) 			return -EINVAL;
3518fb6eb900SChengchang Tang 	}
3519bba63669SWei Hu (Xavier) 
3520fb6eb900SChengchang Tang 	desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
3521fb6eb900SChengchang Tang 	desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
3522daa02b5cSOlivier Matz 	tmp_ol4cs = ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM ?
3523d0ab89e6SChengchang Tang 			BIT(HNS3_TXD_OL4CS_B) : 0;
3524d0ab89e6SChengchang Tang 	desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
3525bba63669SWei Hu (Xavier) 
3526bba63669SWei Hu (Xavier) 	return 0;
3527bba63669SWei Hu (Xavier) }
3528bba63669SWei Hu (Xavier) 
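/*
 * Illustrative sketch: for the tunnel parsing above, a caller sending a
 * VXLAN-encapsulated TCP/IPv4 packet with checksum offload would describe
 * the headers as below. The 14/20/20 byte Ethernet/IPv4/TCP lengths are
 * assumed example values; note that l2_len covers outer UDP + VXLAN +
 * inner Ethernet, which is the mbuf convention the driver converts from.
 *
 *	m->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 |
 *		       RTE_MBUF_F_TX_OUTER_IP_CKSUM |
 *		       RTE_MBUF_F_TX_TUNNEL_VXLAN |
 *		       RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		       RTE_MBUF_F_TX_TCP_CKSUM;
 *	m->outer_l2_len = 14;
 *	m->outer_l3_len = 20;
 *	m->l2_len = RTE_ETHER_VXLAN_HLEN + 14;
 *	m->l3_len = 20;
 *	m->l4_len = 20;
 */
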
3529bba63669SWei Hu (Xavier) static void
3530fb6eb900SChengchang Tang hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3531bba63669SWei Hu (Xavier) {
3532fb6eb900SChengchang Tang 	uint64_t ol_flags = m->ol_flags;
3533fb6eb900SChengchang Tang 	uint32_t l3_type;
3534bba63669SWei Hu (Xavier) 	uint32_t tmp;
3535bba63669SWei Hu (Xavier) 
3536fb6eb900SChengchang Tang 	tmp = *type_cs_vlan_tso_len;
3537daa02b5cSOlivier Matz 	if (ol_flags & RTE_MBUF_F_TX_IPV4)
3538fb6eb900SChengchang Tang 		l3_type = HNS3_L3T_IPV4;
3539daa02b5cSOlivier Matz 	else if (ol_flags & RTE_MBUF_F_TX_IPV6)
3540fb6eb900SChengchang Tang 		l3_type = HNS3_L3T_IPV6;
3541fb6eb900SChengchang Tang 	else
3542fb6eb900SChengchang Tang 		l3_type = HNS3_L3T_NONE;
3543fb6eb900SChengchang Tang 
3544fb6eb900SChengchang Tang 	/* inner (/normal) L3 header size, defined in units of 4 bytes */
3545fb6eb900SChengchang Tang 	tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
3546fb6eb900SChengchang Tang 					m->l3_len >> HNS3_L3_LEN_UNIT);
3547fb6eb900SChengchang Tang 
3548fb6eb900SChengchang Tang 	tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
3549fb6eb900SChengchang Tang 
3550bba63669SWei Hu (Xavier) 	/* Enable L3 checksum offloads */
3551daa02b5cSOlivier Matz 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
3552fb6eb900SChengchang Tang 		tmp |= BIT(HNS3_TXD_L3CS_B);
3553bba63669SWei Hu (Xavier) 	*type_cs_vlan_tso_len = tmp;
3554bba63669SWei Hu (Xavier) }
3555bba63669SWei Hu (Xavier) 
3556bba63669SWei Hu (Xavier) static void
3557fb6eb900SChengchang Tang hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
3558bba63669SWei Hu (Xavier) {
3559fb6eb900SChengchang Tang 	uint64_t ol_flags = m->ol_flags;
3560bba63669SWei Hu (Xavier) 	uint32_t tmp;
3561bba63669SWei Hu (Xavier) 	/* Enable L4 checksum offloads */
3562daa02b5cSOlivier Matz 	switch (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)) {
3563daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG:
3564daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TCP_CKSUM:
3565daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_TCP_SEG:
3566bba63669SWei Hu (Xavier) 		tmp = *type_cs_vlan_tso_len;
3567fb6eb900SChengchang Tang 		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3568bba63669SWei Hu (Xavier) 					HNS3_L4T_TCP);
3569bba63669SWei Hu (Xavier) 		break;
3570daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_UDP_CKSUM:
3571bba63669SWei Hu (Xavier) 		tmp = *type_cs_vlan_tso_len;
3572fb6eb900SChengchang Tang 		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3573bba63669SWei Hu (Xavier) 					HNS3_L4T_UDP);
3574bba63669SWei Hu (Xavier) 		break;
3575daa02b5cSOlivier Matz 	case RTE_MBUF_F_TX_SCTP_CKSUM:
3576bba63669SWei Hu (Xavier) 		tmp = *type_cs_vlan_tso_len;
3577fb6eb900SChengchang Tang 		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
3578bba63669SWei Hu (Xavier) 					HNS3_L4T_SCTP);
3579bba63669SWei Hu (Xavier) 		break;
3580bba63669SWei Hu (Xavier) 	default:
3581fb6eb900SChengchang Tang 		return;
3582bba63669SWei Hu (Xavier) 	}
3583fb6eb900SChengchang Tang 	tmp |= BIT(HNS3_TXD_L4CS_B);
3584fb6eb900SChengchang Tang 	tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
3585fb6eb900SChengchang Tang 					m->l4_len >> HNS3_L4_LEN_UNIT);
3586fb6eb900SChengchang Tang 	*type_cs_vlan_tso_len = tmp;
3587bba63669SWei Hu (Xavier) }
3588bba63669SWei Hu (Xavier) 
3589bba63669SWei Hu (Xavier) static void
3590fb6eb900SChengchang Tang hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
3591fb6eb900SChengchang Tang 			 uint16_t tx_desc_id)
3592bba63669SWei Hu (Xavier) {
3593bba63669SWei Hu (Xavier) 	struct hns3_desc *tx_ring = txq->tx_ring;
3594bba63669SWei Hu (Xavier) 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
3595bba63669SWei Hu (Xavier) 	uint32_t value = 0;
3596bba63669SWei Hu (Xavier) 
3597fb6eb900SChengchang Tang 	hns3_parse_l3_cksum_params(m, &value);
3598fb6eb900SChengchang Tang 	hns3_parse_l4_cksum_params(m, &value);
3599bba63669SWei Hu (Xavier) 
3600bba63669SWei Hu (Xavier) 	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
3601bba63669SWei Hu (Xavier) }
3602bba63669SWei Hu (Xavier) 
36036dca716cSHongbo Zheng static bool
3604da17b003SHongbo Zheng hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
3605da17b003SHongbo Zheng 				 uint32_t max_non_tso_bd_num)
36066dca716cSHongbo Zheng {
36076dca716cSHongbo Zheng 	struct rte_mbuf *m_first = tx_pkts;
36086dca716cSHongbo Zheng 	struct rte_mbuf *m_last = tx_pkts;
36096dca716cSHongbo Zheng 	uint32_t tot_len = 0;
36106dca716cSHongbo Zheng 	uint32_t hdr_len;
36116dca716cSHongbo Zheng 	uint32_t i;
36126dca716cSHongbo Zheng 
36136dca716cSHongbo Zheng 	/*
36146dca716cSHongbo Zheng 	 * The hns3 network engine requires that the sum of the data lengths of
36156dca716cSHongbo Zheng 	 * every 8 consecutive buffers be greater than the MSS. We simplify this
36166dca716cSHongbo Zheng 	 * by ensuring that the first 8 consecutive frags cover more than the
36176dca716cSHongbo Zheng 	 * GSO header length plus the MSS, and that every window of 7 consecutive
36186dca716cSHongbo Zheng 	 * frags among the rest covers more than the MSS, except the last 7 frags.
36196dca716cSHongbo Zheng 	 */
3620da17b003SHongbo Zheng 	if (bd_num <= max_non_tso_bd_num)
36216dca716cSHongbo Zheng 		return false;
36226dca716cSHongbo Zheng 
3623da17b003SHongbo Zheng 	for (i = 0; m_last && i < max_non_tso_bd_num - 1;
36246dca716cSHongbo Zheng 	     i++, m_last = m_last->next)
36256dca716cSHongbo Zheng 		tot_len += m_last->data_len;
36266dca716cSHongbo Zheng 
36276dca716cSHongbo Zheng 	if (!m_last)
36286dca716cSHongbo Zheng 		return true;
36296dca716cSHongbo Zheng 
36306dca716cSHongbo Zheng 	/* ensure the first 8 frags cover more than mss + header */
36316dca716cSHongbo Zheng 	hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
3632daa02b5cSOlivier Matz 	hdr_len += (tx_pkts->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
36336dca716cSHongbo Zheng 		   tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
36346dca716cSHongbo Zheng 	if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
36356dca716cSHongbo Zheng 		return true;
36366dca716cSHongbo Zheng 
36376dca716cSHongbo Zheng 	/*
36386dca716cSHongbo Zheng 	 * ensure the sum of the data lengths of every 7 consecutive buffers
36396dca716cSHongbo Zheng 	 * is greater than the mss, except for the last window.
36406dca716cSHongbo Zheng 	 */
3641da17b003SHongbo Zheng 	for (i = 0; m_last && i < bd_num - max_non_tso_bd_num; i++) {
36426dca716cSHongbo Zheng 		tot_len -= m_first->data_len;
36436dca716cSHongbo Zheng 		tot_len += m_last->data_len;
36446dca716cSHongbo Zheng 
36456dca716cSHongbo Zheng 		if (tot_len < tx_pkts->tso_segsz)
36466dca716cSHongbo Zheng 			return true;
36476dca716cSHongbo Zheng 
36486dca716cSHongbo Zheng 		m_first = m_first->next;
36496dca716cSHongbo Zheng 		m_last = m_last->next;
36506dca716cSHongbo Zheng 	}
36516dca716cSHongbo Zheng 
36526dca716cSHongbo Zheng 	return false;
36536dca716cSHongbo Zheng }
36546dca716cSHongbo Zheng 
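/*
 * Worked example for the check above (an assumed scenario): with
 * max_non_tso_bd_num == 8, tso_segsz == 1448 and a 54 byte header, a
 * 10-segment TSO packet whose first 8 segments sum to only 1400 bytes
 * fails the first test (1400 < 1448 + 54), so the prepare path rejects
 * it with EINVAL and the caller must linearize it before resubmitting.
 */
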
36556dca716cSHongbo Zheng static int
36566dca716cSHongbo Zheng hns3_check_tso_pkt_valid(struct rte_mbuf *m)
36576dca716cSHongbo Zheng {
36586dca716cSHongbo Zheng 	uint32_t tmp_data_len_sum = 0;
36596dca716cSHongbo Zheng 	uint16_t nb_buf = m->nb_segs;
36606dca716cSHongbo Zheng 	uint32_t paylen, hdr_len;
36616dca716cSHongbo Zheng 	struct rte_mbuf *m_seg;
36626dca716cSHongbo Zheng 	int i;
36636dca716cSHongbo Zheng 
36646dca716cSHongbo Zheng 	if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT)
36656dca716cSHongbo Zheng 		return -EINVAL;
36666dca716cSHongbo Zheng 
36676dca716cSHongbo Zheng 	hdr_len = m->l2_len + m->l3_len + m->l4_len;
3668daa02b5cSOlivier Matz 	hdr_len += (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
36696dca716cSHongbo Zheng 			m->outer_l2_len + m->outer_l3_len : 0;
36706dca716cSHongbo Zheng 	if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
36716dca716cSHongbo Zheng 		return -EINVAL;
36726dca716cSHongbo Zheng 
36736dca716cSHongbo Zheng 	paylen = m->pkt_len - hdr_len;
36746dca716cSHongbo Zheng 	if (paylen > HNS3_MAX_BD_PAYLEN)
36756dca716cSHongbo Zheng 		return -EINVAL;
36766dca716cSHongbo Zheng 
36776dca716cSHongbo Zheng 	/*
36786dca716cSHongbo Zheng 	 * The TSO header (including the outer and inner L2, L3 and L4
36796dca716cSHongbo Zheng 	 * headers) must be provided by at most three descriptors in the
36806dca716cSHongbo Zheng 	 * hns3 network engine.
36816dca716cSHongbo Zheng 	 */
36826dca716cSHongbo Zheng 	m_seg = m;
36836dca716cSHongbo Zheng 	for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf;
36846dca716cSHongbo Zheng 	     i++, m_seg = m_seg->next) {
36856dca716cSHongbo Zheng 		tmp_data_len_sum += m_seg->data_len;
36866dca716cSHongbo Zheng 	}
36876dca716cSHongbo Zheng 
36886dca716cSHongbo Zheng 	if (hdr_len > tmp_data_len_sum)
36896dca716cSHongbo Zheng 		return -EINVAL;
36906dca716cSHongbo Zheng 
36916dca716cSHongbo Zheng 	return 0;
36926dca716cSHongbo Zheng }
36936dca716cSHongbo Zheng 
3694b4e4d7acSChengchang Tang #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3695b4e4d7acSChengchang Tang static inline int
3696b4e4d7acSChengchang Tang hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
3697b4e4d7acSChengchang Tang {
3698b4e4d7acSChengchang Tang 	struct rte_ether_hdr *eh;
3699b4e4d7acSChengchang Tang 	struct rte_vlan_hdr *vh;
3700b4e4d7acSChengchang Tang 
3701992b24a1SWei Hu (Xavier) 	if (!txq->pvid_sw_shift_en)
3702b4e4d7acSChengchang Tang 		return 0;
3703b4e4d7acSChengchang Tang 
3704b4e4d7acSChengchang Tang 	/*
3705b4e4d7acSChengchang Tang 	 * Due to hardware limitations, the hns3 network engine only supports
3706b4e4d7acSChengchang Tang 	 * two-layer VLAN hardware offload in the Tx direction, so when PVID is
3707b4e4d7acSChengchang Tang 	 * enabled, QinQ insert is no longer supported.
3708b4e4d7acSChengchang Tang 	 * And when PVID is enabled, in the following two cases:
3709b4e4d7acSChengchang Tang 	 *  i) packets with more than two VLAN tags.
3710b4e4d7acSChengchang Tang 	 *  ii) packets with one VLAN tag while the hardware VLAN insert is
3711b4e4d7acSChengchang Tang 	 *      enabled.
3712b4e4d7acSChengchang Tang 	 * The packets will be regarded as abnormal packets and discarded by
3713b4e4d7acSChengchang Tang 	 * hardware in Tx direction. For debugging purposes, a validation check
3714b4e4d7acSChengchang Tang 	 * for these types of packets is added to the '.tx_pkt_prepare' ops
3715b4e4d7acSChengchang Tang 	 * implementation function named hns3_prep_pkts to inform users that
3716b4e4d7acSChengchang Tang 	 * these packets will be discarded.
3717b4e4d7acSChengchang Tang 	 */
3718daa02b5cSOlivier Matz 	if (m->ol_flags & RTE_MBUF_F_TX_QINQ)
3719b4e4d7acSChengchang Tang 		return -EINVAL;
3720b4e4d7acSChengchang Tang 
3721b4e4d7acSChengchang Tang 	eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
3722b4e4d7acSChengchang Tang 	if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
3723daa02b5cSOlivier Matz 		if (m->ol_flags & RTE_MBUF_F_TX_VLAN)
3724b4e4d7acSChengchang Tang 			return -EINVAL;
3725b4e4d7acSChengchang Tang 
3726b4e4d7acSChengchang Tang 		/* Ensure the incoming packet is not a QinQ packet */
3727b4e4d7acSChengchang Tang 		vh = (struct rte_vlan_hdr *)(eh + 1);
3728b4e4d7acSChengchang Tang 		if (vh->eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
3729b4e4d7acSChengchang Tang 			return -EINVAL;
3730b4e4d7acSChengchang Tang 	}
3731b4e4d7acSChengchang Tang 
3732b4e4d7acSChengchang Tang 	return 0;
3733b4e4d7acSChengchang Tang }
3734b4e4d7acSChengchang Tang #endif
3735b4e4d7acSChengchang Tang 
37368f01e2f8SChengchang Tang static uint16_t
37378f01e2f8SChengchang Tang hns3_udp_cksum_help(struct rte_mbuf *m)
37388f01e2f8SChengchang Tang {
37398f01e2f8SChengchang Tang 	uint64_t ol_flags = m->ol_flags;
37408f01e2f8SChengchang Tang 	uint16_t cksum = 0;
37418f01e2f8SChengchang Tang 	uint32_t l4_len;
37428f01e2f8SChengchang Tang 
3743daa02b5cSOlivier Matz 	if (ol_flags & RTE_MBUF_F_TX_IPV4) {
37448f01e2f8SChengchang Tang 		struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
37458f01e2f8SChengchang Tang 				struct rte_ipv4_hdr *, m->l2_len);
37468f01e2f8SChengchang Tang 		l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
37478f01e2f8SChengchang Tang 	} else {
37488f01e2f8SChengchang Tang 		struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
37498f01e2f8SChengchang Tang 				struct rte_ipv6_hdr *, m->l2_len);
37508f01e2f8SChengchang Tang 		l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
37518f01e2f8SChengchang Tang 	}
37528f01e2f8SChengchang Tang 
37538f01e2f8SChengchang Tang 	rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
37548f01e2f8SChengchang Tang 
37558f01e2f8SChengchang Tang 	cksum = ~cksum;
37568f01e2f8SChengchang Tang 	/*
37578f01e2f8SChengchang Tang 	 * RFC 768: if the computed UDP checksum is zero, it is transmitted
37588f01e2f8SChengchang Tang 	 * as all ones.
37598f01e2f8SChengchang Tang 	 */
37608f01e2f8SChengchang Tang 	if (cksum == 0)
37618f01e2f8SChengchang Tang 		cksum = 0xffff;
37628f01e2f8SChengchang Tang 
37638f01e2f8SChengchang Tang 	return (uint16_t)cksum;
37648f01e2f8SChengchang Tang }
37658f01e2f8SChengchang Tang 
37668f01e2f8SChengchang Tang static bool
37678f01e2f8SChengchang Tang hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
37688f01e2f8SChengchang Tang {
37698f01e2f8SChengchang Tang 	uint64_t ol_flags = m->ol_flags;
37708f01e2f8SChengchang Tang 	struct rte_udp_hdr *udp_hdr;
37718f01e2f8SChengchang Tang 	uint16_t dst_port;
37728f01e2f8SChengchang Tang 
37738f01e2f8SChengchang Tang 	if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
3774daa02b5cSOlivier Matz 	    ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK ||
3775daa02b5cSOlivier Matz 	    (ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_UDP_CKSUM)
37768f01e2f8SChengchang Tang 		return true;
37778f01e2f8SChengchang Tang 	/*
37788f01e2f8SChengchang Tang 	 * A UDP packet with the same dst_port as VXLAN/VXLAN_GPE/GENEVE will
37798f01e2f8SChengchang Tang 	 * be recognized as a tunnel packet by the HW. In this case, if UDP
37808f01e2f8SChengchang Tang 	 * CKSUM offload is set but the tunnel mask has not been set, the CKSUM
37818f01e2f8SChengchang Tang 	 * would be computed over the wrong header length, so the driver must
37828f01e2f8SChengchang Tang 	 * complete the CKSUM in software to avoid a CKSUM error.
37838f01e2f8SChengchang Tang 	 */
37848f01e2f8SChengchang Tang 	udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
37858f01e2f8SChengchang Tang 						m->l2_len + m->l3_len);
37868f01e2f8SChengchang Tang 	dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
37878f01e2f8SChengchang Tang 	switch (dst_port) {
37888f01e2f8SChengchang Tang 	case RTE_VXLAN_DEFAULT_PORT:
37898f01e2f8SChengchang Tang 	case RTE_VXLAN_GPE_DEFAULT_PORT:
37908f01e2f8SChengchang Tang 	case RTE_GENEVE_DEFAULT_PORT:
37918f01e2f8SChengchang Tang 		udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
3792daa02b5cSOlivier Matz 		m->ol_flags = ol_flags & ~RTE_MBUF_F_TX_L4_MASK;
37938f01e2f8SChengchang Tang 		return false;
37948f01e2f8SChengchang Tang 	default:
37958f01e2f8SChengchang Tang 		return true;
37968f01e2f8SChengchang Tang 	}
37978f01e2f8SChengchang Tang }
37988f01e2f8SChengchang Tang 
3799dd1e4611SWei Hu (Xavier) static int
3800dd1e4611SWei Hu (Xavier) hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
3801bba63669SWei Hu (Xavier) {
3802bba63669SWei Hu (Xavier) 	int ret;
3803bba63669SWei Hu (Xavier) 
3804bba63669SWei Hu (Xavier) #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3805bba63669SWei Hu (Xavier) 	ret = rte_validate_tx_offload(m);
3806bba63669SWei Hu (Xavier) 	if (ret != 0) {
3807bba63669SWei Hu (Xavier) 		rte_errno = -ret;
3808dd1e4611SWei Hu (Xavier) 		return ret;
3809bba63669SWei Hu (Xavier) 	}
3810b4e4d7acSChengchang Tang 
3811dd1e4611SWei Hu (Xavier) 	ret = hns3_vld_vlan_chk(tx_queue, m);
3812dd1e4611SWei Hu (Xavier) 	if (ret != 0) {
3813b4e4d7acSChengchang Tang 		rte_errno = EINVAL;
3814dd1e4611SWei Hu (Xavier) 		return ret;
3815b4e4d7acSChengchang Tang 	}
3816bba63669SWei Hu (Xavier) #endif
3817dd1e4611SWei Hu (Xavier) 	if (hns3_pkt_is_tso(m)) {
3818dd1e4611SWei Hu (Xavier) 		if (hns3_pkt_need_linearized(m, m->nb_segs,
3819dd1e4611SWei Hu (Xavier) 		    tx_queue->max_non_tso_bd_num) ||
3820dd1e4611SWei Hu (Xavier) 		    hns3_check_tso_pkt_valid(m)) {
3821dd1e4611SWei Hu (Xavier) 			rte_errno = EINVAL;
3822dd1e4611SWei Hu (Xavier) 			return -EINVAL;
3823dd1e4611SWei Hu (Xavier) 		}
3824dd1e4611SWei Hu (Xavier) 
3825dd1e4611SWei Hu (Xavier) 		if (tx_queue->tso_mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) {
3826dd1e4611SWei Hu (Xavier) 			 * (tso mode != HNS3_TSO_SW_CAL_PSEUDO_H_CSUM) means the
3827dd1e4611SWei Hu (Xavier) 			 * hardware can recalculate the TCP pseudo header
3828dd1e4611SWei Hu (Xavier) 			 * checksum of packets that need TSO, so the network
3829dd1e4611SWei Hu (Xavier) 			 * driver software does not need to recalculate it.
3830dd1e4611SWei Hu (Xavier) 			 * software not need to recalculate it.
3831dd1e4611SWei Hu (Xavier) 			 */
3832dd1e4611SWei Hu (Xavier) 			return 0;
3833dd1e4611SWei Hu (Xavier) 		}
3834dd1e4611SWei Hu (Xavier) 	}
3835dd1e4611SWei Hu (Xavier) 
3836bba63669SWei Hu (Xavier) 	ret = rte_net_intel_cksum_prepare(m);
3837bba63669SWei Hu (Xavier) 	if (ret != 0) {
3838bba63669SWei Hu (Xavier) 		rte_errno = -ret;
3839dd1e4611SWei Hu (Xavier) 		return ret;
3840bba63669SWei Hu (Xavier) 	}
38416dca716cSHongbo Zheng 
38428f01e2f8SChengchang Tang 	if (!hns3_validate_tunnel_cksum(tx_queue, m))
38438f01e2f8SChengchang Tang 		return 0;
38448f01e2f8SChengchang Tang 
3845dd1e4611SWei Hu (Xavier) 	return 0;
3846dd1e4611SWei Hu (Xavier) }
3847dd1e4611SWei Hu (Xavier) 
3848dd1e4611SWei Hu (Xavier) uint16_t
3849dd1e4611SWei Hu (Xavier) hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3850dd1e4611SWei Hu (Xavier) 	       uint16_t nb_pkts)
3851dd1e4611SWei Hu (Xavier) {
3852dd1e4611SWei Hu (Xavier) 	struct rte_mbuf *m;
3853dd1e4611SWei Hu (Xavier) 	uint16_t i;
3854dd1e4611SWei Hu (Xavier) 
3855dd1e4611SWei Hu (Xavier) 	for (i = 0; i < nb_pkts; i++) {
3856dd1e4611SWei Hu (Xavier) 		m = tx_pkts[i];
3857dd1e4611SWei Hu (Xavier) 		if (hns3_prep_pkt_proc(tx_queue, m))
3858dd1e4611SWei Hu (Xavier) 			return i;
3859bba63669SWei Hu (Xavier) 	}
3860bba63669SWei Hu (Xavier) 
3861bba63669SWei Hu (Xavier) 	return i;
3862bba63669SWei Hu (Xavier) }
3863bba63669SWei Hu (Xavier) 
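/*
 * Usage sketch (illustrative only): hns3_prep_pkts is installed as the
 * '.tx_pkt_prepare' ops, so an application pairs it with the burst transmit
 * path roughly as follows. The port id, queue id 0 and the caller-owned
 * pkts[] array are assumptions.
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_pkts);
 *	uint16_t nb_sent = rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
 *
 * When nb_prep < nb_pkts, pkts[nb_prep] is the first rejected packet and
 * rte_errno holds the reason.
 */
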
38646393fc0bSDongdong Liu static inline int
38656393fc0bSDongdong Liu hns3_handle_simple_bd(struct hns3_tx_queue *txq, struct hns3_desc *desc,
38666393fc0bSDongdong Liu 		      struct rte_mbuf *m)
38676393fc0bSDongdong Liu {
38686393fc0bSDongdong Liu #define HNS3_TCP_CSUM_OFFSET	16
38696393fc0bSDongdong Liu #define HNS3_UDP_CSUM_OFFSET	6
38706393fc0bSDongdong Liu 
38716393fc0bSDongdong Liu 	/*
38726393fc0bSDongdong Liu 	 * On HIP09, the NIC HW supports a Tx simple BD mode in which the HW
38736393fc0bSDongdong Liu 	 * calculates the checksum from a given checksum start position and
38746393fc0bSDongdong Liu 	 * fills the result in at a given offset position, without needing the
38756393fc0bSDongdong Liu 	 * packet type or the L3/L4 header lengths.
38766393fc0bSDongdong Liu 	 * For non-tunneling packets:
38776393fc0bSDongdong Liu 	 * - Tx simple BD supports TCP and UDP checksum.
38786393fc0bSDongdong Liu 	 * For tunneling packets:
38796393fc0bSDongdong Liu 	 * - Tx simple BD supports the inner L4 checksum (except SCTP checksum).
38806393fc0bSDongdong Liu 	 * - Tx simple BD does not support the outer checksum or the inner L3
38816393fc0bSDongdong Liu 	 *   checksum.
38826393fc0bSDongdong Liu 	 * - Besides, Tx simple BD does not support TSO.
38836393fc0bSDongdong Liu 	 */
38846393fc0bSDongdong Liu 	if (txq->simple_bd_enable && !(m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
38856393fc0bSDongdong Liu 	    !(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
38866393fc0bSDongdong Liu 	    !(m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) &&
38876393fc0bSDongdong Liu 	    ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM ||
38886393fc0bSDongdong Liu 	    (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)) {
38896393fc0bSDongdong Liu 		/* set checksum start and offset, defined in units of 2 bytes */
38906393fc0bSDongdong Liu 		hns3_set_field(desc->tx.type_cs_vlan_tso_len,
38916393fc0bSDongdong Liu 			       HNS3_TXD_L4_START_M, HNS3_TXD_L4_START_S,
38926393fc0bSDongdong Liu 			       (m->l2_len + m->l3_len) >> HNS3_SIMPLE_BD_UNIT);
38936393fc0bSDongdong Liu 		hns3_set_field(desc->tx.ol_type_vlan_len_msec,
38946393fc0bSDongdong Liu 			   HNS3_TXD_L4_CKS_OFFSET_M, HNS3_TXD_L4_CKS_OFFSET_S,
38956393fc0bSDongdong Liu 			   (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
38966393fc0bSDongdong Liu 			   RTE_MBUF_F_TX_TCP_CKSUM ?
38976393fc0bSDongdong Liu 			   HNS3_TCP_CSUM_OFFSET >> HNS3_SIMPLE_BD_UNIT :
38986393fc0bSDongdong Liu 			   HNS3_UDP_CSUM_OFFSET >> HNS3_SIMPLE_BD_UNIT);
38996393fc0bSDongdong Liu 
39006393fc0bSDongdong Liu 		hns3_set_bit(desc->tx.ckst_mss, HNS3_TXD_CKST_B, 1);
39016393fc0bSDongdong Liu 
39026393fc0bSDongdong Liu 		return 0;
39036393fc0bSDongdong Liu 	}
39046393fc0bSDongdong Liu 
39056393fc0bSDongdong Liu 	return -ENOTSUP;
39066393fc0bSDongdong Liu }
39076393fc0bSDongdong Liu 
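/*
 * For the conditions above: a plain (non-tunnel) UDP packet carrying only
 * RTE_MBUF_F_TX_UDP_CKSUM qualifies for simple BD, while a packet that
 * also requests RTE_MBUF_F_TX_IP_CKSUM, RTE_MBUF_F_TX_OUTER_IP_CKSUM or
 * RTE_MBUF_F_TX_TCP_SEG falls back to the full checksum path below.
 */
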
3908bba63669SWei Hu (Xavier) static int
3909bba63669SWei Hu (Xavier) hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
3910fb6eb900SChengchang Tang 		 struct rte_mbuf *m)
3911bba63669SWei Hu (Xavier) {
3912fb6eb900SChengchang Tang 	struct hns3_desc *tx_ring = txq->tx_ring;
3913fb6eb900SChengchang Tang 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
3914fb6eb900SChengchang Tang 
3915fb6eb900SChengchang Tang 	/* Enable checksum offloading */
3916fb6eb900SChengchang Tang 	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
39176393fc0bSDongdong Liu 		if (hns3_handle_simple_bd(txq, desc, m) == 0)
39186393fc0bSDongdong Liu 			return 0;
3919bba63669SWei Hu (Xavier) 		/* Fill in tunneling parameters if necessary */
3920fb6eb900SChengchang Tang 		if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
39219b77f1feSHuisong Li 			txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
3922bba63669SWei Hu (Xavier) 			return -EINVAL;
3923bba63669SWei Hu (Xavier) 		}
3924fb6eb900SChengchang Tang 
3925fb6eb900SChengchang Tang 		hns3_txd_enable_checksum(txq, m, tx_desc_id);
3926fb6eb900SChengchang Tang 	} else {
3927fb6eb900SChengchang Tang 		/* clear the control bit */
3928fb6eb900SChengchang Tang 		desc->tx.type_cs_vlan_tso_len  = 0;
3929fb6eb900SChengchang Tang 		desc->tx.ol_type_vlan_len_msec = 0;
3930c4b7d676SWei Hu (Xavier) 	}
3931bba63669SWei Hu (Xavier) 
3932bba63669SWei Hu (Xavier) 	return 0;
3933bba63669SWei Hu (Xavier) }
3934bba63669SWei Hu (Xavier) 
39356dca716cSHongbo Zheng static int
39366dca716cSHongbo Zheng hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
39376dca716cSHongbo Zheng 		      struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq)
39386dca716cSHongbo Zheng {
3939da17b003SHongbo Zheng 	uint8_t max_non_tso_bd_num;
39406dca716cSHongbo Zheng 	struct rte_mbuf *new_pkt;
39416dca716cSHongbo Zheng 	int ret;
39426dca716cSHongbo Zheng 
39436dca716cSHongbo Zheng 	if (hns3_pkt_is_tso(*m_seg))
39446dca716cSHongbo Zheng 		return 0;
39456dca716cSHongbo Zheng 
39466dca716cSHongbo Zheng 	/*
39476dca716cSHongbo Zheng 	 * If the packet length is greater than the HNS3_MAX_FRAME_LEN that
39486dca716cSHongbo Zheng 	 * the driver supports, the packet will be dropped.
39496dca716cSHongbo Zheng 	 */
3950c4b7d676SWei Hu (Xavier) 	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
39519b77f1feSHuisong Li 		txq->dfx_stats.over_length_pkt_cnt++;
39526dca716cSHongbo Zheng 		return -EINVAL;
3953c4b7d676SWei Hu (Xavier) 	}
39546dca716cSHongbo Zheng 
3955da17b003SHongbo Zheng 	max_non_tso_bd_num = txq->max_non_tso_bd_num;
3956da17b003SHongbo Zheng 	if (unlikely(nb_buf > max_non_tso_bd_num)) {
39579b77f1feSHuisong Li 		txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
3958da17b003SHongbo Zheng 		ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
3959da17b003SHongbo Zheng 					      max_non_tso_bd_num);
3960c4b7d676SWei Hu (Xavier) 		if (ret) {
39619b77f1feSHuisong Li 			txq->dfx_stats.exceed_limit_bd_reassem_fail++;
39626dca716cSHongbo Zheng 			return ret;
3963c4b7d676SWei Hu (Xavier) 		}
39646dca716cSHongbo Zheng 		*m_seg = new_pkt;
39656dca716cSHongbo Zheng 	}
39666dca716cSHongbo Zheng 
39676dca716cSHongbo Zheng 	return 0;
39686dca716cSHongbo Zheng }
39696dca716cSHongbo Zheng 
39707ef93390SWei Hu (Xavier) static inline void
39717ef93390SWei Hu (Xavier) hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq)
39727ef93390SWei Hu (Xavier) {
39737ef93390SWei Hu (Xavier) 	struct hns3_entry *tx_entry;
39747ef93390SWei Hu (Xavier) 	struct hns3_desc *desc;
39757ef93390SWei Hu (Xavier) 	uint16_t tx_next_clean;
397667d01034SHuisong Li 	uint16_t i;
39777ef93390SWei Hu (Xavier) 
39787ef93390SWei Hu (Xavier) 	while (1) {
39797ef93390SWei Hu (Xavier) 		if (HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) < txq->tx_rs_thresh)
39807ef93390SWei Hu (Xavier) 			break;
39817ef93390SWei Hu (Xavier) 
39827ef93390SWei Hu (Xavier) 		/*
39837ef93390SWei Hu (Xavier) 		 * All mbufs can be released only when the VLD bits of all
39847ef93390SWei Hu (Xavier) 		 * descriptors in a batch are cleared.
39857ef93390SWei Hu (Xavier) 		 */
39867ef93390SWei Hu (Xavier) 		tx_next_clean = (txq->next_to_clean + txq->tx_rs_thresh - 1) %
39877ef93390SWei Hu (Xavier) 				txq->nb_tx_desc;
39887ef93390SWei Hu (Xavier) 		desc = &txq->tx_ring[tx_next_clean];
39897ef93390SWei Hu (Xavier) 		for (i = 0; i < txq->tx_rs_thresh; i++) {
39907ef93390SWei Hu (Xavier) 			if (rte_le_to_cpu_16(desc->tx.tp_fe_sc_vld_ra_ri) &
39917ef93390SWei Hu (Xavier) 					BIT(HNS3_TXD_VLD_B))
39927ef93390SWei Hu (Xavier) 				return;
39937ef93390SWei Hu (Xavier) 			desc--;
39947ef93390SWei Hu (Xavier) 		}
39957ef93390SWei Hu (Xavier) 
39967ef93390SWei Hu (Xavier) 		tx_entry = &txq->sw_ring[txq->next_to_clean];
39977ef93390SWei Hu (Xavier) 
39983cc817c1SChengwen Feng 		if (txq->mbuf_fast_free_en) {
39993cc817c1SChengwen Feng 			rte_mempool_put_bulk(tx_entry->mbuf->pool,
40003cc817c1SChengwen Feng 					(void **)tx_entry, txq->tx_rs_thresh);
40013cc817c1SChengwen Feng 			for (i = 0; i < txq->tx_rs_thresh; i++)
40023cc817c1SChengwen Feng 				tx_entry[i].mbuf = NULL;
40033cc817c1SChengwen Feng 			goto update_field;
40043cc817c1SChengwen Feng 		}
40053cc817c1SChengwen Feng 
40067ef93390SWei Hu (Xavier) 		for (i = 0; i < txq->tx_rs_thresh; i++)
40077ef93390SWei Hu (Xavier) 			rte_prefetch0((tx_entry + i)->mbuf);
40087ef93390SWei Hu (Xavier) 		for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
4009*d78c76dbSDengdui Huang 			rte_pktmbuf_free_seg(tx_entry->mbuf);
40107ef93390SWei Hu (Xavier) 			tx_entry->mbuf = NULL;
40117ef93390SWei Hu (Xavier) 		}
40127ef93390SWei Hu (Xavier) 
40133cc817c1SChengwen Feng update_field:
40147ef93390SWei Hu (Xavier) 		txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc;
40157ef93390SWei Hu (Xavier) 		txq->tx_bd_ready += txq->tx_rs_thresh;
40167ef93390SWei Hu (Xavier) 	}
40177ef93390SWei Hu (Xavier) }
40187ef93390SWei Hu (Xavier) 
40197ef93390SWei Hu (Xavier) static inline void
40207ef93390SWei Hu (Xavier) hns3_tx_backup_1mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
40217ef93390SWei Hu (Xavier) {
40227ef93390SWei Hu (Xavier) 	tx_entry->mbuf = pkts[0];
40237ef93390SWei Hu (Xavier) }
40247ef93390SWei Hu (Xavier) 
40257ef93390SWei Hu (Xavier) static inline void
40267ef93390SWei Hu (Xavier) hns3_tx_backup_4mbuf(struct hns3_entry *tx_entry, struct rte_mbuf **pkts)
40277ef93390SWei Hu (Xavier) {
40287ef93390SWei Hu (Xavier) 	hns3_tx_backup_1mbuf(&tx_entry[0], &pkts[0]);
40297ef93390SWei Hu (Xavier) 	hns3_tx_backup_1mbuf(&tx_entry[1], &pkts[1]);
40307ef93390SWei Hu (Xavier) 	hns3_tx_backup_1mbuf(&tx_entry[2], &pkts[2]);
40317ef93390SWei Hu (Xavier) 	hns3_tx_backup_1mbuf(&tx_entry[3], &pkts[3]);
40327ef93390SWei Hu (Xavier) }
40337ef93390SWei Hu (Xavier) 
40347ef93390SWei Hu (Xavier) static inline void
40357ef93390SWei Hu (Xavier) hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
40367ef93390SWei Hu (Xavier) {
40377ef93390SWei Hu (Xavier) #define PER_LOOP_NUM	4
40383ca3dcd6SMin Hu (Connor) 	uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
40397ef93390SWei Hu (Xavier) 	uint64_t dma_addr;
40407ef93390SWei Hu (Xavier) 	uint32_t i;
40417ef93390SWei Hu (Xavier) 
40427ef93390SWei Hu (Xavier) 	for (i = 0; i < PER_LOOP_NUM; i++, txdp++, pkts++) {
40437ef93390SWei Hu (Xavier) 		dma_addr = rte_mbuf_data_iova(*pkts);
40447ef93390SWei Hu (Xavier) 		txdp->addr = rte_cpu_to_le_64(dma_addr);
40457ef93390SWei Hu (Xavier) 		txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
4046d0ab89e6SChengchang Tang 		txdp->tx.paylen_fd_dop_ol4cs = 0;
40477ef93390SWei Hu (Xavier) 		txdp->tx.type_cs_vlan_tso_len = 0;
40487ef93390SWei Hu (Xavier) 		txdp->tx.ol_type_vlan_len_msec = 0;
40493ca3dcd6SMin Hu (Connor) 		if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST))
40503ca3dcd6SMin Hu (Connor) 			bd_flag |= BIT(HNS3_TXD_TSYN_B);
40517ef93390SWei Hu (Xavier) 		txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
40527ef93390SWei Hu (Xavier) 	}
40537ef93390SWei Hu (Xavier) }
40547ef93390SWei Hu (Xavier) 
40557ef93390SWei Hu (Xavier) static inline void
40567ef93390SWei Hu (Xavier) hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts)
40577ef93390SWei Hu (Xavier) {
40583ca3dcd6SMin Hu (Connor) 	uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B);
40597ef93390SWei Hu (Xavier) 	uint64_t dma_addr;
40607ef93390SWei Hu (Xavier) 
40617ef93390SWei Hu (Xavier) 	dma_addr = rte_mbuf_data_iova(*pkts);
40627ef93390SWei Hu (Xavier) 	txdp->addr = rte_cpu_to_le_64(dma_addr);
40637ef93390SWei Hu (Xavier) 	txdp->tx.send_size = rte_cpu_to_le_16((*pkts)->data_len);
4064d0ab89e6SChengchang Tang 	txdp->tx.paylen_fd_dop_ol4cs = 0;
40657ef93390SWei Hu (Xavier) 	txdp->tx.type_cs_vlan_tso_len = 0;
40667ef93390SWei Hu (Xavier) 	txdp->tx.ol_type_vlan_len_msec = 0;
40673ca3dcd6SMin Hu (Connor) 	if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST))
40683ca3dcd6SMin Hu (Connor) 		bd_flag |= BIT(HNS3_TXD_TSYN_B);
40697ef93390SWei Hu (Xavier) 	txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag);
40707ef93390SWei Hu (Xavier) }
40717ef93390SWei Hu (Xavier) 
40727ef93390SWei Hu (Xavier) static inline void
40737ef93390SWei Hu (Xavier) hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq,
40747ef93390SWei Hu (Xavier) 		     struct rte_mbuf **pkts,
40757ef93390SWei Hu (Xavier) 		     uint16_t nb_pkts)
40767ef93390SWei Hu (Xavier) {
40777ef93390SWei Hu (Xavier) #define PER_LOOP_NUM	4
40787ef93390SWei Hu (Xavier) #define PER_LOOP_MASK	(PER_LOOP_NUM - 1)
40797ef93390SWei Hu (Xavier) 	struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
40807ef93390SWei Hu (Xavier) 	struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
40817ef93390SWei Hu (Xavier) 	const uint32_t mainpart = (nb_pkts & ((uint32_t)~PER_LOOP_MASK));
40827ef93390SWei Hu (Xavier) 	const uint32_t leftover = (nb_pkts & ((uint32_t)PER_LOOP_MASK));
40837ef93390SWei Hu (Xavier) 	uint32_t i;
40847ef93390SWei Hu (Xavier) 
40857ef93390SWei Hu (Xavier) 	for (i = 0; i < mainpart; i += PER_LOOP_NUM) {
40867ef93390SWei Hu (Xavier) 		hns3_tx_backup_4mbuf(tx_entry + i, pkts + i);
40877ef93390SWei Hu (Xavier) 		hns3_tx_setup_4bd(txdp + i, pkts + i);
4088fdcd6a3eSMin Hu (Connor) 
4089fdcd6a3eSMin Hu (Connor) 		/* Increment bytes counter */
4090fdcd6a3eSMin Hu (Connor) 		uint32_t j;
4091fdcd6a3eSMin Hu (Connor) 		for (j = 0; j < PER_LOOP_NUM; j++)
4092fdcd6a3eSMin Hu (Connor) 			txq->basic_stats.bytes += pkts[i + j]->pkt_len;
40937ef93390SWei Hu (Xavier) 	}
40947ef93390SWei Hu (Xavier) 	if (unlikely(leftover > 0)) {
40957ef93390SWei Hu (Xavier) 		for (i = 0; i < leftover; i++) {
40967ef93390SWei Hu (Xavier) 			hns3_tx_backup_1mbuf(tx_entry + mainpart + i,
40977ef93390SWei Hu (Xavier) 					     pkts + mainpart + i);
40987ef93390SWei Hu (Xavier) 			hns3_tx_setup_1bd(txdp + mainpart + i,
40997ef93390SWei Hu (Xavier) 					  pkts + mainpart + i);
4100fdcd6a3eSMin Hu (Connor) 
4101fdcd6a3eSMin Hu (Connor) 			/* Increment bytes counter */
4102fdcd6a3eSMin Hu (Connor) 			txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len;
41037ef93390SWei Hu (Xavier) 		}
41047ef93390SWei Hu (Xavier) 	}
41057ef93390SWei Hu (Xavier) }
41067ef93390SWei Hu (Xavier) 
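/*
 * Simple Tx burst, selected when the only enabled Tx offload is (at
 * most) RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE (see
 * hns3_tx_check_simple_support()). When the writable range wraps around
 * the ring end, the burst is split into two hns3_tx_fill_hw_ring()
 * calls and the doorbell is rung once for the whole burst.
 */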
41077ef93390SWei Hu (Xavier) uint16_t
41087ef93390SWei Hu (Xavier) hns3_xmit_pkts_simple(void *tx_queue,
41097ef93390SWei Hu (Xavier) 		      struct rte_mbuf **tx_pkts,
41107ef93390SWei Hu (Xavier) 		      uint16_t nb_pkts)
41117ef93390SWei Hu (Xavier) {
41127ef93390SWei Hu (Xavier) 	struct hns3_tx_queue *txq = tx_queue;
41137ef93390SWei Hu (Xavier) 	uint16_t nb_tx = 0;
41147ef93390SWei Hu (Xavier) 
41157ef93390SWei Hu (Xavier) 	hns3_tx_free_buffer_simple(txq);
41167ef93390SWei Hu (Xavier) 
41177ef93390SWei Hu (Xavier) 	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
41187ef93390SWei Hu (Xavier) 	if (unlikely(nb_pkts == 0)) {
41197ef93390SWei Hu (Xavier) 		if (txq->tx_bd_ready == 0)
41209b77f1feSHuisong Li 			txq->dfx_stats.queue_full_cnt++;
41217ef93390SWei Hu (Xavier) 		return 0;
41227ef93390SWei Hu (Xavier) 	}
41237ef93390SWei Hu (Xavier) 
41247ef93390SWei Hu (Xavier) 	txq->tx_bd_ready -= nb_pkts;
412576a9c980SChengwen Feng 	if (txq->next_to_use + nb_pkts >= txq->nb_tx_desc) {
41267ef93390SWei Hu (Xavier) 		nb_tx = txq->nb_tx_desc - txq->next_to_use;
41277ef93390SWei Hu (Xavier) 		hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx);
41287ef93390SWei Hu (Xavier) 		txq->next_to_use = 0;
41297ef93390SWei Hu (Xavier) 	}
41307ef93390SWei Hu (Xavier) 
413176a9c980SChengwen Feng 	if (nb_pkts > nb_tx) {
41327ef93390SWei Hu (Xavier) 		hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
41337ef93390SWei Hu (Xavier) 		txq->next_to_use += nb_pkts - nb_tx;
413476a9c980SChengwen Feng 	}
41357ef93390SWei Hu (Xavier) 
413623e317ddSChengwen Feng 	hns3_write_txq_tail_reg(txq, nb_pkts);
41377ef93390SWei Hu (Xavier) 
41387ef93390SWei Hu (Xavier) 	return nb_pkts;
41397ef93390SWei Hu (Xavier) }
41407ef93390SWei Hu (Xavier) 
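/*
 * Full-featured scalar Tx burst. Completed descriptors are reclaimed
 * once on entry; then, for each packet, frames shorter than
 * txq->min_tx_pkt_len are zero-padded, checksum/TSO fields are
 * resolved, the first descriptor is filled with the per-packet metadata
 * and one descriptor is filled per segment. The doorbell is rung once
 * at the end for all accepted packets.
 */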
4141bba63669SWei Hu (Xavier) uint16_t
4142bba63669SWei Hu (Xavier) hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4143bba63669SWei Hu (Xavier) {
4144bba63669SWei Hu (Xavier) 	struct hns3_tx_queue *txq = tx_queue;
4145bba63669SWei Hu (Xavier) 	struct hns3_entry *tx_bak_pkt;
4146fc9b57ffSWei Hu (Xavier) 	struct hns3_desc *tx_ring;
4147bba63669SWei Hu (Xavier) 	struct rte_mbuf *tx_pkt;
4148bba63669SWei Hu (Xavier) 	struct rte_mbuf *m_seg;
4149fc9b57ffSWei Hu (Xavier) 	struct hns3_desc *desc;
4150bba63669SWei Hu (Xavier) 	uint32_t nb_hold = 0;
4151bba63669SWei Hu (Xavier) 	uint16_t tx_next_use;
4152bba63669SWei Hu (Xavier) 	uint16_t tx_pkt_num;
4153bba63669SWei Hu (Xavier) 	uint16_t tx_bd_max;
4154bba63669SWei Hu (Xavier) 	uint16_t nb_buf;
4155bba63669SWei Hu (Xavier) 	uint16_t nb_tx;
4156bba63669SWei Hu (Xavier) 	uint16_t i;
4157bba63669SWei Hu (Xavier) 
415860560096SChengwen Feng 	hns3_tx_free_useless_buffer(txq);
4159bba63669SWei Hu (Xavier) 
4160bba63669SWei Hu (Xavier) 	tx_next_use   = txq->next_to_use;
4161bba63669SWei Hu (Xavier) 	tx_bd_max     = txq->nb_tx_desc;
4162eb570862SYisen Zhuang 	tx_pkt_num = nb_pkts;
4163fc9b57ffSWei Hu (Xavier) 	tx_ring = txq->tx_ring;
4164bba63669SWei Hu (Xavier) 
4165bba63669SWei Hu (Xavier) 	/* send packets */
4166bba63669SWei Hu (Xavier) 	tx_bak_pkt = &txq->sw_ring[tx_next_use];
4167bba63669SWei Hu (Xavier) 	for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) {
4168bba63669SWei Hu (Xavier) 		tx_pkt = *tx_pkts++;
4169bba63669SWei Hu (Xavier) 
4170bba63669SWei Hu (Xavier) 		nb_buf = tx_pkt->nb_segs;
4171bba63669SWei Hu (Xavier) 
4172eb570862SYisen Zhuang 		if (nb_buf > txq->tx_bd_ready) {
41739b77f1feSHuisong Li 			txq->dfx_stats.queue_full_cnt++;
417460560096SChengwen Feng 			if (nb_tx == 0)
417560560096SChengwen Feng 				return 0;
4176bba63669SWei Hu (Xavier) 			goto end_of_tx;
4177bba63669SWei Hu (Xavier) 		}
4178bba63669SWei Hu (Xavier) 
4179bba63669SWei Hu (Xavier) 		/*
4180395b5e08SWei Hu (Xavier) 		 * If the packet length is less than the minimum packet length
4181395b5e08SWei Hu (Xavier) 		 * supported by the hardware in the Tx direction, the driver
4182395b5e08SWei Hu (Xavier) 		 * needs to pad it to avoid an error.
4183de620754SWei Hu (Xavier) 		 */
4184395b5e08SWei Hu (Xavier) 		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
4185395b5e08SWei Hu (Xavier) 						txq->min_tx_pkt_len)) {
4186de620754SWei Hu (Xavier) 			uint16_t add_len;
4187de620754SWei Hu (Xavier) 			char *appended;
4188de620754SWei Hu (Xavier) 
4189395b5e08SWei Hu (Xavier) 			add_len = txq->min_tx_pkt_len -
4190de620754SWei Hu (Xavier) 					 rte_pktmbuf_pkt_len(tx_pkt);
4191de620754SWei Hu (Xavier) 			appended = rte_pktmbuf_append(tx_pkt, add_len);
4192c4b7d676SWei Hu (Xavier) 			if (appended == NULL) {
41939b77f1feSHuisong Li 				txq->dfx_stats.pkt_padding_fail_cnt++;
4194de620754SWei Hu (Xavier) 				break;
4195c4b7d676SWei Hu (Xavier) 			}
4196de620754SWei Hu (Xavier) 
4197de620754SWei Hu (Xavier) 			memset(appended, 0, add_len);
4198de620754SWei Hu (Xavier) 		}
4199de620754SWei Hu (Xavier) 
4200bba63669SWei Hu (Xavier) 		m_seg = tx_pkt;
42016dca716cSHongbo Zheng 
42026dca716cSHongbo Zheng 		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
4203bba63669SWei Hu (Xavier) 			goto end_of_tx;
4204bba63669SWei Hu (Xavier) 
4205fb6eb900SChengchang Tang 		if (hns3_parse_cksum(txq, tx_next_use, m_seg))
4206bba63669SWei Hu (Xavier) 			goto end_of_tx;
4207bba63669SWei Hu (Xavier) 
4208bba63669SWei Hu (Xavier) 		i = 0;
4209fc9b57ffSWei Hu (Xavier) 		desc = &tx_ring[tx_next_use];
4210fc9b57ffSWei Hu (Xavier) 
4211fc9b57ffSWei Hu (Xavier) 		/*
4212fc9b57ffSWei Hu (Xavier) 		 * If the packet is divided into multiple Tx Buffer Descriptors,
4213fc9b57ffSWei Hu (Xavier) 		 * only the first Tx Buffer Descriptor needs to be filled with
4214fc9b57ffSWei Hu (Xavier) 		 * the VLAN, paylen and TSO information.
4215fc9b57ffSWei Hu (Xavier) 		 */
4216fc9b57ffSWei Hu (Xavier) 		hns3_fill_first_desc(txq, desc, m_seg);
4217fc9b57ffSWei Hu (Xavier) 
4218bba63669SWei Hu (Xavier) 		do {
4219fc9b57ffSWei Hu (Xavier) 			desc = &tx_ring[tx_next_use];
4220fc9b57ffSWei Hu (Xavier) 			/*
4221fc9b57ffSWei Hu (Xavier) 			 * Fill valid bits, DMA address and data length for each
4222fc9b57ffSWei Hu (Xavier) 			 * Tx Buffer Descriptor.
4223fc9b57ffSWei Hu (Xavier) 			 */
4224fc9b57ffSWei Hu (Xavier) 			hns3_fill_per_desc(desc, m_seg);
4225bba63669SWei Hu (Xavier) 			tx_bak_pkt->mbuf = m_seg;
422627f97077SWei Hu (Xavier) 			m_seg = m_seg->next;
4227bba63669SWei Hu (Xavier) 			tx_next_use++;
4228bba63669SWei Hu (Xavier) 			tx_bak_pkt++;
4229bba63669SWei Hu (Xavier) 			if (tx_next_use >= tx_bd_max) {
4230bba63669SWei Hu (Xavier) 				tx_next_use = 0;
4231bba63669SWei Hu (Xavier) 				tx_bak_pkt = txq->sw_ring;
4232bba63669SWei Hu (Xavier) 			}
4233e5e6ffc3SDengdui Huang 			if (m_seg != NULL)
4234e5e6ffc3SDengdui Huang 				TX_BD_LOG(&txq->hns->hw, DEBUG, desc);
4235bba63669SWei Hu (Xavier) 
4236bba63669SWei Hu (Xavier) 			i++;
4237bba63669SWei Hu (Xavier) 		} while (m_seg != NULL);
4238bba63669SWei Hu (Xavier) 
4239fc9b57ffSWei Hu (Xavier) 		/* Add end flag for the last Tx Buffer Descriptor */
4240fc9b57ffSWei Hu (Xavier) 		desc->tx.tp_fe_sc_vld_ra_ri |=
4241fc9b57ffSWei Hu (Xavier) 				 rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B));
4242e5e6ffc3SDengdui Huang 		TX_BD_LOG(&txq->hns->hw, DEBUG, desc);
4243fc9b57ffSWei Hu (Xavier) 
4244fdcd6a3eSMin Hu (Connor) 		/* Increment bytes counter */
4245fdcd6a3eSMin Hu (Connor) 		txq->basic_stats.bytes += tx_pkt->pkt_len;
4246bba63669SWei Hu (Xavier) 		nb_hold += i;
42478f64f284SWei Hu (Xavier) 		txq->next_to_use = tx_next_use;
4248eb570862SYisen Zhuang 		txq->tx_bd_ready -= i;
4249bba63669SWei Hu (Xavier) 	}
4250bba63669SWei Hu (Xavier) 
4251bba63669SWei Hu (Xavier) end_of_tx:
4252bba63669SWei Hu (Xavier) 
4253eb570862SYisen Zhuang 	if (likely(nb_tx))
425423e317ddSChengwen Feng 		hns3_write_txq_tail_reg(txq, nb_hold);
4255bba63669SWei Hu (Xavier) 
4256bba63669SWei Hu (Xavier) 	return nb_tx;
4257bba63669SWei Hu (Xavier) }
4258bba63669SWei Hu (Xavier) 
4259e31f123dSWei Hu (Xavier) int __rte_weak
4260e31f123dSWei Hu (Xavier) hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
4261e31f123dSWei Hu (Xavier) {
4262e31f123dSWei Hu (Xavier) 	return -ENOTSUP;
4263e31f123dSWei Hu (Xavier) }
4264e31f123dSWei Hu (Xavier) 
4265e31f123dSWei Hu (Xavier) uint16_t __rte_weak
4266e31f123dSWei Hu (Xavier) hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
4267e31f123dSWei Hu (Xavier) 		   __rte_unused struct rte_mbuf **tx_pkts,
4268e31f123dSWei Hu (Xavier) 		   __rte_unused uint16_t nb_pkts)
4269e31f123dSWei Hu (Xavier) {
4270e31f123dSWei Hu (Xavier) 	return 0;
4271e31f123dSWei Hu (Xavier) }
4272e31f123dSWei Hu (Xavier) 
4273f0c243a6SChengwen Feng uint16_t __rte_weak
4274f0c243a6SChengwen Feng hns3_xmit_pkts_vec_sve(__rte_unused void *tx_queue,
4275f0c243a6SChengwen Feng 		       __rte_unused struct rte_mbuf **tx_pkts,
4276f0c243a6SChengwen Feng 		       __rte_unused uint16_t nb_pkts)
4277f0c243a6SChengwen Feng {
4278f0c243a6SChengwen Feng 	return 0;
4279f0c243a6SChengwen Feng }
4280f0c243a6SChengwen Feng 
42817ef93390SWei Hu (Xavier) int
42827ef93390SWei Hu (Xavier) hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
42837ef93390SWei Hu (Xavier) 		       struct rte_eth_burst_mode *mode)
42847ef93390SWei Hu (Xavier) {
428510f91af5SHuisong Li 	static const struct {
428610f91af5SHuisong Li 		eth_tx_burst_t pkt_burst;
428710f91af5SHuisong Li 		const char *info;
428810f91af5SHuisong Li 	} burst_infos[] = {
428910f91af5SHuisong Li 		{ hns3_xmit_pkts_simple,	"Scalar Simple" },
429010f91af5SHuisong Li 		{ hns3_xmit_pkts,		"Scalar"        },
429110f91af5SHuisong Li 		{ hns3_xmit_pkts_vec,		"Vector Neon"   },
429210f91af5SHuisong Li 		{ hns3_xmit_pkts_vec_sve,	"Vector Sve"    },
429310f91af5SHuisong Li 		{ rte_eth_pkt_burst_dummy,	"Dummy"         },
429410f91af5SHuisong Li 	};
429510f91af5SHuisong Li 
42967ef93390SWei Hu (Xavier) 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
429710f91af5SHuisong Li 	int ret = -EINVAL;
429810f91af5SHuisong Li 	unsigned int i;
42997ef93390SWei Hu (Xavier) 
430010f91af5SHuisong Li 	for (i = 0; i < RTE_DIM(burst_infos); i++) {
430110f91af5SHuisong Li 		if (pkt_burst == burst_infos[i].pkt_burst) {
430210f91af5SHuisong Li 			snprintf(mode->info, sizeof(mode->info), "%s",
430310f91af5SHuisong Li 				 burst_infos[i].info);
430410f91af5SHuisong Li 			ret = 0;
430510f91af5SHuisong Li 			break;
430610f91af5SHuisong Li 		}
430710f91af5SHuisong Li 	}
43087ef93390SWei Hu (Xavier) 
430910f91af5SHuisong Li 	return ret;
43107ef93390SWei Hu (Xavier) }
43117ef93390SWei Hu (Xavier) 
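/*
 * hns3_tx_burst_mode_get() above is reached through the generic ethdev
 * API. A minimal usage sketch from the application side (port_id is
 * assumed to be a configured hns3 port):
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Tx burst mode: %s\n", mode.info);
 */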
431238b539d9SMin Hu (Connor) static bool
431338b539d9SMin Hu (Connor) hns3_tx_check_simple_support(struct rte_eth_dev *dev)
431438b539d9SMin Hu (Connor) {
431538b539d9SMin Hu (Connor) 	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
431638b539d9SMin Hu (Connor) 
4317295968d1SFerruh Yigit 	return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE));
431838b539d9SMin Hu (Connor) }
431938b539d9SMin Hu (Connor) 
4320d7ec2c07SChengchang Tang static bool
4321d7ec2c07SChengchang Tang hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
4322d7ec2c07SChengchang Tang {
4323d7ec2c07SChengchang Tang #define HNS3_DEV_TX_CKSUM_TSO_OFFLOAD_MASK (\
4324295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
4325295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
4326295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
4327295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
4328295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
4329295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
4330295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
4331295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
4332295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
4333295968d1SFerruh Yigit 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
4334d7ec2c07SChengchang Tang 
4335d7ec2c07SChengchang Tang 	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
43366a934ba4SHuisong Li 
4337d7ec2c07SChengchang Tang 	if (tx_offload & HNS3_DEV_TX_CKSUM_TSO_OFFLOAD_MASK)
4338d7ec2c07SChengchang Tang 		return true;
4339d7ec2c07SChengchang Tang 
4340d7ec2c07SChengchang Tang 	return false;
4341d7ec2c07SChengchang Tang }
4342d7ec2c07SChengchang Tang 
43436a934ba4SHuisong Li static eth_tx_prep_t
43446a934ba4SHuisong Li hns3_get_tx_prepare(struct rte_eth_dev *dev)
43456a934ba4SHuisong Li {
43466a934ba4SHuisong Li 	return hns3_get_tx_prep_needed(dev) ? hns3_prep_pkts : NULL;
43476a934ba4SHuisong Li }
43486a934ba4SHuisong Li 
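/*
 * Select the Tx burst function. A user hint (set via the tx_func_hint
 * device argument) wins whenever the hinted path is usable; otherwise
 * the preference order is Neon vector, then simple, then the common
 * scalar path. Note that SVE is only ever chosen through the hint.
 */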
43492aec7beaSHuisong Li static eth_tx_burst_t
43506a934ba4SHuisong Li hns3_get_tx_function(struct rte_eth_dev *dev)
43517ef93390SWei Hu (Xavier) {
43527ef93390SWei Hu (Xavier) 	struct hns3_adapter *hns = dev->data->dev_private;
4353a124f9e9SChengwen Feng 	bool vec_allowed, sve_allowed, simple_allowed;
43546a934ba4SHuisong Li 	bool vec_support;
43557ef93390SWei Hu (Xavier) 
4356e40ad6fcSChengwen Feng 	vec_support = hns3_tx_check_vec_support(dev) == 0;
4357e40ad6fcSChengwen Feng 	vec_allowed = vec_support && hns3_get_default_vec_support();
4358e40ad6fcSChengwen Feng 	sve_allowed = vec_support && hns3_get_sve_support();
43597e2e162eSChengwen Feng 	simple_allowed = hns3_tx_check_simple_support(dev);
4360a124f9e9SChengwen Feng 
4361a124f9e9SChengwen Feng 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
4362a124f9e9SChengwen Feng 		return hns3_xmit_pkts_vec;
4363a124f9e9SChengwen Feng 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
4364a124f9e9SChengwen Feng 		return hns3_xmit_pkts_vec_sve;
4365a124f9e9SChengwen Feng 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
43667ef93390SWei Hu (Xavier) 		return hns3_xmit_pkts_simple;
43676a934ba4SHuisong Li 	if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
4368a124f9e9SChengwen Feng 		return hns3_xmit_pkts;
43697ef93390SWei Hu (Xavier) 
4370a124f9e9SChengwen Feng 	if (vec_allowed)
4371a124f9e9SChengwen Feng 		return hns3_xmit_pkts_vec;
4372a124f9e9SChengwen Feng 	if (simple_allowed)
4373a124f9e9SChengwen Feng 		return hns3_xmit_pkts_simple;
4374a124f9e9SChengwen Feng 
43757ef93390SWei Hu (Xavier) 	return hns3_xmit_pkts;
43767ef93390SWei Hu (Xavier) }
43777ef93390SWei Hu (Xavier) 
43787feb2aeeSChengwen Feng static void
43797feb2aeeSChengwen Feng hns3_trace_rxtx_function(struct rte_eth_dev *dev)
43807feb2aeeSChengwen Feng {
43817feb2aeeSChengwen Feng 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
43827feb2aeeSChengwen Feng 	struct rte_eth_burst_mode rx_mode;
43837feb2aeeSChengwen Feng 	struct rte_eth_burst_mode tx_mode;
43847feb2aeeSChengwen Feng 
43857feb2aeeSChengwen Feng 	memset(&rx_mode, 0, sizeof(rx_mode));
43867feb2aeeSChengwen Feng 	memset(&tx_mode, 0, sizeof(tx_mode));
43877feb2aeeSChengwen Feng 	(void)hns3_rx_burst_mode_get(dev, 0, &rx_mode);
43887feb2aeeSChengwen Feng 	(void)hns3_tx_burst_mode_get(dev, 0, &tx_mode);
43897feb2aeeSChengwen Feng 
43907feb2aeeSChengwen Feng 	hns3_dbg(hw, "using rx_pkt_burst: %s, tx_pkt_burst: %s.",
43917feb2aeeSChengwen Feng 		 rx_mode.info, tx_mode.info);
43927feb2aeeSChengwen Feng }
43937feb2aeeSChengwen Feng 
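/*
 * Since DPDK 21.11 the function pointers used by the inline
 * rte_eth_rx_burst()/rte_eth_tx_burst() wrappers live in the global
 * rte_eth_fp_ops[] array rather than in struct rte_eth_dev, so every
 * change to dev->rx_pkt_burst and friends must be mirrored there.
 */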
439496c33cfbSMin Hu (Connor) static void
439596c33cfbSMin Hu (Connor) hns3_eth_dev_fp_ops_config(const struct rte_eth_dev *dev)
439696c33cfbSMin Hu (Connor) {
439796c33cfbSMin Hu (Connor) 	struct rte_eth_fp_ops *fpo = rte_eth_fp_ops;
439896c33cfbSMin Hu (Connor) 	uint16_t port_id = dev->data->port_id;
439996c33cfbSMin Hu (Connor) 
440096c33cfbSMin Hu (Connor) 	fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst;
440196c33cfbSMin Hu (Connor) 	fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst;
440296c33cfbSMin Hu (Connor) 	fpo[port_id].tx_pkt_prepare = dev->tx_pkt_prepare;
440396c33cfbSMin Hu (Connor) 	fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status;
440496c33cfbSMin Hu (Connor) 	fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status;
44058ba42ce9SHuisong Li 	fpo[port_id].rxq.data = dev->data->rx_queues;
44068ba42ce9SHuisong Li 	fpo[port_id].txq.data = dev->data->tx_queues;
440796c33cfbSMin Hu (Connor) }
440896c33cfbSMin Hu (Connor) 
440996c33cfbSMin Hu (Connor) void
441096c33cfbSMin Hu (Connor) hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
4411bba63669SWei Hu (Xavier) {
4412168b7d79SHuisong Li 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
44132790c646SWei Hu (Xavier) 	struct hns3_adapter *hns = eth_dev->data->dev_private;
44142790c646SWei Hu (Xavier) 
44152790c646SWei Hu (Xavier) 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
4416e12a0166STyler Retzlaff 	    rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
4417521ab3e9SWei Hu (Xavier) 		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
441863e05f19SHongbo Zheng 		eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
4419168b7d79SHuisong Li 		eth_dev->tx_pkt_burst = hw->set_link_down ?
4420a41f593fSFerruh Yigit 					rte_eth_pkt_burst_dummy :
44216a934ba4SHuisong Li 					hns3_get_tx_function(eth_dev);
44226a934ba4SHuisong Li 		eth_dev->tx_pkt_prepare = hns3_get_tx_prepare(eth_dev);
4423656a6d9cSHongbo Zheng 		eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
44242790c646SWei Hu (Xavier) 	} else {
4425a41f593fSFerruh Yigit 		eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
4426a41f593fSFerruh Yigit 		eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
44271cc574c4SHuisong Li 		eth_dev->tx_pkt_prepare = NULL;
44282790c646SWei Hu (Xavier) 	}
442996c33cfbSMin Hu (Connor) 
4430a8f52a5cSHuisong Li 	hns3_trace_rxtx_function(eth_dev);
443196c33cfbSMin Hu (Connor) 	hns3_eth_dev_fp_ops_config(eth_dev);
4432bba63669SWei Hu (Xavier) }
4433091a0f95SHuisong Li 
4434091a0f95SHuisong Li void
4435091a0f95SHuisong Li hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4436091a0f95SHuisong Li 		  struct rte_eth_rxq_info *qinfo)
4437091a0f95SHuisong Li {
4438091a0f95SHuisong Li 	struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
4439091a0f95SHuisong Li 
4440091a0f95SHuisong Li 	qinfo->mp = rxq->mb_pool;
4441091a0f95SHuisong Li 	qinfo->nb_desc = rxq->nb_rx_desc;
4442091a0f95SHuisong Li 	qinfo->scattered_rx = dev->data->scattered_rx;
4443e692c746SChengchang Tang 	/* Report the HW Rx buffer length to the user. */
4444e692c746SChengchang Tang 	qinfo->rx_buf_size = rxq->rx_buf_len;
4445091a0f95SHuisong Li 
4446091a0f95SHuisong Li 	/*
4447091a0f95SHuisong Li 	 * If there are no available Rx buffer descriptors, incoming packets
4448091a0f95SHuisong Li 	 * are always dropped by the hardware on the hns3 network engine.
4449091a0f95SHuisong Li 	 */
4450091a0f95SHuisong Li 	qinfo->conf.rx_drop_en = 1;
4451091a0f95SHuisong Li 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
4452091a0f95SHuisong Li 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
4453091a0f95SHuisong Li 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
4454091a0f95SHuisong Li }
4455091a0f95SHuisong Li 
4456091a0f95SHuisong Li void
4457091a0f95SHuisong Li hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
4458091a0f95SHuisong Li 		  struct rte_eth_txq_info *qinfo)
4459091a0f95SHuisong Li {
4460091a0f95SHuisong Li 	struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
4461091a0f95SHuisong Li 
4462091a0f95SHuisong Li 	qinfo->nb_desc = txq->nb_tx_desc;
4463091a0f95SHuisong Li 	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
44647ef93390SWei Hu (Xavier) 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
44657ef93390SWei Hu (Xavier) 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
4466091a0f95SHuisong Li 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
4467091a0f95SHuisong Li }
4468fa29fe45SChengchang Tang 
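/*
 * Per-queue start/stop is only possible with the INDEP_TXRX hardware
 * capability; without it the generic API returns -ENOTSUP. A usage
 * sketch from the application (port_id/queue_id assumed valid and the
 * port started):
 *
 *	if (rte_eth_dev_rx_queue_start(port_id, queue_id) != 0)
 *		... handle the error, e.g. -ENOTSUP or -EIO ...
 */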
4469fa29fe45SChengchang Tang int
4470fa29fe45SChengchang Tang hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4471fa29fe45SChengchang Tang {
4472fa29fe45SChengchang Tang 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4473fa29fe45SChengchang Tang 	struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4474fa29fe45SChengchang Tang 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
4475fa29fe45SChengchang Tang 	int ret;
4476fa29fe45SChengchang Tang 
4477efcaa81eSChengchang Tang 	if (!hns3_dev_get_support(hw, INDEP_TXRX))
4478fa29fe45SChengchang Tang 		return -ENOTSUP;
4479fa29fe45SChengchang Tang 
448018da3c85SChengchang Tang 	rte_spinlock_lock(&hw->lock);
4481bbc5a31bSChengwen Feng 
4482e12a0166STyler Retzlaff 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
4483bbc5a31bSChengwen Feng 		hns3_err(hw, "fail to start Rx queue during resetting.");
4484bbc5a31bSChengwen Feng 		rte_spinlock_unlock(&hw->lock);
4485bbc5a31bSChengwen Feng 		return -EIO;
4486bbc5a31bSChengwen Feng 	}
4487bbc5a31bSChengwen Feng 
4488fa29fe45SChengchang Tang 	ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
4489fa29fe45SChengchang Tang 	if (ret) {
4490fa29fe45SChengchang Tang 		hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
4491fa29fe45SChengchang Tang 			 rx_queue_id, ret);
449218da3c85SChengchang Tang 		rte_spinlock_unlock(&hw->lock);
4493fa29fe45SChengchang Tang 		return ret;
4494fa29fe45SChengchang Tang 	}
4495fa29fe45SChengchang Tang 
4496f81a18f4SChengwen Feng 	if (rxq->sw_ring[0].mbuf != NULL)
4497f81a18f4SChengwen Feng 		hns3_rx_queue_release_mbufs(rxq);
4498f81a18f4SChengwen Feng 
4499fa29fe45SChengchang Tang 	ret = hns3_init_rxq(hns, rx_queue_id);
4500fa29fe45SChengchang Tang 	if (ret) {
4501fa29fe45SChengchang Tang 		hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
4502fa29fe45SChengchang Tang 			 rx_queue_id, ret);
450318da3c85SChengchang Tang 		rte_spinlock_unlock(&hw->lock);
4504fa29fe45SChengchang Tang 		return ret;
4505fa29fe45SChengchang Tang 	}
4506fa29fe45SChengchang Tang 
4507fa29fe45SChengchang Tang 	hns3_enable_rxq(rxq, true);
4508fa29fe45SChengchang Tang 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
450918da3c85SChengchang Tang 	rte_spinlock_unlock(&hw->lock);
4510fa29fe45SChengchang Tang 
4511fa29fe45SChengchang Tang 	return ret;
4512fa29fe45SChengchang Tang }
4513fa29fe45SChengchang Tang 
4514821496d2SChengchang Tang static void
4515821496d2SChengchang Tang hns3_reset_sw_rxq(struct hns3_rx_queue *rxq)
4516821496d2SChengchang Tang {
4517821496d2SChengchang Tang 	rxq->next_to_use = 0;
4518821496d2SChengchang Tang 	rxq->rx_rearm_start = 0;
4519821496d2SChengchang Tang 	rxq->rx_free_hold = 0;
4520821496d2SChengchang Tang 	rxq->rx_rearm_nb = 0;
4521821496d2SChengchang Tang 	rxq->pkt_first_seg = NULL;
4522821496d2SChengchang Tang 	rxq->pkt_last_seg = NULL;
4523821496d2SChengchang Tang 	memset(&rxq->rx_ring[0], 0, rxq->nb_rx_desc * sizeof(struct hns3_desc));
4524821496d2SChengchang Tang 	hns3_rxq_vec_setup(rxq);
4525821496d2SChengchang Tang }
4526821496d2SChengchang Tang 
4527fa29fe45SChengchang Tang int
4528fa29fe45SChengchang Tang hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
4529fa29fe45SChengchang Tang {
4530fa29fe45SChengchang Tang 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4531fa29fe45SChengchang Tang 	struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
4532fa29fe45SChengchang Tang 
4533efcaa81eSChengchang Tang 	if (!hns3_dev_get_support(hw, INDEP_TXRX))
4534fa29fe45SChengchang Tang 		return -ENOTSUP;
4535fa29fe45SChengchang Tang 
453618da3c85SChengchang Tang 	rte_spinlock_lock(&hw->lock);
4537bbc5a31bSChengwen Feng 
4538e12a0166STyler Retzlaff 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
4539bbc5a31bSChengwen Feng 		hns3_err(hw, "fail to stop Rx queue during resetting.");
4540bbc5a31bSChengwen Feng 		rte_spinlock_unlock(&hw->lock);
4541bbc5a31bSChengwen Feng 		return -EIO;
4542bbc5a31bSChengwen Feng 	}
4543bbc5a31bSChengwen Feng 
4544fa29fe45SChengchang Tang 	hns3_enable_rxq(rxq, false);
4545821496d2SChengchang Tang 
4546fa29fe45SChengchang Tang 	hns3_rx_queue_release_mbufs(rxq);
4547821496d2SChengchang Tang 
4548821496d2SChengchang Tang 	hns3_reset_sw_rxq(rxq);
4549fa29fe45SChengchang Tang 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
455018da3c85SChengchang Tang 	rte_spinlock_unlock(&hw->lock);
4551fa29fe45SChengchang Tang 
4552fa29fe45SChengchang Tang 	return 0;
4553fa29fe45SChengchang Tang }
4554fa29fe45SChengchang Tang 
4555fa29fe45SChengchang Tang int
4556fa29fe45SChengchang Tang hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4557fa29fe45SChengchang Tang {
4558fa29fe45SChengchang Tang 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4559fa29fe45SChengchang Tang 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4560fa29fe45SChengchang Tang 	int ret;
4561fa29fe45SChengchang Tang 
4562efcaa81eSChengchang Tang 	if (!hns3_dev_get_support(hw, INDEP_TXRX))
4563fa29fe45SChengchang Tang 		return -ENOTSUP;
4564fa29fe45SChengchang Tang 
456518da3c85SChengchang Tang 	rte_spinlock_lock(&hw->lock);
4566bbc5a31bSChengwen Feng 
4567e12a0166STyler Retzlaff 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
4568bbc5a31bSChengwen Feng 		hns3_err(hw, "fail to start Tx queue during resetting.");
4569bbc5a31bSChengwen Feng 		rte_spinlock_unlock(&hw->lock);
4570bbc5a31bSChengwen Feng 		return -EIO;
4571bbc5a31bSChengwen Feng 	}
4572bbc5a31bSChengwen Feng 
4573fa29fe45SChengchang Tang 	ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
4574fa29fe45SChengchang Tang 	if (ret) {
4575fa29fe45SChengchang Tang 		hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
4576fa29fe45SChengchang Tang 			 tx_queue_id, ret);
457718da3c85SChengchang Tang 		rte_spinlock_unlock(&hw->lock);
4578fa29fe45SChengchang Tang 		return ret;
4579fa29fe45SChengchang Tang 	}
4580fa29fe45SChengchang Tang 
4581fa29fe45SChengchang Tang 	hns3_init_txq(txq);
4582fa29fe45SChengchang Tang 	hns3_enable_txq(txq, true);
4583fa29fe45SChengchang Tang 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
458418da3c85SChengchang Tang 	rte_spinlock_unlock(&hw->lock);
4585fa29fe45SChengchang Tang 
4586fa29fe45SChengchang Tang 	return ret;
4587fa29fe45SChengchang Tang }
4588fa29fe45SChengchang Tang 
4589fa29fe45SChengchang Tang int
4590fa29fe45SChengchang Tang hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
4591fa29fe45SChengchang Tang {
4592fa29fe45SChengchang Tang 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4593fa29fe45SChengchang Tang 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
4594fa29fe45SChengchang Tang 
4595efcaa81eSChengchang Tang 	if (!hns3_dev_get_support(hw, INDEP_TXRX))
4596fa29fe45SChengchang Tang 		return -ENOTSUP;
4597fa29fe45SChengchang Tang 
459818da3c85SChengchang Tang 	rte_spinlock_lock(&hw->lock);
4599bbc5a31bSChengwen Feng 
4600e12a0166STyler Retzlaff 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
4601bbc5a31bSChengwen Feng 		hns3_err(hw, "fail to stop Tx queue during resetting.");
4602bbc5a31bSChengwen Feng 		rte_spinlock_unlock(&hw->lock);
4603bbc5a31bSChengwen Feng 		return -EIO;
4604bbc5a31bSChengwen Feng 	}
4605bbc5a31bSChengwen Feng 
4606fa29fe45SChengchang Tang 	hns3_enable_txq(txq, false);
4607fa29fe45SChengchang Tang 	hns3_tx_queue_release_mbufs(txq);
4608fa29fe45SChengchang Tang 	/*
4609fa29fe45SChengchang Tang 	 * All the mbufs in sw_ring have been released and all the pointers
4610fa29fe45SChengchang Tang 	 * in sw_ring are set to NULL. If the upper layer keeps using this
4611fa29fe45SChengchang Tang 	 * queue, residual SW state of the txq could cause the already-freed
4612fa29fe45SChengchang Tang 	 * mbufs behind those NULL pointers to be released again. To avoid
4613fa29fe45SChengchang Tang 	 * this, reinitialize the txq.
4614fa29fe45SChengchang Tang 	 */
4615fa29fe45SChengchang Tang 	hns3_init_txq(txq);
4616fa29fe45SChengchang Tang 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
461718da3c85SChengchang Tang 	rte_spinlock_unlock(&hw->lock);
4618fa29fe45SChengchang Tang 
4619fa29fe45SChengchang Tang 	return 0;
4620fa29fe45SChengchang Tang }
46212be91035SLijun Ou 
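/*
 * Free up to free_cnt mbufs whose descriptors the hardware has handed
 * back (VLD bit cleared), starting at next_to_clean. Reached through
 * rte_eth_tx_done_cleanup(); passing free_cnt == 0 frees as many as
 * possible, e.g.:
 *
 *	int nb = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 */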
4622dfecc320SChengwen Feng static int
4623dfecc320SChengwen Feng hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt)
4624dfecc320SChengwen Feng {
462560560096SChengwen Feng 	uint16_t next_to_clean = txq->next_to_clean;
462660560096SChengwen Feng 	uint16_t next_to_use   = txq->next_to_use;
462760560096SChengwen Feng 	uint16_t tx_bd_ready   = txq->tx_bd_ready;
462860560096SChengwen Feng 	struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean];
462960560096SChengwen Feng 	struct hns3_desc *desc = &txq->tx_ring[next_to_clean];
4630dfecc320SChengwen Feng 	uint32_t idx;
4631dfecc320SChengwen Feng 
4632dfecc320SChengwen Feng 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
4633dfecc320SChengwen Feng 		free_cnt = txq->nb_tx_desc;
4634dfecc320SChengwen Feng 
463560560096SChengwen Feng 	for (idx = 0; idx < free_cnt; idx++) {
463660560096SChengwen Feng 		if (next_to_clean == next_to_use)
4637dfecc320SChengwen Feng 			break;
463860560096SChengwen Feng 		if (desc->tx.tp_fe_sc_vld_ra_ri &
463960560096SChengwen Feng 		    rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
464060560096SChengwen Feng 			break;
464160560096SChengwen Feng 		if (tx_pkt->mbuf != NULL) {
464260560096SChengwen Feng 			rte_pktmbuf_free_seg(tx_pkt->mbuf);
464360560096SChengwen Feng 			tx_pkt->mbuf = NULL;
464460560096SChengwen Feng 		}
464560560096SChengwen Feng 		next_to_clean++;
464660560096SChengwen Feng 		tx_bd_ready++;
464760560096SChengwen Feng 		tx_pkt++;
464860560096SChengwen Feng 		desc++;
464960560096SChengwen Feng 		if (next_to_clean == txq->nb_tx_desc) {
465060560096SChengwen Feng 			tx_pkt = txq->sw_ring;
465160560096SChengwen Feng 			desc = txq->tx_ring;
465260560096SChengwen Feng 			next_to_clean = 0;
465360560096SChengwen Feng 		}
4654dfecc320SChengwen Feng 	}
4655dfecc320SChengwen Feng 
465660560096SChengwen Feng 	if (idx > 0) {
465760560096SChengwen Feng 		txq->next_to_clean = next_to_clean;
465860560096SChengwen Feng 		txq->tx_bd_ready = tx_bd_ready;
465960560096SChengwen Feng 	}
466060560096SChengwen Feng 
466160560096SChengwen Feng 	return (int)idx;
4662dfecc320SChengwen Feng }
4663dfecc320SChengwen Feng 
4664dfecc320SChengwen Feng int
4665dfecc320SChengwen Feng hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
4666dfecc320SChengwen Feng {
4667dfecc320SChengwen Feng 	struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq;
4668dfecc320SChengwen Feng 	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
4669dfecc320SChengwen Feng 
4670dfecc320SChengwen Feng 	if (dev->tx_pkt_burst == hns3_xmit_pkts)
4671dfecc320SChengwen Feng 		return hns3_tx_done_cleanup_full(q, free_cnt);
4672a41f593fSFerruh Yigit 	else if (dev->tx_pkt_burst == rte_eth_pkt_burst_dummy)
4673dfecc320SChengwen Feng 		return 0;
4674dfecc320SChengwen Feng 	else
4675dfecc320SChengwen Feng 		return -ENOTSUP;
4676dfecc320SChengwen Feng }
4677dfecc320SChengwen Feng 
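/*
 * Descriptor status callbacks, reached through
 * rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status().
 * "offset" is relative to the next descriptor the driver will process,
 * so offset 0 queries the very next BD.
 */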
4678656a6d9cSHongbo Zheng int
467963e05f19SHongbo Zheng hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
468063e05f19SHongbo Zheng {
468163e05f19SHongbo Zheng 	volatile struct hns3_desc *rxdp;
468263e05f19SHongbo Zheng 	struct hns3_rx_queue *rxq;
468363e05f19SHongbo Zheng 	struct rte_eth_dev *dev;
468463e05f19SHongbo Zheng 	uint32_t bd_base_info;
468563e05f19SHongbo Zheng 	uint16_t desc_id;
468663e05f19SHongbo Zheng 
468763e05f19SHongbo Zheng 	rxq = (struct hns3_rx_queue *)rx_queue;
468863e05f19SHongbo Zheng 	if (offset >= rxq->nb_rx_desc)
468963e05f19SHongbo Zheng 		return -EINVAL;
469063e05f19SHongbo Zheng 
469163e05f19SHongbo Zheng 	desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
469263e05f19SHongbo Zheng 	rxdp = &rxq->rx_ring[desc_id];
469363e05f19SHongbo Zheng 	bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
469463e05f19SHongbo Zheng 	dev = &rte_eth_devices[rxq->port_id];
4695aa5baf47SChengwen Feng 	if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
469663e05f19SHongbo Zheng 	    dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
469763e05f19SHongbo Zheng 		if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
469863e05f19SHongbo Zheng 			return RTE_ETH_RX_DESC_UNAVAIL;
469963e05f19SHongbo Zheng 	} else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
470063e05f19SHongbo Zheng 		   dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
470163e05f19SHongbo Zheng 		if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
470263e05f19SHongbo Zheng 			return RTE_ETH_RX_DESC_UNAVAIL;
470363e05f19SHongbo Zheng 	} else {
470463e05f19SHongbo Zheng 		return RTE_ETH_RX_DESC_UNAVAIL;
470563e05f19SHongbo Zheng 	}
470663e05f19SHongbo Zheng 
470763e05f19SHongbo Zheng 	if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
470863e05f19SHongbo Zheng 		return RTE_ETH_RX_DESC_AVAIL;
470963e05f19SHongbo Zheng 	else
471063e05f19SHongbo Zheng 		return RTE_ETH_RX_DESC_DONE;
471163e05f19SHongbo Zheng }
471263e05f19SHongbo Zheng 
471363e05f19SHongbo Zheng int
4714656a6d9cSHongbo Zheng hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
4715656a6d9cSHongbo Zheng {
4716656a6d9cSHongbo Zheng 	volatile struct hns3_desc *txdp;
4717656a6d9cSHongbo Zheng 	struct hns3_tx_queue *txq;
4718656a6d9cSHongbo Zheng 	struct rte_eth_dev *dev;
4719656a6d9cSHongbo Zheng 	uint16_t desc_id;
4720656a6d9cSHongbo Zheng 
4721656a6d9cSHongbo Zheng 	txq = (struct hns3_tx_queue *)tx_queue;
4722656a6d9cSHongbo Zheng 	if (offset >= txq->nb_tx_desc)
4723656a6d9cSHongbo Zheng 		return -EINVAL;
4724656a6d9cSHongbo Zheng 
4725656a6d9cSHongbo Zheng 	dev = &rte_eth_devices[txq->port_id];
4726656a6d9cSHongbo Zheng 	if (dev->tx_pkt_burst != hns3_xmit_pkts_simple &&
4727656a6d9cSHongbo Zheng 	    dev->tx_pkt_burst != hns3_xmit_pkts &&
4728656a6d9cSHongbo Zheng 	    dev->tx_pkt_burst != hns3_xmit_pkts_vec_sve &&
4729656a6d9cSHongbo Zheng 	    dev->tx_pkt_burst != hns3_xmit_pkts_vec)
4730656a6d9cSHongbo Zheng 		return RTE_ETH_TX_DESC_UNAVAIL;
4731656a6d9cSHongbo Zheng 
4732656a6d9cSHongbo Zheng 	desc_id = (txq->next_to_use + offset) % txq->nb_tx_desc;
4733656a6d9cSHongbo Zheng 	txdp = &txq->tx_ring[desc_id];
4734656a6d9cSHongbo Zheng 	if (txdp->tx.tp_fe_sc_vld_ra_ri & rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
4735656a6d9cSHongbo Zheng 		return RTE_ETH_TX_DESC_FULL;
4736656a6d9cSHongbo Zheng 	else
4737656a6d9cSHongbo Zheng 		return RTE_ETH_TX_DESC_DONE;
4738656a6d9cSHongbo Zheng }
4739656a6d9cSHongbo Zheng 
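/*
 * Used descriptor count reported through rte_eth_rx_queue_count(): the
 * FBDNUM value read from hardware minus the BDs the driver already
 * holds but has not yet returned to the ring.
 */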
47402be91035SLijun Ou uint32_t
47418d7d4fcdSKonstantin Ananyev hns3_rx_queue_count(void *rx_queue)
47422be91035SLijun Ou {
47432be91035SLijun Ou 	/*
47442be91035SLijun Ou 	 * Number of BDs that have been processed by the driver
47452be91035SLijun Ou 	 * but have not been notified to the hardware.
47462be91035SLijun Ou 	 */
47472be91035SLijun Ou 	uint32_t driver_hold_bd_num;
47482be91035SLijun Ou 	struct hns3_rx_queue *rxq;
47498d7d4fcdSKonstantin Ananyev 	const struct rte_eth_dev *dev;
47502be91035SLijun Ou 	uint32_t fbd_num;
47512be91035SLijun Ou 
47528d7d4fcdSKonstantin Ananyev 	rxq = rx_queue;
47538d7d4fcdSKonstantin Ananyev 	dev = &rte_eth_devices[rxq->port_id];
47548d7d4fcdSKonstantin Ananyev 
47552be91035SLijun Ou 	fbd_num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
47562be91035SLijun Ou 	if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
47572be91035SLijun Ou 	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
47582be91035SLijun Ou 		driver_hold_bd_num = rxq->rx_rearm_nb;
47592be91035SLijun Ou 	else
47602be91035SLijun Ou 		driver_hold_bd_num = rxq->rx_free_hold;
47612be91035SLijun Ou 
47622be91035SLijun Ou 	if (fbd_num <= driver_hold_bd_num)
47632be91035SLijun Ou 		return 0;
47642be91035SLijun Ou 	else
47652be91035SLijun Ou 		return fbd_num - driver_hold_bd_num;
47662be91035SLijun Ou }
4767fb5e9069SChengwen Feng 
4768fb5e9069SChengwen Feng void
4769fb5e9069SChengwen Feng hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
4770fb5e9069SChengwen Feng {
4771fb5e9069SChengwen Feng 	/*
4772fb5e9069SChengwen Feng 	 * If the hardware supports the RXD advanced layout, the driver
4773fb5e9069SChengwen Feng 	 * enables it by default.
4774fb5e9069SChengwen Feng 	 */
4775efcaa81eSChengchang Tang 	if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
4776fb5e9069SChengwen Feng 		hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
4777fb5e9069SChengwen Feng }
4778168b7d79SHuisong Li 
4779168b7d79SHuisong Li void
4780168b7d79SHuisong Li hns3_stop_tx_datapath(struct rte_eth_dev *dev)
4781168b7d79SHuisong Li {
4782a41f593fSFerruh Yigit 	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
4783168b7d79SHuisong Li 	dev->tx_pkt_prepare = NULL;
478496c33cfbSMin Hu (Connor) 	hns3_eth_dev_fp_ops_config(dev);
478596c33cfbSMin Hu (Connor) 
478696c33cfbSMin Hu (Connor) 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
478796c33cfbSMin Hu (Connor) 		return;
478896c33cfbSMin Hu (Connor) 
4789168b7d79SHuisong Li 	rte_wmb();
4790168b7d79SHuisong Li 	/* Disable tx datapath on secondary process. */
4791168b7d79SHuisong Li 	hns3_mp_req_stop_tx(dev);
4792168b7d79SHuisong Li 	/* Prevent crashes when queues are still in use. */
4793168b7d79SHuisong Li 	rte_delay_ms(dev->data->nb_tx_queues);
4794168b7d79SHuisong Li }
4795168b7d79SHuisong Li 
4796168b7d79SHuisong Li void
4797168b7d79SHuisong Li hns3_start_tx_datapath(struct rte_eth_dev *dev)
4798168b7d79SHuisong Li {
47996a934ba4SHuisong Li 	dev->tx_pkt_burst = hns3_get_tx_function(dev);
48006a934ba4SHuisong Li 	dev->tx_pkt_prepare = hns3_get_tx_prepare(dev);
480196c33cfbSMin Hu (Connor) 	hns3_eth_dev_fp_ops_config(dev);
480296c33cfbSMin Hu (Connor) 
480396c33cfbSMin Hu (Connor) 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
480496c33cfbSMin Hu (Connor) 		return;
480596c33cfbSMin Hu (Connor) 
4806168b7d79SHuisong Li 	hns3_mp_req_start_tx(dev);
4807168b7d79SHuisong Li }
48084ba28c95SHuisong Li 
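/*
 * Quiesce both directions before a device stop or reset: reinstall the
 * burst functions (the dummy ones while stopped or resetting), fence so
 * that all cores observe the change, ask secondary processes to stop
 * their datapath, and wait long enough for in-flight bursts to drain.
 */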
48094ba28c95SHuisong Li void
48104ba28c95SHuisong Li hns3_stop_rxtx_datapath(struct rte_eth_dev *dev)
48114ba28c95SHuisong Li {
48124ba28c95SHuisong Li 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
48134ba28c95SHuisong Li 
48144ba28c95SHuisong Li 	hns3_set_rxtx_function(dev);
48154ba28c95SHuisong Li 
48164ba28c95SHuisong Li 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
48174ba28c95SHuisong Li 		return;
48184ba28c95SHuisong Li 
48194ba28c95SHuisong Li 	rte_wmb();
48204ba28c95SHuisong Li 	/* Disable datapath on secondary process. */
48214ba28c95SHuisong Li 	hns3_mp_req_stop_rxtx(dev);
48224ba28c95SHuisong Li 	/* Prevent crashes when queues are still in use. */
48234ba28c95SHuisong Li 	rte_delay_ms(hw->cfg_max_queues);
48244ba28c95SHuisong Li }
48254ba28c95SHuisong Li 
48264ba28c95SHuisong Li void
48274ba28c95SHuisong Li hns3_start_rxtx_datapath(struct rte_eth_dev *dev)
48284ba28c95SHuisong Li {
48294ba28c95SHuisong Li 	hns3_set_rxtx_function(dev);
48304ba28c95SHuisong Li 
48314ba28c95SHuisong Li 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
48324ba28c95SHuisong Li 		return;
48334ba28c95SHuisong Li 
48344ba28c95SHuisong Li 	hns3_mp_req_start_rxtx(dev);
48354ba28c95SHuisong Li }
48369e1e7ddeSChengwen Feng 
48379e1e7ddeSChengwen Feng static int
48389e1e7ddeSChengwen Feng hns3_monitor_callback(const uint64_t value,
48399e1e7ddeSChengwen Feng 		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
48409e1e7ddeSChengwen Feng {
48419e1e7ddeSChengwen Feng 	const uint64_t vld = rte_le_to_cpu_32(BIT(HNS3_RXD_VLD_B));
48429e1e7ddeSChengwen Feng 	return (value & vld) == vld ? -1 : 0;
48439e1e7ddeSChengwen Feng }
48449e1e7ddeSChengwen Feng 
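/*
 * Power-aware Rx monitoring: expose the address of the next
 * descriptor's bd_base_info word so that rte_power_monitor() can sleep
 * on it until the VLD bit is set. Typically enabled through, e.g.:
 *
 *	rte_power_ethdev_pmgmt_queue_enable(lcore_id, port_id, queue_id,
 *					    RTE_POWER_MGMT_TYPE_MONITOR);
 */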
48459e1e7ddeSChengwen Feng int
48469e1e7ddeSChengwen Feng hns3_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
48479e1e7ddeSChengwen Feng {
48489e1e7ddeSChengwen Feng 	struct hns3_rx_queue *rxq = rx_queue;
48499e1e7ddeSChengwen Feng 	struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
48509e1e7ddeSChengwen Feng 
48519e1e7ddeSChengwen Feng 	pmc->addr = &rxdp->rx.bd_base_info;
48529e1e7ddeSChengwen Feng 	pmc->fn = hns3_monitor_callback;
48539e1e7ddeSChengwen Feng 	pmc->size = sizeof(uint32_t);
48549e1e7ddeSChengwen Feng 
48559e1e7ddeSChengwen Feng 	return 0;
48569e1e7ddeSChengwen Feng }
4857