xref: /dpdk/drivers/net/ark/ark_ethdev_tx.c (revision 6c7f491e7fee0708e33b2e7e45c712db7a69c1d3)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2021 Atomic Rules LLC
 */

#include <unistd.h>

#include "ark_ethdev_tx.h"
#include "ark_global.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
#include "ark_logs.h"

#define ARK_TX_META_SIZE   32
#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)

#ifndef RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#define ARK_MIN_TX_PKTLEN 0
#else
#define ARK_MIN_TX_PKTLEN RTE_LIBRTE_ARK_MIN_TX_PKTLEN
#endif

/* ************************************************************************* */
struct ark_tx_queue {
	union ark_tx_meta *meta_q;
	struct rte_mbuf **bufs;

	/* handles for hw objects */
	struct ark_mpu_t *mpu;
	struct ark_ddm_t *ddm;

	/* Stats: HW tracks bytes and packets; send errors are counted here */
	uint64_t tx_errors;

	tx_user_meta_hook_fn tx_user_meta_hook;
	void *ext_user_data;

	uint32_t queue_size;
	uint32_t queue_mask;

	/* 3 indexes to the paired data rings. */
	int32_t prod_index;		/* where to put the next one */
	int32_t free_index;		/* mbuf has been freed */

	/* The queue Id is used to identify the HW Q */
	uint16_t phys_qid;
	/* The queue Index within the dpdk device structures */
	uint16_t queue_index;

	/* next cache line - fields written by device */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;
	volatile int32_t cons_index;		/* hw is done, can be freed */
} __rte_cache_aligned;
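
/*
 * Illustrative sketch, not part of the driver: the three indices above
 * only ever increase and are masked with queue_mask when used as array
 * offsets, so signed subtraction yields element counts directly.  The
 * helper names below are hypothetical.
 */
#if 0	/* example only, not compiled */
/* Descriptors handed to hardware but not yet completed. */
static inline int32_t
ark_tx_ring_in_flight(const struct ark_tx_queue *q)
{
	return q->prod_index - q->cons_index;
}

/* Ring slots that eth_ark_tx_desc_fill() may still consume. */
static inline int32_t
ark_tx_ring_free_slots(const struct ark_tx_queue *q)
{
	return (int32_t)q->queue_size - (q->prod_index - q->free_index);
}
#endif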

/* Forward declarations */
static int eth_ark_tx_jumbo(struct ark_tx_queue *queue,
			    struct rte_mbuf *mbuf,
			    uint32_t *user_meta, uint8_t meta_cnt);
static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
static void free_completed_tx(struct ark_tx_queue *queue);

static inline void
ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
{
	ark_mpu_stop(queue->mpu);
}

/* ************************************************************************* */
static inline void
eth_ark_tx_desc_fill(struct ark_tx_queue *queue,
		     struct rte_mbuf *mbuf,
		     uint8_t  flags,
		     uint32_t *user_meta,
		     uint8_t  meta_cnt /* 0 to 5 */
		     )
{
	uint32_t tx_idx;
	union ark_tx_meta *meta;
	uint8_t m;

	/* Header */
	tx_idx = queue->prod_index & queue->queue_mask;
	meta = &queue->meta_q[tx_idx];
	meta->data_len = rte_pktmbuf_data_len(mbuf);
	meta->flags = flags;
	meta->meta_cnt = meta_cnt / 2;
	meta->user1 = meta_cnt ? (*user_meta++) : 0;
	queue->prod_index++;

	queue->bufs[tx_idx] = mbuf;

	/* 1 or 2 user meta data entries, user words 1,2 and 3,4 */
	for (m = 1; m < meta_cnt; m += 2) {
		tx_idx = queue->prod_index & queue->queue_mask;
		meta = &queue->meta_q[tx_idx];
		meta->usermeta0 = *user_meta++;
		meta->usermeta1 = *user_meta++;
		queue->prod_index++;
	}

	tx_idx = queue->prod_index & queue->queue_mask;
	meta = &queue->meta_q[tx_idx];
	meta->physaddr = rte_mbuf_data_iova(mbuf);
	queue->prod_index++;
}
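
/*
 * Illustrative sketch, not part of the driver: eth_ark_tx_desc_fill()
 * consumes one header slot (which also carries the first user word), one
 * slot per started pair of additional user words, and one physaddr slot.
 * The helper name is hypothetical; the count matches the free_index
 * advance in free_completed_tx().
 */
#if 0	/* example only, not compiled */
static inline uint32_t
ark_tx_slots_per_seg(uint8_t meta_cnt)	/* 0 to 5 user meta words */
{
	/* header + physaddr, plus one slot per pair beyond the first word */
	return 2 + meta_cnt / 2;
}
#endif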


/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
		       struct rte_mbuf **tx_pkts __rte_unused,
		       uint16_t nb_pkts __rte_unused)
{
	return 0;
}

/* ************************************************************************* */
uint16_t
eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ark_tx_queue *queue;
	struct rte_mbuf *mbuf;
	uint32_t user_meta[5];

	int stat;
	int32_t prod_index_limit;
	uint16_t nb;
	uint8_t user_len = 0;
	const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;
	tx_user_meta_hook_fn tx_user_meta_hook;

	queue = (struct ark_tx_queue *)vtxq;
	tx_user_meta_hook = queue->tx_user_meta_hook;

	/* free any packets after the HW is done with them */
	free_completed_tx(queue);

	/* leave 4 elements of headroom in the MPU ring */
	prod_index_limit = queue->queue_size + queue->free_index - 4;

	for (nb = 0;
	     (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0;
	     ++nb) {
		mbuf = tx_pkts[nb];

		if (min_pkt_len &&
		    unlikely(rte_pktmbuf_pkt_len(mbuf) < min_pkt_len)) {
			/* Even a short packet can be chained;
			 * the padding must go on the last mbuf
			 * in the chain.
			 */
			uint16_t to_add = min_pkt_len -
				rte_pktmbuf_pkt_len(mbuf);
			char *appended =
				rte_pktmbuf_append(mbuf, to_add);

			if (appended == 0) {
				/* This packet is in error,
				 * we cannot send it so just
				 * count it and delete it.
				 */
				queue->tx_errors += 1;
				rte_pktmbuf_free(mbuf);
				continue;
			}
			memset(appended, 0, to_add);
		}

		if (tx_user_meta_hook)
			tx_user_meta_hook(mbuf, user_meta, &user_len,
					  queue->ext_user_data);
		if (unlikely(mbuf->nb_segs != 1)) {
			stat = eth_ark_tx_jumbo(queue, mbuf,
						user_meta, user_len);
			if (unlikely(stat != 0))
				break;		/* Queue is full */
		} else {
			eth_ark_tx_desc_fill(queue, mbuf,
					     ARK_DDM_SOP | ARK_DDM_EOP,
					     user_meta, user_len);
		}
	}

	if (ARK_DEBUG_CORE && nb != nb_pkts) {
		ARK_PMD_LOG(DEBUG, "TX: Failure to send:"
			   " req: %" PRIU32
			   " sent: %" PRIU32
			   " prod: %" PRIU32
			   " cons: %" PRIU32
			   " free: %" PRIU32 "\n",
			   nb_pkts, nb,
			   queue->prod_index,
			   queue->cons_index,
			   queue->free_index);
		ark_mpu_dump(queue->mpu,
			     "TX Failure MPU: ",
			     queue->phys_qid);
	}

	/* let FPGA know producer index. */
	if (likely(nb != 0))
		ark_mpu_set_producer(queue->mpu, queue->prod_index);

	return nb;
}
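
/*
 * Illustrative sketch, not part of the driver: an application-supplied
 * TX meta hook, assuming tx_user_meta_hook_fn matches the call made in
 * eth_ark_xmit_pkts() above (mbuf, meta word buffer, word count out,
 * opaque user data).  The dynamic-field usage below is hypothetical.
 */
#if 0	/* example only, not compiled */
#include <rte_mbuf_dyn.h>

/* Copy one 32-bit tag from an application-registered mbuf dynamic field
 * into the first user meta word; ext_user_data is assumed to hold the
 * offset returned by rte_mbuf_dynfield_register().
 */
static void
example_tx_user_meta_hook(struct rte_mbuf *mbuf, uint32_t *meta,
			  uint8_t *meta_cnt, void *ext_user_data)
{
	int offset = *(const int *)ext_user_data;

	meta[0] = *RTE_MBUF_DYNFIELD(mbuf, offset, uint32_t *);
	*meta_cnt = 1;	/* up to 5 words fit in user_meta[] above */
}
#endif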

/* ************************************************************************* */
static int
eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf,
		 uint32_t *user_meta, uint8_t meta_cnt)
{
	struct rte_mbuf *next;
	int32_t free_queue_space;
	uint8_t flags = ARK_DDM_SOP;

	free_queue_space = queue->queue_mask -
		(queue->prod_index - queue->free_index);
	/* The first segment needs up to 4 ring slots;
	 * each additional segment needs 2.
	 */
	if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs))))
		return -1;

	while (mbuf != NULL) {
		next = mbuf->next;
		flags |= (next == NULL) ? ARK_DDM_EOP : 0;

		eth_ark_tx_desc_fill(queue, mbuf, flags, user_meta, meta_cnt);

		flags &= ~ARK_DDM_SOP;	/* drop SOP flags */
		meta_cnt = 0;		/* Meta only on SOP */
		mbuf = next;
	}

	return 0;
}
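
/*
 * Illustrative sketch, not part of the driver: both eth_ark_xmit_pkts()
 * and eth_ark_tx_jumbo() stop early when the MPU ring runs out of slots,
 * so a burst may return fewer packets than requested.  A typical caller
 * resubmits the unsent tail; port/queue ids below are hypothetical.
 */
#if 0	/* example only, not compiled */
#include <rte_ethdev.h>

static void
example_send_all(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t sent = 0;

	/* Loops until the DDM completes earlier packets and frees slots. */
	while (sent < nb)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 &pkts[sent], nb - sent);
}
#endif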

/* ************************************************************************* */
int
eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_tx_queue *queue;
	int status;

	int qidx = queue_idx;

	if (!rte_is_power_of_2(nb_desc)) {
		ARK_PMD_LOG(ERR,
			    "DPDK Arkville configuration queue size"
			    " must be power of two %u (%s)\n",
			    nb_desc, __func__);
		return -1;
	}

	/* Each packet requires at least 2 mpu elements - double desc count */
	nb_desc = 2 * nb_desc;

	/* Allocate queue struct */
	queue = rte_zmalloc_socket("Ark_txqueue",
				   sizeof(struct ark_tx_queue),
				   64,
				   socket_id);
	if (queue == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate tx "
			    "queue memory in %s\n",
			    __func__);
		return -ENOMEM;
	}

	/* allocation was zeroed; remaining fields need no explicit init */
	queue->queue_size = nb_desc;
	queue->queue_mask = nb_desc - 1;
	queue->phys_qid = qidx;
	queue->queue_index = queue_idx;
	dev->data->tx_queues[queue_idx] = queue;
	queue->tx_user_meta_hook = ark->user_ext.tx_user_meta_hook;
	queue->ext_user_data = ark->user_data[dev->data->port_id];

	queue->meta_q =
		rte_zmalloc_socket("Ark_txqueue meta",
				   nb_desc * sizeof(union ark_tx_meta),
				   64,
				   socket_id);
	queue->bufs =
		rte_zmalloc_socket("Ark_txqueue bufs",
				   nb_desc * sizeof(struct rte_mbuf *),
				   64,
				   socket_id);

	if (queue->meta_q == 0 || queue->bufs == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate "
			    "queue memory in %s\n", __func__);
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -ENOMEM;
	}

	queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
	queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);

	status = eth_ark_tx_hw_queue_config(queue);

	if (unlikely(status != 0)) {
		rte_free(queue->meta_q);
		rte_free(queue->bufs);
		rte_free(queue);
		return -1;		/* ERROR CODE */
	}

	return 0;
}
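
/*
 * Illustrative sketch, not part of the driver: applications reach the
 * setup path above through rte_eth_tx_queue_setup(); nb_desc must be a
 * power of two and is doubled internally to size the MPU ring.  Port id,
 * queue id and ring size below are arbitrary.
 */
#if 0	/* example only, not compiled */
#include <rte_ethdev.h>

static int
example_setup_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	const uint16_t nb_desc = 512;	/* must be a power of two */

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      NULL /* default tx_conf */);
}
#endif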

/* ************************************************************************* */
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
	rte_iova_t queue_base, ring_base, cons_index_addr;
	uint32_t write_interval_ns;

	/* Verify HW -- MPU */
	if (ark_mpu_verify(queue->mpu, sizeof(union ark_tx_meta)))
		return -1;

	queue_base = rte_malloc_virt2iova(queue);
	ring_base = rte_malloc_virt2iova(queue->meta_q);
	cons_index_addr =
		queue_base + offsetof(struct ark_tx_queue, cons_index);

	ark_mpu_stop(queue->mpu);
	ark_mpu_reset(queue->mpu);

	/* Stop and Reset and configure MPU */
	ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);

	/*
	 * Adjust the completion write interval based on queue size --
	 * smaller queues get more frequent PCIe writes so mbufs are
	 * returned promptly when few are outstanding.
	 * Queue sizes smaller than 128 are not allowed.
	 */
	switch (queue->queue_size) {
	case 128:
		write_interval_ns = 500;
		break;
	case 256:
		write_interval_ns = 500;
		break;
	case 512:
		write_interval_ns = 1000;
		break;
	default:
		write_interval_ns = 2000;
		break;
	}

	/* Tell the DDM where to write the completion (consumer) index */
	ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);

	return 0;
}

/* ************************************************************************* */
void
eth_ark_tx_queue_release(void *vtx_queue)
{
	struct ark_tx_queue *queue;

	queue = (struct ark_tx_queue *)vtx_queue;

	ark_tx_hw_queue_stop(queue);

	queue->cons_index = queue->prod_index;
	free_completed_tx(queue);

	rte_free(queue->meta_q);
	rte_free(queue->bufs);
	rte_free(queue);
}

/* ************************************************************************* */
int
eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;
	int cnt = 0;

	queue = dev->data->tx_queues[queue_id];

	/* Wait for DDM to send out all packets. */
	while (queue->cons_index != queue->prod_index) {
		usleep(100);
		if (cnt++ > 10000)
			return -1;
	}

	ark_mpu_stop(queue->mpu);
	free_completed_tx(queue);

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

int
eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_tx_queue *queue;

	queue = dev->data->tx_queues[queue_id];
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	ark_mpu_start(queue->mpu);
	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/* ************************************************************************* */
static void
free_completed_tx(struct ark_tx_queue *queue)
{
	struct rte_mbuf *mbuf;
	union ark_tx_meta *meta;
	int32_t top_index;

	top_index = queue->cons_index;	/* read once */
	while ((top_index - queue->free_index) > 0) {
		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
			mbuf = queue->bufs[queue->free_index &
					   queue->queue_mask];
			/* ref count of the mbuf is checked in this call. */
			rte_pktmbuf_free(mbuf);
		}
		queue->free_index += (meta->meta_cnt + 2);
	}
}

/* ************************************************************************* */
void
eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;
	uint64_t bytes, pkts;

	queue = vqueue;
	ddm = queue->ddm;

	bytes = ark_ddm_queue_byte_count(ddm);
	pkts = ark_ddm_queue_pkt_count(ddm);

	stats->q_opackets[queue->queue_index] = pkts;
	stats->q_obytes[queue->queue_index] = bytes;
	stats->opackets += pkts;
	stats->obytes += bytes;
	stats->oerrors += queue->tx_errors;
}

void
eth_tx_queue_stats_reset(void *vqueue)
{
	struct ark_tx_queue *queue;
	struct ark_ddm_t *ddm;

	queue = vqueue;
	ddm = queue->ddm;

	ark_ddm_queue_reset_stats(ddm);
	queue->tx_errors = 0;
}