xref: /dpdk/drivers/net/mvneta/mvneta_rxtx.c (revision d7b080f1e72d833d668a66199fe99ccda6c81a36)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Marvell International Ltd.
3  * Copyright(c) 2018 Semihalf.
4  * All rights reserved.
5  */
6 
7 #include "mvneta_rxtx.h"
8 
9 #define MVNETA_PKT_EFFEC_OFFS (MRVL_NETA_PKT_OFFS + MV_MH_SIZE)
10 
11 #define MRVL_NETA_DEFAULT_TC 0
12 
13 /** Maximum number of descriptors in shadow queue. Must be power of 2 */
14 #define MRVL_NETA_TX_SHADOWQ_SIZE MRVL_NETA_TXD_MAX
15 
16 /** Shadow queue size mask (since shadow queue size is power of 2) */
17 #define MRVL_NETA_TX_SHADOWQ_MASK (MRVL_NETA_TX_SHADOWQ_SIZE - 1)
18 
19 /** Minimum number of sent buffers to release from shadow queue to BM */
20 #define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN	16
21 
22 /** Maximum number of sent buffers to release from shadow queue to BM */
23 #define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX	64
24 
25 #define MVNETA_COOKIE_ADDR_INVALID ~0ULL
26 #define MVNETA_COOKIE_HIGH_ADDR_SHIFT	(sizeof(neta_cookie_t) * 8)
27 #define MVNETA_COOKIE_HIGH_ADDR_MASK	(~0ULL << MVNETA_COOKIE_HIGH_ADDR_SHIFT)
28 
29 #define MVNETA_SET_COOKIE_HIGH_ADDR(addr) {				\
30 	if (unlikely(cookie_addr_high == MVNETA_COOKIE_ADDR_INVALID))	\
31 		cookie_addr_high =					\
32 			(uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK;\
33 }
34 
35 #define MVNETA_CHECK_COOKIE_HIGH_ADDR(addr)		\
36 	((likely(cookie_addr_high ==			\
37 	((uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK))) ? 1 : 0)
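/*
 * The MUSDK cookie holds only the low bits of the mbuf virtual address
 * (MVNETA_COOKIE_HIGH_ADDR_SHIFT is derived from sizeof(neta_cookie_t)).
 * The upper bits are latched once into cookie_addr_high and every mbuf is
 * checked against them, so the full pointer can later be reconstructed as
 * (cookie_addr_high | cookie).
 */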
38 
39 struct mvneta_rxq {
40 	struct mvneta_priv *priv;
41 	struct rte_mempool *mp;
42 	int queue_id;
43 	int port_id;
44 	int size;
45 	int cksum_enabled;
46 	uint64_t bytes_recv;
47 	uint64_t drop_mac;
48 	uint64_t pkts_processed;
49 };
50 
51 /*
52  * To use buffer harvesting based on the loopback port, a shadow queue
53  * structure was introduced for buffer information bookkeeping.
54  */
55 struct mvneta_shadow_txq {
56 	int head;           /* write index - used when sending buffers */
57 	int tail;           /* read index - used when releasing buffers */
58 	u16 size;           /* queue occupied size */
59 	struct neta_buff_inf ent[MRVL_NETA_TX_SHADOWQ_SIZE]; /* q entries */
60 };
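/*
 * The shadow queue is a ring: head and tail wrap using
 * MRVL_NETA_TX_SHADOWQ_MASK (the size is a power of two), while 'size'
 * tracks the current occupancy independently of the indices.
 */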
61 
62 struct mvneta_txq {
63 	struct mvneta_priv *priv;
64 	int queue_id;
65 	int port_id;
66 	uint64_t bytes_sent;
67 	struct mvneta_shadow_txq shadow_txq;
68 	int tx_deferred_start;
69 };
70 
71 static uint64_t cookie_addr_high = MVNETA_COOKIE_ADDR_INVALID;
72 static uint16_t rx_desc_free_thresh = MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN;
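/*
 * Both variables above are file-scope and shared by all ports and queues:
 * cookie_addr_high assumes every mbuf comes from the same high-address
 * region, and rx_desc_free_thresh is capped in mvneta_rx_queue_setup() to
 * at most half of the smallest configured Rx ring.
 */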
73 
74 static inline int
75 mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num)
76 {
77 	struct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
78 	struct neta_buff_inf entries[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
79 	int i, ret;
80 	uint16_t nb_desc = *num;
81 
82 	/* To prevent GCC-12 warning. */
83 	if (unlikely(nb_desc == 0))
84 		return -1;
85 
86 	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc);
87 	if (ret) {
88 		MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc);
89 		*num = 0;
90 		return -1;
91 	}
92 
93 	MVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]);
94 
95 	for (i = 0; i < nb_desc; i++) {
96 		if (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) {
97 			MVNETA_LOG(ERR,
98 				"mbuf virt high addr 0x%lx out of range 0x%lx",
99 				(uint64_t)mbufs[i] >> 32,
100 				cookie_addr_high >> 32);
101 			*num = 0;
102 			goto out;
103 		}
104 		entries[i].addr = rte_mbuf_data_iova_default(mbufs[i]);
105 		entries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i];
106 	}
107 	neta_ppio_inq_put_buffs(priv->ppio, rxq->queue_id, entries, num);
108 
109 out:
110 	for (i = *num; i < nb_desc; i++)
111 		rte_pktmbuf_free(mbufs[i]);
112 
113 	return 0;
114 }
115 
116 /**
117  * Allocate buffers from mempool
118  * and store addresses in rx descriptors.
119  *
120  * @return
121  *   0 on success, negative error value otherwise.
122  */
123 static inline int
124 mvneta_buffs_alloc(struct mvneta_priv *priv, struct mvneta_rxq *rxq, int *num)
125 {
126 	uint16_t nb_desc, nb_desc_burst, sent = 0;
127 	int ret = 0;
128 
129 	nb_desc = *num;
130 
131 	do {
132 		nb_desc_burst =
133 			(nb_desc < MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX) ?
134 			nb_desc : MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX;
135 
136 		ret = mvneta_buffs_refill(priv, rxq, &nb_desc_burst);
137 		if (unlikely(ret || !nb_desc_burst))
138 			break;
139 
140 		sent += nb_desc_burst;
141 		nb_desc -= nb_desc_burst;
142 
143 	} while (nb_desc);
144 
145 	*num = sent;
146 
147 	return ret;
148 }
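/*
 * Illustration: with *num == 100 the loop above issues refills of 64 and 36
 * buffers (MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX == 64), and *num is rewritten
 * with the count actually handed to the hardware, which may be smaller if
 * mbuf allocation or the inq put fails part way through.
 */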
149 
150 static inline void
151 mvneta_fill_shadowq(struct mvneta_shadow_txq *sq, struct rte_mbuf *buf)
152 {
153 	sq->ent[sq->head].cookie = (uint64_t)buf;
154 	sq->ent[sq->head].addr = buf ?
155 		rte_mbuf_data_iova_default(buf) : 0;
156 
157 	sq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
158 	sq->size++;
159 }
160 
161 static inline void
162 mvneta_fill_desc(struct neta_ppio_desc *desc, struct rte_mbuf *buf)
163 {
164 	neta_ppio_outq_desc_reset(desc);
165 	neta_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
166 	neta_ppio_outq_desc_set_pkt_offset(desc, 0);
167 	neta_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
168 }
169 
170 /**
171  * Release already sent buffers to mempool.
172  *
173  * @param ppio
174  *   Pointer to the port structure.
175  * @param sq
176  *   Pointer to the shadow queue.
177  * @param qid
178  *   Queue id number.
181  */
182 static inline void
183 mvneta_sent_buffers_free(struct neta_ppio *ppio,
184 			 struct mvneta_shadow_txq *sq, int qid)
185 {
186 	struct neta_buff_inf *entry;
187 	uint16_t nb_done = 0;
188 	int i;
189 	int tail = sq->tail;
190 
191 	neta_ppio_get_num_outq_done(ppio, qid, &nb_done);
192 
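	/*
	 * Sanity check: the hardware should never report more completed
	 * descriptors than the shadow queue currently tracks; bail out rather
	 * than corrupting the ring accounting.
	 */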
193 	if (nb_done > sq->size) {
194 		MVNETA_LOG(ERR, "nb_done: %d, sq->size %d",
195 			   nb_done, sq->size);
196 		return;
197 	}
198 
199 	for (i = 0; i < nb_done; i++) {
200 		entry = &sq->ent[tail];
201 
202 		if (unlikely(!entry->addr)) {
203 			MVNETA_LOG(DEBUG,
204 				"Shadow memory @%d: cookie(%lx), pa(%lx)!",
205 				tail, (u64)entry->cookie,
206 				(u64)entry->addr);
207 			tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
208 			continue;
209 		}
210 
211 		struct rte_mbuf *mbuf;
212 
213 		mbuf = (struct rte_mbuf *)
214 			   (cookie_addr_high | entry->cookie);
215 		rte_pktmbuf_free(mbuf);
216 		tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
217 	}
218 
219 	sq->tail = tail;
220 	sq->size -= nb_done;
221 }
222 
223 /**
224  * Return packet type information and l3/l4 offsets.
225  *
226  * @param desc
227  *   Pointer to the received packet descriptor.
228  * @param l3_offset
229  *   l3 packet offset.
230  * @param l4_offset
231  *   l4 packet offset.
232  *
233  * @return
234  *   Packet type information.
235  */
236 static inline uint64_t
237 mvneta_desc_to_packet_type_and_offset(struct neta_ppio_desc *desc,
238 				    uint8_t *l3_offset, uint8_t *l4_offset)
239 {
240 	enum neta_inq_l3_type l3_type;
241 	enum neta_inq_l4_type l4_type;
242 	uint64_t packet_type;
243 
244 	neta_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
245 	neta_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
246 
247 	packet_type = RTE_PTYPE_L2_ETHER;
248 
249 	if (NETA_RXD_GET_VLAN_INFO(desc))
250 		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
251 
252 	switch (l3_type) {
253 	case NETA_INQ_L3_TYPE_IPV4_BAD:
254 	case NETA_INQ_L3_TYPE_IPV4_OK:
255 		packet_type |= RTE_PTYPE_L3_IPV4;
256 		break;
257 	case NETA_INQ_L3_TYPE_IPV6:
258 		packet_type |= RTE_PTYPE_L3_IPV6;
259 		break;
260 	default:
261 		packet_type |= RTE_PTYPE_UNKNOWN;
262 		MVNETA_LOG(DEBUG, "Failed to recognize l3 packet type");
263 		break;
264 	}
265 
266 	switch (l4_type) {
267 	case NETA_INQ_L4_TYPE_TCP:
268 		packet_type |= RTE_PTYPE_L4_TCP;
269 		break;
270 	case NETA_INQ_L4_TYPE_UDP:
271 		packet_type |= RTE_PTYPE_L4_UDP;
272 		break;
273 	default:
274 		packet_type |= RTE_PTYPE_UNKNOWN;
275 		MVNETA_LOG(DEBUG, "Failed to recognize l4 packet type");
276 		break;
277 	}
278 
279 	return packet_type;
280 }
281 
282 /**
283  * Prepare offload information.
284  *
285  * @param ol_flags
286  *   Offload flags.
287  * @param l3_type
288  *   Pointer to the neta_outq_l3_type enum.
289  * @param l4_type
290  *   Pointer to the neta_outq_l4_type enum.
291  * @param gen_l3_cksum
292  *   Will be set to 1 in case l3 checksum is computed.
293  * @param gen_l4_cksum
294  *   Will be set to 1 in case l4 checksum is computed.
295  */
296 static inline void
297 mvneta_prepare_proto_info(uint64_t ol_flags,
298 			  enum neta_outq_l3_type *l3_type,
299 			  enum neta_outq_l4_type *l4_type,
300 			  int *gen_l3_cksum,
301 			  int *gen_l4_cksum)
302 {
303 	/*
304 	 * Based on ol_flags, prepare the information for
305 	 * neta_ppio_outq_desc_set_proto_info(), which sets up the descriptor
306 	 * for checksum offloading.
307 	 * In most checksum cases IPv4 must be set, so it is used as the
308 	 * default value.
309 	 */
310 	*l3_type = NETA_OUTQ_L3_TYPE_IPV4;
311 	*gen_l3_cksum = ol_flags & RTE_MBUF_F_TX_IP_CKSUM ? 1 : 0;
312 
313 	if (ol_flags & RTE_MBUF_F_TX_IPV6) {
314 		*l3_type = NETA_OUTQ_L3_TYPE_IPV6;
315 		/* no checksum for ipv6 header */
316 		*gen_l3_cksum = 0;
317 	}
318 
319 	if (ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) {
320 		*l4_type = NETA_OUTQ_L4_TYPE_TCP;
321 		*gen_l4_cksum = 1;
322 	} else if (ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) {
323 		*l4_type = NETA_OUTQ_L4_TYPE_UDP;
324 		*gen_l4_cksum = 1;
325 	} else {
326 		*l4_type = NETA_OUTQ_L4_TYPE_OTHER;
327 		/* no checksum for other type */
328 		*gen_l4_cksum = 0;
329 	}
330 }
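/*
 * Worked example: for an IPv4/TCP mbuf whose ol_flags contain
 * RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM this yields
 * l3_type == NETA_OUTQ_L3_TYPE_IPV4, l4_type == NETA_OUTQ_L4_TYPE_TCP and
 * both gen_l3_cksum and gen_l4_cksum set to 1.
 */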
331 
332 /**
333  * Get offload information from the received packet descriptor.
334  *
335  * @param desc
336  *   Pointer to the received packet descriptor.
337  *
338  * @return
339  *   Mbuf offload flags.
340  */
341 static inline uint64_t
342 mvneta_desc_to_ol_flags(struct neta_ppio_desc *desc)
343 {
344 	uint64_t flags;
345 	enum neta_inq_desc_status status;
346 
347 	status = neta_ppio_inq_desc_get_l3_pkt_error(desc);
348 	if (unlikely(status != NETA_DESC_ERR_OK))
349 		flags = RTE_MBUF_F_RX_IP_CKSUM_BAD;
350 	else
351 		flags = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
352 
353 	status = neta_ppio_inq_desc_get_l4_pkt_error(desc);
354 	if (unlikely(status != NETA_DESC_ERR_OK))
355 		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
356 	else
357 		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
358 
359 	return flags;
360 }
361 
362 /**
363  * DPDK callback for transmit.
364  *
365  * @param txq
366  *   Generic pointer to the transmit queue.
367  * @param tx_pkts
368  *   Packets to transmit.
369  * @param nb_pkts
370  *   Number of packets in array.
371  *
372  * @return
373  *   Number of packets successfully transmitted.
374  */
375 static uint16_t
376 mvneta_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
377 {
378 	struct mvneta_txq *q = txq;
379 	struct mvneta_shadow_txq *sq;
380 	struct neta_ppio_desc descs[nb_pkts];
381 	int i, bytes_sent = 0;
382 	uint16_t num, sq_free_size;
383 	uint64_t addr;
384 
385 	sq = &q->shadow_txq;
386 	if (unlikely(!nb_pkts || !q->priv->ppio))
387 		return 0;
388 
389 	if (sq->size)
390 		mvneta_sent_buffers_free(q->priv->ppio,
391 					 sq, q->queue_id);
392 
393 	sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;
394 	if (unlikely(nb_pkts > sq_free_size)) {
395 		MVNETA_LOG(DEBUG,
396 			"No room in shadow queue for %d packets! %d packets will be sent.",
397 			nb_pkts, sq_free_size);
398 		nb_pkts = sq_free_size;
399 	}
400 
401 
402 	for (i = 0; i < nb_pkts; i++) {
403 		struct rte_mbuf *mbuf = tx_pkts[i];
404 		int gen_l3_cksum, gen_l4_cksum;
405 		enum neta_outq_l3_type l3_type;
406 		enum neta_outq_l4_type l4_type;
407 
408 		/* Fill first mbuf info in shadow queue */
409 		mvneta_fill_shadowq(sq, mbuf);
410 		mvneta_fill_desc(&descs[i], mbuf);
411 
412 		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
413 
414 		if (!(mbuf->ol_flags & MVNETA_TX_PKT_OFFLOADS))
415 			continue;
416 		mvneta_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
417 					  &gen_l3_cksum, &gen_l4_cksum);
418 
419 		neta_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
420 						   mbuf->l2_len,
421 						   mbuf->l2_len + mbuf->l3_len,
422 						   gen_l3_cksum, gen_l4_cksum);
423 	}
424 	num = nb_pkts;
425 	neta_ppio_send(q->priv->ppio, q->queue_id, descs, &nb_pkts);
426 
427 
428 	/* Roll back shadow queue entries for packets that were not sent */
429 	if (unlikely(num > nb_pkts)) {
430 		for (i = nb_pkts; i < num; i++) {
431 			sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) &
432 				MRVL_NETA_TX_SHADOWQ_MASK;
433 			addr = cookie_addr_high | sq->ent[sq->head].cookie;
434 			bytes_sent -=
435 				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
436 		}
437 		sq->size -= num - nb_pkts;
438 	}
439 
440 	q->bytes_sent += bytes_sent;
441 
442 	return nb_pkts;
443 }
444 
445 /** DPDK callback for S/G transmit.
446  *
447  * @param txq
448  *   Generic pointer to the transmit queue.
449  * @param tx_pkts
450  *   Packets to transmit.
451  * @param nb_pkts
452  *   Number of packets in array.
453  *
454  * @return
455  *   Number of packets successfully transmitted.
456  */
457 static uint16_t
458 mvneta_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
459 {
460 	struct mvneta_txq *q = txq;
461 	struct mvneta_shadow_txq *sq;
462 	struct neta_ppio_desc descs[nb_pkts * NETA_PPIO_DESC_NUM_FRAGS];
463 	struct neta_ppio_sg_pkts pkts;
464 	uint8_t frags[nb_pkts];
465 	int i, j, bytes_sent = 0;
466 	int tail, tail_first;
467 	uint16_t num, sq_free_size;
468 	uint16_t nb_segs, total_descs = 0;
469 	uint64_t addr;
470 
471 	sq = &q->shadow_txq;
472 	pkts.frags = frags;
473 	pkts.num = 0;
474 
475 	if (unlikely(!q->priv->ppio))
476 		return 0;
477 
478 	if (sq->size)
479 		mvneta_sent_buffers_free(q->priv->ppio,
480 					 sq, q->queue_id);
481 	/* Save shadow queue free size */
482 	sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;
483 
484 	tail = 0;
485 	for (i = 0; i < nb_pkts; i++) {
486 		struct rte_mbuf *mbuf = tx_pkts[i];
487 		struct rte_mbuf *seg = NULL;
488 		int gen_l3_cksum, gen_l4_cksum;
489 		enum neta_outq_l3_type l3_type;
490 		enum neta_outq_l4_type l4_type;
491 
492 		nb_segs = mbuf->nb_segs;
493 		total_descs += nb_segs;
494 
495 		/*
496 		 * Check if total_descs does not exceed
497 		 * shadow queue free size
498 		 */
499 		if (unlikely(total_descs > sq_free_size)) {
500 			total_descs -= nb_segs;
501 			MVNETA_LOG(DEBUG,
502 				"No room in shadow queue for %d packets! "
503 				"%d packets will be sent.",
504 				nb_pkts, i);
505 			break;
506 		}
507 
508 
509 		/* Check if nb_segs does not exceed the max nb of desc per
510 		 * fragmented packet
511 		 */
512 		if (unlikely(nb_segs > NETA_PPIO_DESC_NUM_FRAGS)) {
513 			total_descs -= nb_segs;
514 			MVNETA_LOG(ERR,
515 				"Too many segments. Packet won't be sent.");
516 			break;
517 		}
518 
519 		pkts.frags[pkts.num] = nb_segs;
520 		pkts.num++;
521 		tail_first = tail;
522 
523 		seg = mbuf;
524 		for (j = 0; j < nb_segs - 1; j++) {
525 			/* For the subsequent segments, set shadow queue
526 			 * buffer to NULL
527 			 */
528 			mvneta_fill_shadowq(sq, NULL);
529 			mvneta_fill_desc(&descs[tail], seg);
530 
531 			tail++;
532 			seg = seg->next;
533 		}
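		/*
		 * Only the head mbuf gets a real cookie: freeing it with
		 * rte_pktmbuf_free() releases the whole segment chain, and
		 * storing it in the packet's last shadow entry ensures it is
		 * released only after all of the packet's descriptors have
		 * completed.
		 */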
534 		/* Put first mbuf info in last shadow queue entry */
535 		mvneta_fill_shadowq(sq, mbuf);
536 		/* Update descriptor with last segment */
537 		mvneta_fill_desc(&descs[tail++], seg);
538 
539 		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
540 
541 		if (!(mbuf->ol_flags & MVNETA_TX_PKT_OFFLOADS))
542 			continue;
543 		mvneta_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
544 					  &gen_l3_cksum, &gen_l4_cksum);
545 
546 		neta_ppio_outq_desc_set_proto_info(&descs[tail_first],
547 						   l3_type, l4_type,
548 						   mbuf->l2_len,
549 						   mbuf->l2_len + mbuf->l3_len,
550 						   gen_l3_cksum, gen_l4_cksum);
551 	}
552 	num = total_descs;
553 	neta_ppio_send_sg(q->priv->ppio, q->queue_id, descs, &total_descs,
554 			  &pkts);
555 
556 	/* Roll back shadow queue entries for packets that were not sent */
557 	if (unlikely(num > total_descs)) {
558 		for (i = total_descs; i < num; i++) {
559 			sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE +
560 					sq->head - 1) &
561 					MRVL_NETA_TX_SHADOWQ_MASK;
562 			addr = sq->ent[sq->head].cookie;
563 			if (addr) {
564 				struct rte_mbuf *mbuf;
565 
566 				mbuf = (struct rte_mbuf *)
567 						(cookie_addr_high | addr);
568 				bytes_sent -= rte_pktmbuf_pkt_len(mbuf);
569 			}
570 		}
571 		sq->size -= num - total_descs;
572 		nb_pkts = pkts.num;
573 	}
574 
575 	q->bytes_sent += bytes_sent;
576 
577 	return nb_pkts;
578 }
579 
580 /**
581  * Set tx burst function according to offload flag
582  *
583  * @param dev
584  *   Pointer to Ethernet device structure.
585  */
586 void
587 mvneta_set_tx_function(struct rte_eth_dev *dev)
588 {
589 	struct mvneta_priv *priv = dev->data->dev_private;
590 
591 	/* Use the simple single-segment Tx callback unless multi-segment Tx was requested */
592 	if (priv->multiseg) {
593 		MVNETA_LOG(INFO, "Using multi-segment tx callback");
594 		dev->tx_pkt_burst = mvneta_tx_sg_pkt_burst;
595 	} else {
596 		MVNETA_LOG(INFO, "Using single-segment tx callback");
597 		dev->tx_pkt_burst = mvneta_tx_pkt_burst;
598 	}
599 }
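/*
 * The selected callback is what rte_eth_tx_burst() ends up invoking for this
 * port. A minimal application-side sketch (Tx queue 0 assumed):
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, mbufs, nb_mbufs);
 *	// sent may be smaller than nb_mbufs when the shadow queue is full
 */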
600 
601 /**
602  * DPDK callback for receive.
603  *
604  * @param rxq
605  *   Generic pointer to the receive queue.
606  * @param rx_pkts
607  *   Array to store received packets.
608  * @param nb_pkts
609  *   Maximum number of packets in array.
610  *
611  * @return
612  *   Number of packets successfully received.
613  */
614 uint16_t
615 mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
616 {
617 	struct mvneta_rxq *q = rxq;
618 	struct neta_ppio_desc descs[nb_pkts];
619 	int i, ret, rx_done = 0, rx_dropped = 0;
620 
621 	if (unlikely(!q || !q->priv->ppio))
622 		return 0;
623 
624 	ret = neta_ppio_recv(q->priv->ppio, q->queue_id,
625 			descs, &nb_pkts);
626 
627 	if (unlikely(ret < 0)) {
628 		MVNETA_LOG(ERR, "Failed to receive packets");
629 		return 0;
630 	}
631 
632 	for (i = 0; i < nb_pkts; i++) {
633 		struct rte_mbuf *mbuf;
634 		uint8_t l3_offset, l4_offset;
635 		enum neta_inq_desc_status status;
636 		uint64_t addr;
637 
638 		addr = cookie_addr_high |
639 			neta_ppio_inq_desc_get_cookie(&descs[i]);
640 		mbuf = (struct rte_mbuf *)addr;
641 
642 		rte_pktmbuf_reset(mbuf);
643 
644 		/* drop packet in case of mac, overrun or resource error */
645 		status = neta_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
646 		if (unlikely(status != NETA_DESC_ERR_OK)) {
647 			/* Release the mbuf to the mempool since
648 			 * it won't be transferred to tx path
649 			 */
650 			rte_pktmbuf_free(mbuf);
651 			q->drop_mac++;
652 			rx_dropped++;
653 			continue;
654 		}
655 
656 		mbuf->data_off += MVNETA_PKT_EFFEC_OFFS;
657 		mbuf->pkt_len = neta_ppio_inq_desc_get_pkt_len(&descs[i]);
658 		mbuf->data_len = mbuf->pkt_len;
659 		mbuf->port = q->port_id;
660 		mbuf->packet_type =
661 			mvneta_desc_to_packet_type_and_offset(&descs[i],
662 								&l3_offset,
663 								&l4_offset);
664 		mbuf->l2_len = l3_offset;
665 		mbuf->l3_len = l4_offset - l3_offset;
666 
667 		if (likely(q->cksum_enabled))
668 			mbuf->ol_flags = mvneta_desc_to_ol_flags(&descs[i]);
669 
670 		rx_pkts[rx_done++] = mbuf;
671 		q->bytes_recv += mbuf->pkt_len;
672 	}
673 	q->pkts_processed += rx_done + rx_dropped;
674 
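	/*
	 * Refill in fixed-size chunks: once more than rx_desc_free_thresh
	 * packets have been consumed, hand that many fresh buffers back to
	 * the hardware and account only for those that actually made it in.
	 */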
675 	if (q->pkts_processed > rx_desc_free_thresh) {
676 		int buf_to_refill = rx_desc_free_thresh;
677 
678 		ret = mvneta_buffs_alloc(q->priv, q, &buf_to_refill);
679 		if (ret)
680 			MVNETA_LOG(ERR, "Refill failed");
681 		q->pkts_processed -= buf_to_refill;
682 	}
683 
684 	return rx_done;
685 }
686 
687 /**
688  * DPDK callback to configure the receive queue.
689  *
690  * @param dev
691  *   Pointer to Ethernet device structure.
692  * @param idx
693  *   RX queue index.
694  * @param desc
695  *   Number of descriptors to configure in queue.
696  * @param socket
697  *   NUMA socket on which memory must be allocated.
698  * @param conf
699  *   Thresholds parameters (unused).
700  * @param mp
701  *   Memory pool for buffer allocations.
702  *
703  * @return
704  *   0 on success, negative error value otherwise.
705  */
706 int
707 mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
708 		      unsigned int socket,
709 		      const struct rte_eth_rxconf *conf __rte_unused,
710 		      struct rte_mempool *mp)
711 {
712 	struct mvneta_priv *priv = dev->data->dev_private;
713 	struct mvneta_rxq *rxq;
714 	uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
715 	uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
716 
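	/*
	 * frame_size is the largest payload a single mbuf from this pool can
	 * hold once the standard headroom and the Marvell packet offset
	 * (MVNETA_PKT_EFFEC_OFFS) are reserved; if it cannot fit an MTU-sized
	 * frame, the MTU is lowered below.
	 */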
717 	frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;
718 
719 	if (frame_size < max_rx_pktlen) {
720 		MVNETA_LOG(ERR,
721 			"Mbuf size must be increased to %u bytes to hold up "
722 			"to %u bytes of data.",
723 			max_rx_pktlen + buf_size - frame_size,
724 			max_rx_pktlen);
725 		dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
726 		MVNETA_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
727 	}
728 
729 	if (dev->data->rx_queues[idx]) {
730 		rte_free(dev->data->rx_queues[idx]);
731 		dev->data->rx_queues[idx] = NULL;
732 	}
733 
734 	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
735 	if (!rxq)
736 		return -ENOMEM;
737 
738 	rxq->priv = priv;
739 	rxq->mp = mp;
740 	rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
741 			     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
742 	rxq->queue_id = idx;
743 	rxq->port_id = dev->data->port_id;
744 	rxq->size = desc;
745 	rx_desc_free_thresh = RTE_MIN(rx_desc_free_thresh, (desc / 2));
746 	priv->ppio_params.inqs_params.tcs_params[MRVL_NETA_DEFAULT_TC].size =
747 		desc;
748 
749 	dev->data->rx_queues[idx] = rxq;
750 
751 	return 0;
752 }
753 
754 /**
755  * DPDK callback to configure the transmit queue.
756  *
757  * @param dev
758  *   Pointer to Ethernet device structure.
759  * @param idx
760  *   Transmit queue index.
761  * @param desc
762  *   Number of descriptors to configure in the queue.
763  * @param socket
764  *   NUMA socket on which memory must be allocated.
765  * @param conf
766  *   Tx queue configuration parameters.
767  *
768  * @return
769  *   0 on success, negative error value otherwise.
770  */
771 int
772 mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
773 		      unsigned int socket, const struct rte_eth_txconf *conf)
774 {
775 	struct mvneta_priv *priv = dev->data->dev_private;
776 	struct mvneta_txq *txq;
777 
778 	if (dev->data->tx_queues[idx]) {
779 		rte_free(dev->data->tx_queues[idx]);
780 		dev->data->tx_queues[idx] = NULL;
781 	}
782 
783 	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
784 	if (!txq)
785 		return -ENOMEM;
786 
787 	txq->priv = priv;
788 	txq->queue_id = idx;
789 	txq->port_id = dev->data->port_id;
790 	txq->tx_deferred_start = conf->tx_deferred_start;
791 	dev->data->tx_queues[idx] = txq;
792 
793 	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
794 	priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
795 
796 	return 0;
797 }
798 
799 /**
800  * DPDK callback to release the transmit queue.
801  *
802  * @param dev
803  *   Pointer to Ethernet device structure.
804  * @param qid
805  *   Transmit queue index.
806  */
807 void
808 mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
809 {
810 	struct mvneta_txq *q = dev->data->tx_queues[qid];
811 
812 	if (!q)
813 		return;
814 
815 	rte_free(q);
816 }
817 
818 /**
819  * Return mbufs to mempool.
820  *
821  * @param desc
822  *    Array of rx descriptors.
823  * @param num
824  *    Number of descriptors in the array.
825  */
826 static void
827 mvneta_recv_buffs_free(struct neta_ppio_desc *desc, uint16_t num)
828 {
829 	uint64_t addr;
830 	uint8_t i;
831 
832 	for (i = 0; i < num; i++) {
833 		if (desc) {
834 			addr = cookie_addr_high |
835 					neta_ppio_inq_desc_get_cookie(desc);
836 			if (addr)
837 				rte_pktmbuf_free((struct rte_mbuf *)addr);
838 			desc++;
839 		}
840 	}
841 }
842 
843 int
844 mvneta_alloc_rx_bufs(struct rte_eth_dev *dev)
845 {
846 	struct mvneta_priv *priv = dev->data->dev_private;
847 	int ret = 0, i;
848 
849 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
850 		struct mvneta_rxq *rxq = dev->data->rx_queues[i];
851 		int num = rxq->size;
852 
853 		ret = mvneta_buffs_alloc(priv, rxq, &num);
854 		if (ret || num != rxq->size) {
855 			rte_free(rxq);
856 			return ret;
857 		}
858 	}
859 
860 	return 0;
861 }
862 
863 /**
864  * Flush single receive queue.
865  *
866  * @param rxq
867  *   Pointer to rx queue structure.
870  */
871 static void
mvneta_rx_queue_flush(struct mvneta_rxq * rxq)872 mvneta_rx_queue_flush(struct mvneta_rxq *rxq)
873 {
874 	struct neta_ppio_desc *descs;
875 	struct neta_buff_inf *bufs;
876 	uint16_t num;
877 	int ret, i;
878 
879 	descs = rte_malloc("rxdesc", MRVL_NETA_RXD_MAX * sizeof(*descs), 0);
880 	if (descs == NULL) {
881 		MVNETA_LOG(ERR, "Failed to allocate descs.");
882 		return;
883 	}
884 
885 	bufs = rte_malloc("buffs", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0);
886 	if (bufs == NULL) {
887 		MVNETA_LOG(ERR, "Failed to allocate bufs.");
888 		rte_free(descs);
889 		return;
890 	}
891 
892 	do {
893 		num = MRVL_NETA_RXD_MAX;
894 		ret = neta_ppio_recv(rxq->priv->ppio,
895 				     rxq->queue_id,
896 				     descs, &num);
897 		mvneta_recv_buffs_free(descs, num);
898 	} while (ret == 0 && num);
899 
900 	rxq->pkts_processed = 0;
901 
902 	num = MRVL_NETA_RXD_MAX;
903 
904 	neta_ppio_inq_get_all_buffs(rxq->priv->ppio, rxq->queue_id, bufs, &num);
905 	MVNETA_LOG(INFO, "freeing %u unused bufs.", num);
906 
907 	for (i = 0; i < num; i++) {
908 		uint64_t addr;
909 		if (bufs[i].cookie) {
910 			addr = cookie_addr_high | bufs[i].cookie;
911 			rte_pktmbuf_free((struct rte_mbuf *)addr);
912 		}
913 	}
914 
915 	rte_free(descs);
916 	rte_free(bufs);
917 }
918 
919 /**
920  * Flush single transmit queue.
921  *
922  * @param txq
923  *     Pointer to tx queue structure
924  */
925 static void
926 mvneta_tx_queue_flush(struct mvneta_txq *txq)
927 {
928 	struct mvneta_shadow_txq *sq = &txq->shadow_txq;
929 
930 	if (sq->size)
931 		mvneta_sent_buffers_free(txq->priv->ppio, sq,
932 					 txq->queue_id);
933 
934 	/* free the rest of them */
935 	while (sq->tail != sq->head) {
936 		uint64_t addr = cookie_addr_high |
937 			sq->ent[sq->tail].cookie;
938 		rte_pktmbuf_free((struct rte_mbuf *)addr);
939 		sq->tail = (sq->tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
940 	}
941 	memset(sq, 0, sizeof(*sq));
942 }
943 
944 void
945 mvneta_flush_queues(struct rte_eth_dev *dev)
946 {
947 	int i;
948 
949 	MVNETA_LOG(INFO, "Flushing rx queues");
950 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
951 		struct mvneta_rxq *rxq = dev->data->rx_queues[i];
952 
953 		mvneta_rx_queue_flush(rxq);
954 	}
955 
956 	MVNETA_LOG(INFO, "Flushing tx queues");
957 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
958 		struct mvneta_txq *txq = dev->data->tx_queues[i];
959 
960 		mvneta_tx_queue_flush(txq);
961 	}
962 }
963 
964 /**
965  * DPDK callback to release the receive queue.
966  *
967  * @param dev
968  *   Pointer to Ethernet device structure.
969  * @param qid
970  *   Receive queue index.
971  */
972 void
973 mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
974 {
975 	struct mvneta_rxq *q = dev->data->rx_queues[qid];
976 
977 	if (!q)
978 		return;
979 
980 	/* If dev_stop was called already, mbufs are already
981 	 * returned to mempool and ppio is deinitialized.
982 	 * Skip this step.
983 	 */
984 
985 	if (q->priv->ppio)
986 		mvneta_rx_queue_flush(q);
987 
988 	rte_free(q);
989 }
990 
991 /**
992  * DPDK callback to get information about specific receive queue.
993  *
994  * @param dev
995  *   Pointer to Ethernet device structure.
996  * @param rx_queue_id
997  *   Receive queue index.
998  * @param qinfo
999  *   Receive queue information structure.
1000  */
1001 void
1002 mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1003 		    struct rte_eth_rxq_info *qinfo)
1004 {
1005 	struct mvneta_rxq *q = dev->data->rx_queues[rx_queue_id];
1006 
1007 	qinfo->mp = q->mp;
1008 	qinfo->nb_desc = q->size;
1009 }
1010 
1011 /**
1012  * DPDK callback to get information about specific transmit queue.
1013  *
1014  * @param dev
1015  *   Pointer to Ethernet device structure.
1016  * @param tx_queue_id
1017  *   Transmit queue index.
1018  * @param qinfo
1019  *   Transmit queue information structure.
1020  */
1021 void
1022 mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1023 		    struct rte_eth_txq_info *qinfo)
1024 {
1025 	struct mvneta_priv *priv = dev->data->dev_private;
1026 
1027 	qinfo->nb_desc =
1028 		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
1029 }
1030