xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision 2d91b28730a945def257bc372a525c9b5dbf181c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45 	struct virtnet_rx *rxvq = rxq;
46 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
47 
48 	return virtqueue_nused(vq) >= offset;
49 }
50 
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54 	vq->vq_free_cnt += num;
55 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57 
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61 	struct vring_desc *dp, *dp_tail;
62 	struct vq_desc_extra *dxp;
63 	uint16_t desc_idx_last = desc_idx;
64 
65 	dp  = &vq->vq_split.ring.desc[desc_idx];
66 	dxp = &vq->vq_descx[desc_idx];
67 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 		while (dp->flags & VRING_DESC_F_NEXT) {
70 			desc_idx_last = dp->next;
71 			dp = &vq->vq_split.ring.desc[dp->next];
72 		}
73 	}
74 	dxp->ndescs = 0;
75 
76 	/*
77 	 * We must append the existing free chain, if any, to the end of the
78 	 * newly freed chain. If the virtqueue was completely used, then the
79 	 * free chain head would be VQ_RING_DESC_CHAIN_END.
80 	 */
81 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 		vq->vq_desc_head_idx = desc_idx;
83 	} else {
84 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85 		dp_tail->next = desc_idx;
86 	}
87 
88 	vq->vq_desc_tail_idx = desc_idx_last;
89 	dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91 
92 void
93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
94 {
95 	uint32_t s = mbuf->pkt_len;
96 	struct rte_ether_addr *ea;
97 
98 	stats->bytes += s;
99 
100 	if (s == 64) {
101 		stats->size_bins[1]++;
102 	} else if (s > 64 && s < 1024) {
103 		uint32_t bin;
104 
105 		/* count leading zeros and offset into the correct bin */
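		/* e.g. s = 128: clz(128) = 24, bin = 32 - 24 - 5 = 3, so bins
		 * [2]..[5] cover 65-127, 128-255, 256-511 and 512-1023 bytes.
		 */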
106 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
107 		stats->size_bins[bin]++;
108 	} else {
109 		if (s < 64)
110 			stats->size_bins[0]++;
111 		else if (s < 1519)
112 			stats->size_bins[6]++;
113 		else
114 			stats->size_bins[7]++;
115 	}
116 
117 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
118 	if (rte_is_multicast_ether_addr(ea)) {
119 		if (rte_is_broadcast_ether_addr(ea))
120 			stats->broadcast++;
121 		else
122 			stats->multicast++;
123 	}
124 }
125 
126 static inline void
127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
128 {
129 	VIRTIO_DUMP_PACKET(m, m->data_len);
130 
131 	virtio_update_packet_stats(&rxvq->stats, m);
132 }
133 
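/*
 * Dequeue up to 'num' received buffers from a packed virtqueue. Stops as
 * soon as the next descriptor has not yet been marked used by the device,
 * and returns the number of mbufs placed in rx_pkts[] (lengths in len[]).
 */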
134 static uint16_t
135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
136 				  struct rte_mbuf **rx_pkts,
137 				  uint32_t *len,
138 				  uint16_t num)
139 {
140 	struct rte_mbuf *cookie;
141 	uint16_t used_idx;
142 	uint16_t id;
143 	struct vring_packed_desc *desc;
144 	uint16_t i;
145 
146 	desc = vq->vq_packed.ring.desc;
147 
148 	for (i = 0; i < num; i++) {
149 		used_idx = vq->vq_used_cons_idx;
150 		/* desc_is_used has a load-acquire or rte_io_rmb inside,
151 		 * and waits for a used descriptor in the virtqueue.
152 		 */
153 		if (!desc_is_used(&desc[used_idx], vq))
154 			return i;
155 		len[i] = desc[used_idx].len;
156 		id = desc[used_idx].id;
157 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
158 		if (unlikely(cookie == NULL)) {
159 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
160 				vq->vq_used_cons_idx);
161 			break;
162 		}
163 		rte_prefetch0(cookie);
164 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
165 		rx_pkts[i] = cookie;
166 
167 		vq->vq_free_cnt++;
168 		vq->vq_used_cons_idx++;
169 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
170 			vq->vq_used_cons_idx -= vq->vq_nentries;
171 			vq->vq_packed.used_wrap_counter ^= 1;
172 		}
173 	}
174 
175 	return i;
176 }
177 
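/*
 * Split-ring counterpart of the packed dequeue above: walk the used ring,
 * return each completed descriptor chain to the free list and hand the
 * attached mbuf back to the caller.
 */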
178 static uint16_t
179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
180 			   uint32_t *len, uint16_t num)
181 {
182 	struct vring_used_elem *uep;
183 	struct rte_mbuf *cookie;
184 	uint16_t used_idx, desc_idx;
185 	uint16_t i;
186 
187 	/*  Caller does the check */
188 	for (i = 0; i < num ; i++) {
189 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
190 		uep = &vq->vq_split.ring.used->ring[used_idx];
191 		desc_idx = (uint16_t) uep->id;
192 		len[i] = uep->len;
193 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
194 
195 		if (unlikely(cookie == NULL)) {
196 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
197 				vq->vq_used_cons_idx);
198 			break;
199 		}
200 
201 		rte_prefetch0(cookie);
202 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
203 		rx_pkts[i]  = cookie;
204 		vq->vq_used_cons_idx++;
205 		vq_ring_free_chain(vq, desc_idx);
206 		vq->vq_descx[desc_idx].cookie = NULL;
207 	}
208 
209 	return i;
210 }
211 
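/*
 * Dequeue for VIRTIO_F_IN_ORDER split rings: descriptors are used in ring
 * order, so the used index doubles as the descriptor index and the whole
 * batch can be released at once with vq_ring_free_inorder().
 */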
212 static uint16_t
213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
214 			struct rte_mbuf **rx_pkts,
215 			uint32_t *len,
216 			uint16_t num)
217 {
218 	struct vring_used_elem *uep;
219 	struct rte_mbuf *cookie;
220 	uint16_t used_idx = 0;
221 	uint16_t i;
222 
223 	if (unlikely(num == 0))
224 		return 0;
225 
226 	for (i = 0; i < num; i++) {
227 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
228 		/* Desc idx same as used idx */
229 		uep = &vq->vq_split.ring.used->ring[used_idx];
230 		len[i] = uep->len;
231 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
232 
233 		if (unlikely(cookie == NULL)) {
234 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
235 				vq->vq_used_cons_idx);
236 			break;
237 		}
238 
239 		rte_prefetch0(cookie);
240 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
241 		rx_pkts[i]  = cookie;
242 		vq->vq_used_cons_idx++;
243 		vq->vq_descx[used_idx].cookie = NULL;
244 	}
245 
246 	vq_ring_free_inorder(vq, used_idx, i);
247 	return i;
248 }
249 
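/*
 * Refill an in-order split ring with receive buffers: one descriptor per
 * mbuf, with the buffer address rewound by the virtio-net header size so
 * the header written by the device lands in the mbuf headroom.
 */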
250 static inline int
251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
252 			struct rte_mbuf **cookies,
253 			uint16_t num)
254 {
255 	struct vq_desc_extra *dxp;
256 	struct virtio_hw *hw = vq->hw;
257 	struct vring_desc *start_dp;
258 	uint16_t head_idx, idx, i = 0;
259 
260 	if (unlikely(vq->vq_free_cnt == 0))
261 		return -ENOSPC;
262 	if (unlikely(vq->vq_free_cnt < num))
263 		return -EMSGSIZE;
264 
265 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
266 	start_dp = vq->vq_split.ring.desc;
267 
268 	while (i < num) {
269 		idx = head_idx & (vq->vq_nentries - 1);
270 		dxp = &vq->vq_descx[idx];
271 		dxp->cookie = (void *)cookies[i];
272 		dxp->ndescs = 1;
273 
274 		start_dp[idx].addr = cookies[i]->buf_iova +
275 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
276 		start_dp[idx].len = cookies[i]->buf_len -
277 			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
278 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
279 
280 		vq_update_avail_ring(vq, idx);
281 		head_idx++;
282 		i++;
283 	}
284 
285 	vq->vq_desc_head_idx += num;
286 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
287 	return 0;
288 }
289 
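/*
 * Refill a regular split ring with receive buffers, following the
 * descriptor free chain starting at vq_desc_head_idx.
 */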
290 static inline int
291 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
292 				uint16_t num)
293 {
294 	struct vq_desc_extra *dxp;
295 	struct virtio_hw *hw = vq->hw;
296 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
297 	uint16_t idx, i;
298 
299 	if (unlikely(vq->vq_free_cnt == 0))
300 		return -ENOSPC;
301 	if (unlikely(vq->vq_free_cnt < num))
302 		return -EMSGSIZE;
303 
304 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
305 		return -EFAULT;
306 
307 	for (i = 0; i < num; i++) {
308 		idx = vq->vq_desc_head_idx;
309 		dxp = &vq->vq_descx[idx];
310 		dxp->cookie = (void *)cookie[i];
311 		dxp->ndescs = 1;
312 
313 		start_dp[idx].addr = cookie[i]->buf_iova +
314 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
315 		start_dp[idx].len = cookie[i]->buf_len -
316 			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
317 		start_dp[idx].flags = VRING_DESC_F_WRITE;
318 		vq->vq_desc_head_idx = start_dp[idx].next;
319 		vq_update_avail_ring(vq, idx);
320 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
321 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
322 			break;
323 		}
324 	}
325 
326 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
327 
328 	return 0;
329 }
330 
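/*
 * Fill a single packed-ring descriptor with a receive buffer and flag it
 * available to the device; toggles the cached avail/used wrap flags when
 * the avail index wraps around the ring.
 */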
331 static inline void
332 virtqueue_refill_single_packed(struct virtqueue *vq,
333 			       struct vring_packed_desc *dp,
334 			       struct rte_mbuf *cookie)
335 {
336 	uint16_t flags = vq->vq_packed.cached_flags;
337 	struct virtio_hw *hw = vq->hw;
338 
339 	dp->addr = cookie->buf_iova +
340 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
341 	dp->len = cookie->buf_len -
342 		RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
343 
344 	virtqueue_store_flags_packed(dp, flags,
345 				     hw->weak_barriers);
346 
347 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
348 		vq->vq_avail_idx -= vq->vq_nentries;
349 		vq->vq_packed.cached_flags ^=
350 			VRING_PACKED_DESC_F_AVAIL_USED;
351 		flags = vq->vq_packed.cached_flags;
352 	}
353 }
354 
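/*
 * Initial receive refill used while setting the queue up: the extra-data
 * slot is indexed directly by the avail index, since the device has not
 * yet written back any buffer ids.
 */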
355 static inline int
356 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
357 				     struct rte_mbuf **cookie, uint16_t num)
358 {
359 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
360 	struct vq_desc_extra *dxp;
361 	uint16_t idx;
362 	int i;
363 
364 	if (unlikely(vq->vq_free_cnt == 0))
365 		return -ENOSPC;
366 	if (unlikely(vq->vq_free_cnt < num))
367 		return -EMSGSIZE;
368 
369 	for (i = 0; i < num; i++) {
370 		idx = vq->vq_avail_idx;
371 		dxp = &vq->vq_descx[idx];
372 		dxp->cookie = (void *)cookie[i];
373 		dxp->ndescs = 1;
374 
375 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
376 	}
377 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
378 	return 0;
379 }
380 
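/*
 * Runtime receive refill for packed rings: reuse the buffer id the device
 * wrote back into the descriptor, which may differ from the ring position.
 */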
381 static inline int
382 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
383 				     struct rte_mbuf **cookie, uint16_t num)
384 {
385 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
386 	struct vq_desc_extra *dxp;
387 	uint16_t idx, did;
388 	int i;
389 
390 	if (unlikely(vq->vq_free_cnt == 0))
391 		return -ENOSPC;
392 	if (unlikely(vq->vq_free_cnt < num))
393 		return -EMSGSIZE;
394 
395 	for (i = 0; i < num; i++) {
396 		idx = vq->vq_avail_idx;
397 		did = start_dp[idx].id;
398 		dxp = &vq->vq_descx[did];
399 		dxp->cookie = (void *)cookie[i];
400 		dxp->ndescs = 1;
401 
402 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
403 	}
404 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
405 	return 0;
406 }
407 
408 /* When doing TSO, the IP length is not included in the pseudo header
409  * checksum of the packet given to the PMD, but for virtio it is
410  * expected.
411  */
412 static void
413 virtio_tso_fix_cksum(struct rte_mbuf *m)
414 {
415 	/* common case: header is not fragmented */
416 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
417 			m->l4_len)) {
418 		struct rte_ipv4_hdr *iph;
419 		struct rte_ipv6_hdr *ip6h;
420 		struct rte_tcp_hdr *th;
421 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
422 		uint32_t tmp;
423 
424 		iph = rte_pktmbuf_mtod_offset(m,
425 					struct rte_ipv4_hdr *, m->l2_len);
426 		th = RTE_PTR_ADD(iph, m->l3_len);
427 		if ((iph->version_ihl >> 4) == 4) {
428 			iph->hdr_checksum = 0;
429 			iph->hdr_checksum = rte_ipv4_cksum(iph);
430 			ip_len = iph->total_length;
431 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
432 				m->l3_len);
433 		} else {
434 			ip6h = (struct rte_ipv6_hdr *)iph;
435 			ip_paylen = ip6h->payload_len;
436 		}
437 
438 		/* fold ip_paylen into the existing pseudo-header checksum */
439 		prev_cksum = th->cksum;
440 		tmp = prev_cksum;
441 		tmp += ip_paylen;
442 		tmp = (tmp & 0xffff) + (tmp >> 16);
443 		new_cksum = tmp;
444 
445 		/* replace it in the packet */
446 		th->cksum = new_cksum;
447 	}
448 }
449 
450 
451 
452 
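/*
 * In-order TX enqueue for packets whose virtio-net header can be pushed
 * into the mbuf headroom: each packet consumes exactly one descriptor.
 */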
453 static inline void
454 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
455 			struct rte_mbuf **cookies,
456 			uint16_t num)
457 {
458 	struct vq_desc_extra *dxp;
459 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
460 	struct vring_desc *start_dp;
461 	struct virtio_net_hdr *hdr;
462 	uint16_t idx;
463 	int16_t head_size = vq->hw->vtnet_hdr_size;
464 	uint16_t i = 0;
465 
466 	idx = vq->vq_desc_head_idx;
467 	start_dp = vq->vq_split.ring.desc;
468 
469 	while (i < num) {
470 		idx = idx & (vq->vq_nentries - 1);
471 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
472 		dxp->cookie = (void *)cookies[i];
473 		dxp->ndescs = 1;
474 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
475 
476 		hdr = rte_pktmbuf_mtod_offset(cookies[i],
477 				struct virtio_net_hdr *, -head_size);
478 
479 		/* if offload disabled, hdr is not zeroed yet, do it now */
480 		if (!vq->hw->has_tx_offload)
481 			virtqueue_clear_net_hdr(hdr);
482 		else
483 			virtqueue_xmit_offload(hdr, cookies[i]);
484 
485 		start_dp[idx].addr  = rte_mbuf_data_iova(cookies[i]) - head_size;
486 		start_dp[idx].len   = cookies[i]->data_len + head_size;
487 		start_dp[idx].flags = 0;
488 
489 
490 		vq_update_avail_ring(vq, idx);
491 
492 		idx++;
493 		i++;
494 	}
495 
496 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
497 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
498 }
499 
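/*
 * Fast TX path for packed rings: single-segment packet with the virtio-net
 * header prepended in the mbuf headroom, consuming a single descriptor.
 */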
500 static inline void
501 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
502 				   struct rte_mbuf *cookie,
503 				   int in_order)
504 {
505 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
506 	struct vring_packed_desc *dp;
507 	struct vq_desc_extra *dxp;
508 	uint16_t idx, id, flags;
509 	int16_t head_size = vq->hw->vtnet_hdr_size;
510 	struct virtio_net_hdr *hdr;
511 
512 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
513 	idx = vq->vq_avail_idx;
514 	dp = &vq->vq_packed.ring.desc[idx];
515 
516 	dxp = &vq->vq_descx[id];
517 	dxp->ndescs = 1;
518 	dxp->cookie = cookie;
519 
520 	flags = vq->vq_packed.cached_flags;
521 
522 	/* prepend cannot fail, checked by caller */
523 	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
524 				      -head_size);
525 
526 	/* if offload disabled, hdr is not zeroed yet, do it now */
527 	if (!vq->hw->has_tx_offload)
528 		virtqueue_clear_net_hdr(hdr);
529 	else
530 		virtqueue_xmit_offload(hdr, cookie);
531 
532 	dp->addr = rte_mbuf_data_iova(cookie) - head_size;
533 	dp->len  = cookie->data_len + head_size;
534 	dp->id   = id;
535 
536 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
537 		vq->vq_avail_idx -= vq->vq_nentries;
538 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
539 	}
540 
541 	vq->vq_free_cnt--;
542 
543 	if (!in_order) {
544 		vq->vq_desc_head_idx = dxp->next;
545 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
546 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
547 	}
548 
549 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
550 }
551 
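/*
 * General split-ring TX enqueue. Depending on the negotiated features, the
 * virtio-net header is either pushed into the mbuf headroom (can_push),
 * described by the per-slot indirect table (use_indirect), or given its own
 * descriptor; the packet segments are then chained behind it.
 */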
552 static inline void
553 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
554 			uint16_t needed, int use_indirect, int can_push,
555 			int in_order)
556 {
557 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
558 	struct vq_desc_extra *dxp;
559 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
560 	struct vring_desc *start_dp;
561 	uint16_t seg_num = cookie->nb_segs;
562 	uint16_t head_idx, idx;
563 	int16_t head_size = vq->hw->vtnet_hdr_size;
564 	bool prepend_header = false;
565 	struct virtio_net_hdr *hdr;
566 
567 	head_idx = vq->vq_desc_head_idx;
568 	idx = head_idx;
569 	if (in_order)
570 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
571 	else
572 		dxp = &vq->vq_descx[idx];
573 	dxp->cookie = (void *)cookie;
574 	dxp->ndescs = needed;
575 
576 	start_dp = vq->vq_split.ring.desc;
577 
578 	if (can_push) {
579 		/* prepend cannot fail, checked by caller */
580 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
581 					      -head_size);
582 		prepend_header = true;
583 
584 		/* if offload disabled, it is not zeroed below, do it now */
585 		if (!vq->hw->has_tx_offload)
586 			virtqueue_clear_net_hdr(hdr);
587 	} else if (use_indirect) {
588 		/* setup tx ring slot to point to indirect
589 		 * descriptor list stored in reserved region.
590 		 *
591 		 * the first slot in indirect ring is already preset
592 		 * to point to the header in reserved region
593 		 */
594 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
595 			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
596 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
597 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
598 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
599 
600 		/* loop below will fill in rest of the indirect elements */
601 		start_dp = txr[idx].tx_indir;
602 		idx = 1;
603 	} else {
604 		/* setup first tx ring slot to point to header
605 		 * stored in reserved region.
606 		 */
607 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
608 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
609 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
610 		start_dp[idx].flags = VRING_DESC_F_NEXT;
611 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
612 
613 		idx = start_dp[idx].next;
614 	}
615 
616 	if (vq->hw->has_tx_offload)
617 		virtqueue_xmit_offload(hdr, cookie);
618 
619 	do {
620 		start_dp[idx].addr  = rte_mbuf_data_iova(cookie);
621 		start_dp[idx].len   = cookie->data_len;
622 		if (prepend_header) {
623 			start_dp[idx].addr -= head_size;
624 			start_dp[idx].len += head_size;
625 			prepend_header = false;
626 		}
627 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
628 		idx = start_dp[idx].next;
629 	} while ((cookie = cookie->next) != NULL);
630 
631 	if (use_indirect)
632 		idx = vq->vq_split.ring.desc[head_idx].next;
633 
634 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
635 
636 	vq->vq_desc_head_idx = idx;
637 	vq_update_avail_ring(vq, head_idx);
638 
639 	if (!in_order) {
640 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
641 			vq->vq_desc_tail_idx = idx;
642 	}
643 }
644 
645 void
646 virtio_dev_cq_start(struct rte_eth_dev *dev)
647 {
648 	struct virtio_hw *hw = dev->data->dev_private;
649 
650 	if (hw->cvq) {
651 		rte_spinlock_init(&hw->cvq->lock);
652 		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
653 	}
654 }
655 
656 int
657 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
658 			uint16_t queue_idx,
659 			uint16_t nb_desc,
660 			unsigned int socket_id __rte_unused,
661 			const struct rte_eth_rxconf *rx_conf,
662 			struct rte_mempool *mp)
663 {
664 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
665 	struct virtio_hw *hw = dev->data->dev_private;
666 	struct virtqueue *vq = hw->vqs[vq_idx];
667 	struct virtnet_rx *rxvq;
668 	uint16_t rx_free_thresh;
669 
670 	PMD_INIT_FUNC_TRACE();
671 
672 	if (rx_conf->rx_deferred_start) {
673 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
674 		return -EINVAL;
675 	}
676 
677 	rx_free_thresh = rx_conf->rx_free_thresh;
678 	if (rx_free_thresh == 0)
679 		rx_free_thresh =
680 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
681 
682 	if (rx_free_thresh & 0x3) {
683 		RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
684 			" (rx_free_thresh=%u port=%u queue=%u)\n",
685 			rx_free_thresh, dev->data->port_id, queue_idx);
686 		return -EINVAL;
687 	}
688 
689 	if (rx_free_thresh >= vq->vq_nentries) {
690 		RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
691 			"number of RX entries (%u)."
692 			" (rx_free_thresh=%u port=%u queue=%u)\n",
693 			vq->vq_nentries,
694 			rx_free_thresh, dev->data->port_id, queue_idx);
695 		return -EINVAL;
696 	}
697 	vq->vq_free_thresh = rx_free_thresh;
698 
699 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
700 		nb_desc = vq->vq_nentries;
701 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
702 
703 	rxvq = &vq->rxq;
704 	rxvq->queue_id = queue_idx;
705 	rxvq->mpool = mp;
706 	dev->data->rx_queues[queue_idx] = rxvq;
707 
708 	return 0;
709 }
710 
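/*
 * Second stage of Rx queue setup, run when the device is started: populate
 * the ring with receive mbufs using the refill path that matches the
 * negotiated features (vectorized, in-order, packed or plain split).
 */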
711 int
712 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
713 {
714 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
715 	struct virtio_hw *hw = dev->data->dev_private;
716 	struct virtqueue *vq = hw->vqs[vq_idx];
717 	struct virtnet_rx *rxvq = &vq->rxq;
718 	struct rte_mbuf *m;
719 	uint16_t desc_idx;
720 	int error, nbufs, i;
721 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
722 
723 	PMD_INIT_FUNC_TRACE();
724 
725 	/* Allocate blank mbufs for each Rx descriptor */
726 	nbufs = 0;
727 
728 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
729 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
730 		     desc_idx++) {
731 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
732 			vq->vq_split.ring.desc[desc_idx].flags =
733 				VRING_DESC_F_WRITE;
734 		}
735 
736 		virtio_rxq_vec_setup(rxvq);
737 	}
738 
739 	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
740 	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
741 		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
742 
743 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
744 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
745 			virtio_rxq_rearm_vec(rxvq);
746 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
747 		}
748 	} else if (!virtio_with_packed_queue(vq->hw) && in_order) {
749 		if ((!virtqueue_full(vq))) {
750 			uint16_t free_cnt = vq->vq_free_cnt;
751 			struct rte_mbuf *pkts[free_cnt];
752 
753 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
754 				free_cnt)) {
755 				error = virtqueue_enqueue_refill_inorder(vq,
756 						pkts,
757 						free_cnt);
758 				if (unlikely(error)) {
759 					for (i = 0; i < free_cnt; i++)
760 						rte_pktmbuf_free(pkts[i]);
761 				}
762 			}
763 
764 			nbufs += free_cnt;
765 			vq_update_avail_idx(vq);
766 		}
767 	} else {
768 		while (!virtqueue_full(vq)) {
769 			m = rte_mbuf_raw_alloc(rxvq->mpool);
770 			if (m == NULL)
771 				break;
772 
773 			/* Enqueue allocated buffers */
774 			if (virtio_with_packed_queue(vq->hw))
775 				error = virtqueue_enqueue_recv_refill_packed_init(vq,
776 						&m, 1);
777 			else
778 				error = virtqueue_enqueue_recv_refill(vq,
779 						&m, 1);
780 			if (error) {
781 				rte_pktmbuf_free(m);
782 				break;
783 			}
784 			nbufs++;
785 		}
786 
787 		if (!virtio_with_packed_queue(vq->hw))
788 			vq_update_avail_idx(vq);
789 	}
790 
791 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
792 
793 	VIRTQUEUE_DUMP(vq);
794 
795 	return 0;
796 }
797 
798 /*
799  * struct rte_eth_dev *dev: Used to update dev
800  * uint16_t nb_desc: Defaults to values read from config space
801  * unsigned int socket_id: Used to allocate memzone
802  * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
803  * uint16_t queue_idx: Just used as an index in dev txq list
804  */
805 int
806 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
807 			uint16_t queue_idx,
808 			uint16_t nb_desc,
809 			unsigned int socket_id __rte_unused,
810 			const struct rte_eth_txconf *tx_conf)
811 {
812 	uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
813 	struct virtio_hw *hw = dev->data->dev_private;
814 	struct virtqueue *vq = hw->vqs[vq_idx];
815 	struct virtnet_tx *txvq;
816 	uint16_t tx_free_thresh;
817 
818 	PMD_INIT_FUNC_TRACE();
819 
820 	if (tx_conf->tx_deferred_start) {
821 		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
822 		return -EINVAL;
823 	}
824 
825 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
826 		nb_desc = vq->vq_nentries;
827 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
828 
829 	txvq = &vq->txq;
830 	txvq->queue_id = queue_idx;
831 
832 	tx_free_thresh = tx_conf->tx_free_thresh;
833 	if (tx_free_thresh == 0)
834 		tx_free_thresh =
835 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
836 
837 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
838 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
839 			"number of TX entries minus 3 (%u)."
840 			" (tx_free_thresh=%u port=%u queue=%u)\n",
841 			vq->vq_nentries - 3,
842 			tx_free_thresh, dev->data->port_id, queue_idx);
843 		return -EINVAL;
844 	}
845 
846 	vq->vq_free_thresh = tx_free_thresh;
847 
848 	dev->data->tx_queues[queue_idx] = txvq;
849 	return 0;
850 }
851 
852 int
853 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
854 				uint16_t queue_idx)
855 {
856 	uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
857 	struct virtio_hw *hw = dev->data->dev_private;
858 	struct virtqueue *vq = hw->vqs[vq_idx];
859 
860 	PMD_INIT_FUNC_TRACE();
861 
862 	if (!virtio_with_packed_queue(hw)) {
863 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
864 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
865 	}
866 
867 	VIRTQUEUE_DUMP(vq);
868 
869 	return 0;
870 }
871 
872 static inline void
873 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
874 {
875 	int error;
876 	/*
877 	 * Requeue the discarded mbuf. This should always be
878 	 * successful since it was just dequeued.
879 	 */
880 	if (virtio_with_packed_queue(vq->hw))
881 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
882 	else
883 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
884 
885 	if (unlikely(error)) {
886 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
887 		rte_pktmbuf_free(m);
888 	}
889 }
890 
891 static inline void
892 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
893 {
894 	int error;
895 
896 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
897 	if (unlikely(error)) {
898 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
899 		rte_pktmbuf_free(m);
900 	}
901 }
902 
903 /* Parse the virtio_net_hdr and optionally fill offload information into the mbuf */
904 static inline int
905 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
906 {
907 	struct rte_net_hdr_lens hdr_lens;
908 	uint32_t hdrlen, ptype;
909 	int l4_supported = 0;
910 
911 	/* nothing to do */
912 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
913 		return 0;
914 
915 	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
916 
917 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
918 	m->packet_type = ptype;
919 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
920 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
921 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
922 		l4_supported = 1;
923 
924 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
925 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
926 		if (hdr->csum_start <= hdrlen && l4_supported) {
927 			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
928 		} else {
929 			/* Unknown proto or tunnel, do sw cksum. We can assume
930 			 * the cksum field is in the first segment since the
931 			 * buffers we provided to the host are large enough.
932 			 * In case of SCTP, this will be wrong since it's a CRC
933 			 * but there's nothing we can do.
934 			 */
935 			uint16_t csum = 0, off;
936 
937 			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
938 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
939 				&csum) < 0)
940 				return -EINVAL;
941 			if (likely(csum != 0xffff))
942 				csum = ~csum;
943 			off = hdr->csum_offset + hdr->csum_start;
944 			if (rte_pktmbuf_data_len(m) >= off + 1)
945 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
946 					off) = csum;
947 		}
948 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
949 		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
950 	}
951 
952 	/* GSO request, save required information in mbuf */
953 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
954 		/* Check unsupported modes */
955 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
956 		    (hdr->gso_size == 0)) {
957 			return -EINVAL;
958 		}
959 
960 		/* Update MSS length in the mbuf */
961 		m->tso_segsz = hdr->gso_size;
962 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
963 			case VIRTIO_NET_HDR_GSO_TCPV4:
964 			case VIRTIO_NET_HDR_GSO_TCPV6:
965 				m->ol_flags |= PKT_RX_LRO | \
966 					PKT_RX_L4_CKSUM_NONE;
967 				break;
968 			default:
969 				return -EINVAL;
970 		}
971 	}
972 
973 	return 0;
974 }
975 
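/*
 * Number of ring descriptors that fit in one cache line. The receive bursts
 * below are trimmed so that they end on a descriptor cache-line boundary,
 * presumably so the next burst starts on a cache line that is not still
 * being written by the device.
 */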
976 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
977 uint16_t
978 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
979 {
980 	struct virtnet_rx *rxvq = rx_queue;
981 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
982 	struct virtio_hw *hw = vq->hw;
983 	struct rte_mbuf *rxm;
984 	uint16_t nb_used, num, nb_rx;
985 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
986 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
987 	int error;
988 	uint32_t i, nb_enqueued;
989 	uint32_t hdr_size;
990 	struct virtio_net_hdr *hdr;
991 
992 	nb_rx = 0;
993 	if (unlikely(hw->started == 0))
994 		return nb_rx;
995 
996 	nb_used = virtqueue_nused(vq);
997 
998 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
999 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1000 		num = VIRTIO_MBUF_BURST_SZ;
1001 	if (likely(num > DESC_PER_CACHELINE))
1002 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1003 
1004 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1005 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1006 
1007 	nb_enqueued = 0;
1008 	hdr_size = hw->vtnet_hdr_size;
1009 
1010 	for (i = 0; i < num ; i++) {
1011 		rxm = rcv_pkts[i];
1012 
1013 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1014 
1015 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1016 			PMD_RX_LOG(ERR, "Packet drop");
1017 			nb_enqueued++;
1018 			virtio_discard_rxbuf(vq, rxm);
1019 			rxvq->stats.errors++;
1020 			continue;
1021 		}
1022 
1023 		rxm->port = rxvq->port_id;
1024 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1025 		rxm->ol_flags = 0;
1026 		rxm->vlan_tci = 0;
1027 
1028 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1029 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1030 
1031 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1032 			RTE_PKTMBUF_HEADROOM - hdr_size);
1033 
1034 		if (hw->vlan_strip)
1035 			rte_vlan_strip(rxm);
1036 
1037 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1038 			virtio_discard_rxbuf(vq, rxm);
1039 			rxvq->stats.errors++;
1040 			continue;
1041 		}
1042 
1043 		virtio_rx_stats_updated(rxvq, rxm);
1044 
1045 		rx_pkts[nb_rx++] = rxm;
1046 	}
1047 
1048 	rxvq->stats.packets += nb_rx;
1049 
1050 	/* Allocate new mbufs to refill the used descriptors */
1051 	if (likely(!virtqueue_full(vq))) {
1052 		uint16_t free_cnt = vq->vq_free_cnt;
1053 		struct rte_mbuf *new_pkts[free_cnt];
1054 
1055 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1056 						free_cnt) == 0)) {
1057 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1058 					free_cnt);
1059 			if (unlikely(error)) {
1060 				for (i = 0; i < free_cnt; i++)
1061 					rte_pktmbuf_free(new_pkts[i]);
1062 			}
1063 			nb_enqueued += free_cnt;
1064 		} else {
1065 			struct rte_eth_dev *dev =
1066 				&rte_eth_devices[rxvq->port_id];
1067 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1068 		}
1069 	}
1070 
1071 	if (likely(nb_enqueued)) {
1072 		vq_update_avail_idx(vq);
1073 
1074 		if (unlikely(virtqueue_kick_prepare(vq))) {
1075 			virtqueue_notify(vq);
1076 			PMD_RX_LOG(DEBUG, "Notified");
1077 		}
1078 	}
1079 
1080 	return nb_rx;
1081 }
1082 
1083 uint16_t
1084 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1085 			uint16_t nb_pkts)
1086 {
1087 	struct virtnet_rx *rxvq = rx_queue;
1088 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1089 	struct virtio_hw *hw = vq->hw;
1090 	struct rte_mbuf *rxm;
1091 	uint16_t num, nb_rx;
1092 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1093 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1094 	int error;
1095 	uint32_t i, nb_enqueued;
1096 	uint32_t hdr_size;
1097 	struct virtio_net_hdr *hdr;
1098 
1099 	nb_rx = 0;
1100 	if (unlikely(hw->started == 0))
1101 		return nb_rx;
1102 
1103 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1104 	if (likely(num > DESC_PER_CACHELINE))
1105 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1106 
1107 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1108 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1109 
1110 	nb_enqueued = 0;
1111 	hdr_size = hw->vtnet_hdr_size;
1112 
1113 	for (i = 0; i < num; i++) {
1114 		rxm = rcv_pkts[i];
1115 
1116 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1117 
1118 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1119 			PMD_RX_LOG(ERR, "Packet drop");
1120 			nb_enqueued++;
1121 			virtio_discard_rxbuf(vq, rxm);
1122 			rxvq->stats.errors++;
1123 			continue;
1124 		}
1125 
1126 		rxm->port = rxvq->port_id;
1127 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1128 		rxm->ol_flags = 0;
1129 		rxm->vlan_tci = 0;
1130 
1131 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1132 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1133 
1134 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1135 			RTE_PKTMBUF_HEADROOM - hdr_size);
1136 
1137 		if (hw->vlan_strip)
1138 			rte_vlan_strip(rxm);
1139 
1140 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1141 			virtio_discard_rxbuf(vq, rxm);
1142 			rxvq->stats.errors++;
1143 			continue;
1144 		}
1145 
1146 		virtio_rx_stats_updated(rxvq, rxm);
1147 
1148 		rx_pkts[nb_rx++] = rxm;
1149 	}
1150 
1151 	rxvq->stats.packets += nb_rx;
1152 
1153 	/* Allocate new mbufs to refill the used descriptors */
1154 	if (likely(!virtqueue_full(vq))) {
1155 		uint16_t free_cnt = vq->vq_free_cnt;
1156 		struct rte_mbuf *new_pkts[free_cnt];
1157 
1158 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1159 						free_cnt) == 0)) {
1160 			error = virtqueue_enqueue_recv_refill_packed(vq,
1161 					new_pkts, free_cnt);
1162 			if (unlikely(error)) {
1163 				for (i = 0; i < free_cnt; i++)
1164 					rte_pktmbuf_free(new_pkts[i]);
1165 			}
1166 			nb_enqueued += free_cnt;
1167 		} else {
1168 			struct rte_eth_dev *dev =
1169 				&rte_eth_devices[rxvq->port_id];
1170 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1171 		}
1172 	}
1173 
1174 	if (likely(nb_enqueued)) {
1175 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1176 			virtqueue_notify(vq);
1177 			PMD_RX_LOG(DEBUG, "Notified");
1178 		}
1179 	}
1180 
1181 	return nb_rx;
1182 }
1183 
1184 
1185 uint16_t
1186 virtio_recv_pkts_inorder(void *rx_queue,
1187 			struct rte_mbuf **rx_pkts,
1188 			uint16_t nb_pkts)
1189 {
1190 	struct virtnet_rx *rxvq = rx_queue;
1191 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1192 	struct virtio_hw *hw = vq->hw;
1193 	struct rte_mbuf *rxm;
1194 	struct rte_mbuf *prev = NULL;
1195 	uint16_t nb_used, num, nb_rx;
1196 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1197 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1198 	int error;
1199 	uint32_t nb_enqueued;
1200 	uint32_t seg_num;
1201 	uint32_t seg_res;
1202 	uint32_t hdr_size;
1203 	int32_t i;
1204 
1205 	nb_rx = 0;
1206 	if (unlikely(hw->started == 0))
1207 		return nb_rx;
1208 
1209 	nb_used = virtqueue_nused(vq);
1210 	nb_used = RTE_MIN(nb_used, nb_pkts);
1211 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1212 
1213 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1214 
1215 	nb_enqueued = 0;
1216 	seg_num = 1;
1217 	seg_res = 0;
1218 	hdr_size = hw->vtnet_hdr_size;
1219 
1220 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1221 
1222 	for (i = 0; i < num; i++) {
1223 		struct virtio_net_hdr_mrg_rxbuf *header;
1224 
1225 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1226 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1227 
1228 		rxm = rcv_pkts[i];
1229 
1230 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1231 			PMD_RX_LOG(ERR, "Packet drop");
1232 			nb_enqueued++;
1233 			virtio_discard_rxbuf_inorder(vq, rxm);
1234 			rxvq->stats.errors++;
1235 			continue;
1236 		}
1237 
1238 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1239 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1240 			 - hdr_size);
1241 
1242 		if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1243 			seg_num = header->num_buffers;
1244 			if (seg_num == 0)
1245 				seg_num = 1;
1246 		} else {
1247 			seg_num = 1;
1248 		}
1249 
1250 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1251 		rxm->nb_segs = seg_num;
1252 		rxm->ol_flags = 0;
1253 		rxm->vlan_tci = 0;
1254 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1255 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1256 
1257 		rxm->port = rxvq->port_id;
1258 
1259 		rx_pkts[nb_rx] = rxm;
1260 		prev = rxm;
1261 
1262 		if (vq->hw->has_rx_offload &&
1263 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1264 			virtio_discard_rxbuf_inorder(vq, rxm);
1265 			rxvq->stats.errors++;
1266 			continue;
1267 		}
1268 
1269 		if (hw->vlan_strip)
1270 			rte_vlan_strip(rx_pkts[nb_rx]);
1271 
1272 		seg_res = seg_num - 1;
1273 
1274 		/* Merge remaining segments */
1275 		while (seg_res != 0 && i < (num - 1)) {
1276 			i++;
1277 
1278 			rxm = rcv_pkts[i];
1279 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1280 			rxm->pkt_len = (uint32_t)(len[i]);
1281 			rxm->data_len = (uint16_t)(len[i]);
1282 
1283 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1284 
1285 			prev->next = rxm;
1286 			prev = rxm;
1287 			seg_res -= 1;
1288 		}
1289 
1290 		if (!seg_res) {
1291 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1292 			nb_rx++;
1293 		}
1294 	}
1295 
1296 	/* The last packet may still have segments left to merge */
1297 	while (seg_res != 0) {
1298 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1299 					VIRTIO_MBUF_BURST_SZ);
1300 
1301 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1302 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1303 							   rcv_cnt);
1304 			uint16_t extra_idx = 0;
1305 
1306 			rcv_cnt = num;
1307 			while (extra_idx < rcv_cnt) {
1308 				rxm = rcv_pkts[extra_idx];
1309 				rxm->data_off =
1310 					RTE_PKTMBUF_HEADROOM - hdr_size;
1311 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1312 				rxm->data_len = (uint16_t)(len[extra_idx]);
1313 				prev->next = rxm;
1314 				prev = rxm;
1315 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1316 				extra_idx += 1;
1317 			}
1318 			seg_res -= rcv_cnt;
1319 
1320 			if (!seg_res) {
1321 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1322 				nb_rx++;
1323 			}
1324 		} else {
1325 			PMD_RX_LOG(ERR,
1326 					"Not enough segments for packet.");
1327 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1328 			rxvq->stats.errors++;
1329 			break;
1330 		}
1331 	}
1332 
1333 	rxvq->stats.packets += nb_rx;
1334 
1335 	/* Allocate new mbufs to refill the used descriptors */
1336 
1337 	if (likely(!virtqueue_full(vq))) {
1338 		/* free_cnt may include mrg descs */
1339 		uint16_t free_cnt = vq->vq_free_cnt;
1340 		struct rte_mbuf *new_pkts[free_cnt];
1341 
1342 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1343 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1344 					free_cnt);
1345 			if (unlikely(error)) {
1346 				for (i = 0; i < free_cnt; i++)
1347 					rte_pktmbuf_free(new_pkts[i]);
1348 			}
1349 			nb_enqueued += free_cnt;
1350 		} else {
1351 			struct rte_eth_dev *dev =
1352 				&rte_eth_devices[rxvq->port_id];
1353 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1354 		}
1355 	}
1356 
1357 	if (likely(nb_enqueued)) {
1358 		vq_update_avail_idx(vq);
1359 
1360 		if (unlikely(virtqueue_kick_prepare(vq))) {
1361 			virtqueue_notify(vq);
1362 			PMD_RX_LOG(DEBUG, "Notified");
1363 		}
1364 	}
1365 
1366 	return nb_rx;
1367 }
1368 
1369 uint16_t
1370 virtio_recv_mergeable_pkts(void *rx_queue,
1371 			struct rte_mbuf **rx_pkts,
1372 			uint16_t nb_pkts)
1373 {
1374 	struct virtnet_rx *rxvq = rx_queue;
1375 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1376 	struct virtio_hw *hw = vq->hw;
1377 	struct rte_mbuf *rxm;
1378 	struct rte_mbuf *prev = NULL;
1379 	uint16_t nb_used, num, nb_rx = 0;
1380 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1381 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1382 	int error;
1383 	uint32_t nb_enqueued = 0;
1384 	uint32_t seg_num = 0;
1385 	uint32_t seg_res = 0;
1386 	uint32_t hdr_size = hw->vtnet_hdr_size;
1387 	int32_t i;
1388 
1389 	if (unlikely(hw->started == 0))
1390 		return nb_rx;
1391 
1392 	nb_used = virtqueue_nused(vq);
1393 
1394 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1395 
1396 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1397 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1398 		num = VIRTIO_MBUF_BURST_SZ;
1399 	if (likely(num > DESC_PER_CACHELINE))
1400 		num = num - ((vq->vq_used_cons_idx + num) %
1401 				DESC_PER_CACHELINE);
1402 
1403 
1404 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1405 
1406 	for (i = 0; i < num; i++) {
1407 		struct virtio_net_hdr_mrg_rxbuf *header;
1408 
1409 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1410 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1411 
1412 		rxm = rcv_pkts[i];
1413 
1414 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1415 			PMD_RX_LOG(ERR, "Packet drop");
1416 			nb_enqueued++;
1417 			virtio_discard_rxbuf(vq, rxm);
1418 			rxvq->stats.errors++;
1419 			continue;
1420 		}
1421 
1422 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1423 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1424 			 - hdr_size);
1425 		seg_num = header->num_buffers;
1426 		if (seg_num == 0)
1427 			seg_num = 1;
1428 
1429 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1430 		rxm->nb_segs = seg_num;
1431 		rxm->ol_flags = 0;
1432 		rxm->vlan_tci = 0;
1433 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1434 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1435 
1436 		rxm->port = rxvq->port_id;
1437 
1438 		rx_pkts[nb_rx] = rxm;
1439 		prev = rxm;
1440 
1441 		if (hw->has_rx_offload &&
1442 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1443 			virtio_discard_rxbuf(vq, rxm);
1444 			rxvq->stats.errors++;
1445 			continue;
1446 		}
1447 
1448 		if (hw->vlan_strip)
1449 			rte_vlan_strip(rx_pkts[nb_rx]);
1450 
1451 		seg_res = seg_num - 1;
1452 
1453 		/* Merge remaining segments */
1454 		while (seg_res != 0 && i < (num - 1)) {
1455 			i++;
1456 
1457 			rxm = rcv_pkts[i];
1458 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1459 			rxm->pkt_len = (uint32_t)(len[i]);
1460 			rxm->data_len = (uint16_t)(len[i]);
1461 
1462 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1463 
1464 			prev->next = rxm;
1465 			prev = rxm;
1466 			seg_res -= 1;
1467 		}
1468 
1469 		if (!seg_res) {
1470 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1471 			nb_rx++;
1472 		}
1473 	}
1474 
1475 	/* The last packet may still have segments left to merge */
1476 	while (seg_res != 0) {
1477 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1478 					VIRTIO_MBUF_BURST_SZ);
1479 
1480 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1481 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1482 							   rcv_cnt);
1483 			uint16_t extra_idx = 0;
1484 
1485 			rcv_cnt = num;
1486 			while (extra_idx < rcv_cnt) {
1487 				rxm = rcv_pkts[extra_idx];
1488 				rxm->data_off =
1489 					RTE_PKTMBUF_HEADROOM - hdr_size;
1490 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1491 				rxm->data_len = (uint16_t)(len[extra_idx]);
1492 				prev->next = rxm;
1493 				prev = rxm;
1494 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1495 				extra_idx += 1;
1496 			}
1497 			seg_res -= rcv_cnt;
1498 
1499 			if (!seg_res) {
1500 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1501 				nb_rx++;
1502 			}
1503 		} else {
1504 			PMD_RX_LOG(ERR,
1505 					"Not enough segments for packet.");
1506 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1507 			rxvq->stats.errors++;
1508 			break;
1509 		}
1510 	}
1511 
1512 	rxvq->stats.packets += nb_rx;
1513 
1514 	/* Allocate new mbufs to refill the used descriptors */
1515 	if (likely(!virtqueue_full(vq))) {
1516 		/* free_cnt may include mrg descs */
1517 		uint16_t free_cnt = vq->vq_free_cnt;
1518 		struct rte_mbuf *new_pkts[free_cnt];
1519 
1520 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1521 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1522 					free_cnt);
1523 			if (unlikely(error)) {
1524 				for (i = 0; i < free_cnt; i++)
1525 					rte_pktmbuf_free(new_pkts[i]);
1526 			}
1527 			nb_enqueued += free_cnt;
1528 		} else {
1529 			struct rte_eth_dev *dev =
1530 				&rte_eth_devices[rxvq->port_id];
1531 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1532 		}
1533 	}
1534 
1535 	if (likely(nb_enqueued)) {
1536 		vq_update_avail_idx(vq);
1537 
1538 		if (unlikely(virtqueue_kick_prepare(vq))) {
1539 			virtqueue_notify(vq);
1540 			PMD_RX_LOG(DEBUG, "Notified");
1541 		}
1542 	}
1543 
1544 	return nb_rx;
1545 }
1546 
1547 uint16_t
1548 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1549 			struct rte_mbuf **rx_pkts,
1550 			uint16_t nb_pkts)
1551 {
1552 	struct virtnet_rx *rxvq = rx_queue;
1553 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1554 	struct virtio_hw *hw = vq->hw;
1555 	struct rte_mbuf *rxm;
1556 	struct rte_mbuf *prev = NULL;
1557 	uint16_t num, nb_rx = 0;
1558 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1559 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1560 	uint32_t nb_enqueued = 0;
1561 	uint32_t seg_num = 0;
1562 	uint32_t seg_res = 0;
1563 	uint32_t hdr_size = hw->vtnet_hdr_size;
1564 	int32_t i;
1565 	int error;
1566 
1567 	if (unlikely(hw->started == 0))
1568 		return nb_rx;
1569 
1570 
1571 	num = nb_pkts;
1572 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1573 		num = VIRTIO_MBUF_BURST_SZ;
1574 	if (likely(num > DESC_PER_CACHELINE))
1575 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1576 
1577 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1578 
1579 	for (i = 0; i < num; i++) {
1580 		struct virtio_net_hdr_mrg_rxbuf *header;
1581 
1582 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1583 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1584 
1585 		rxm = rcv_pkts[i];
1586 
1587 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1588 			PMD_RX_LOG(ERR, "Packet drop");
1589 			nb_enqueued++;
1590 			virtio_discard_rxbuf(vq, rxm);
1591 			rxvq->stats.errors++;
1592 			continue;
1593 		}
1594 
1595 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1596 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1597 		seg_num = header->num_buffers;
1598 
1599 		if (seg_num == 0)
1600 			seg_num = 1;
1601 
1602 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1603 		rxm->nb_segs = seg_num;
1604 		rxm->ol_flags = 0;
1605 		rxm->vlan_tci = 0;
1606 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1607 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1608 
1609 		rxm->port = rxvq->port_id;
1610 		rx_pkts[nb_rx] = rxm;
1611 		prev = rxm;
1612 
1613 		if (hw->has_rx_offload &&
1614 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1615 			virtio_discard_rxbuf(vq, rxm);
1616 			rxvq->stats.errors++;
1617 			continue;
1618 		}
1619 
1620 		if (hw->vlan_strip)
1621 			rte_vlan_strip(rx_pkts[nb_rx]);
1622 
1623 		seg_res = seg_num - 1;
1624 
1625 		/* Merge remaining segments */
1626 		while (seg_res != 0 && i < (num - 1)) {
1627 			i++;
1628 
1629 			rxm = rcv_pkts[i];
1630 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1631 			rxm->pkt_len = (uint32_t)(len[i]);
1632 			rxm->data_len = (uint16_t)(len[i]);
1633 
1634 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1635 
1636 			prev->next = rxm;
1637 			prev = rxm;
1638 			seg_res -= 1;
1639 		}
1640 
1641 		if (!seg_res) {
1642 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1643 			nb_rx++;
1644 		}
1645 	}
1646 
1647 	/* The last packet may still have segments left to merge */
1648 	while (seg_res != 0) {
1649 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1650 					VIRTIO_MBUF_BURST_SZ);
1651 		uint16_t extra_idx = 0;
1652 
1653 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1654 				len, rcv_cnt);
1655 		if (unlikely(rcv_cnt == 0)) {
1656 			PMD_RX_LOG(ERR, "Not enough segments for packet.");
1657 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1658 			rxvq->stats.errors++;
1659 			break;
1660 		}
1661 
1662 		while (extra_idx < rcv_cnt) {
1663 			rxm = rcv_pkts[extra_idx];
1664 
1665 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1666 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1667 			rxm->data_len = (uint16_t)(len[extra_idx]);
1668 
1669 			prev->next = rxm;
1670 			prev = rxm;
1671 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1672 			extra_idx += 1;
1673 		}
1674 		seg_res -= rcv_cnt;
1675 		if (!seg_res) {
1676 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1677 			nb_rx++;
1678 		}
1679 	}
1680 
1681 	rxvq->stats.packets += nb_rx;
1682 
1683 	/* Allocate new mbufs to refill the used descriptors */
1684 	if (likely(!virtqueue_full(vq))) {
1685 		/* free_cnt may include mrg descs */
1686 		uint16_t free_cnt = vq->vq_free_cnt;
1687 		struct rte_mbuf *new_pkts[free_cnt];
1688 
1689 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1690 			error = virtqueue_enqueue_recv_refill_packed(vq,
1691 					new_pkts, free_cnt);
1692 			if (unlikely(error)) {
1693 				for (i = 0; i < free_cnt; i++)
1694 					rte_pktmbuf_free(new_pkts[i]);
1695 			}
1696 			nb_enqueued += free_cnt;
1697 		} else {
1698 			struct rte_eth_dev *dev =
1699 				&rte_eth_devices[rxvq->port_id];
1700 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1701 		}
1702 	}
1703 
1704 	if (likely(nb_enqueued)) {
1705 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1706 			virtqueue_notify(vq);
1707 			PMD_RX_LOG(DEBUG, "Notified");
1708 		}
1709 	}
1710 
1711 	return nb_rx;
1712 }
1713 
1714 uint16_t
1715 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1716 			uint16_t nb_pkts)
1717 {
1718 	uint16_t nb_tx;
1719 	int error;
1720 
1721 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1722 		struct rte_mbuf *m = tx_pkts[nb_tx];
1723 
1724 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1725 		error = rte_validate_tx_offload(m);
1726 		if (unlikely(error)) {
1727 			rte_errno = -error;
1728 			break;
1729 		}
1730 #endif
1731 
1732 		/* Do VLAN tag insertion */
1733 		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1734 			error = rte_vlan_insert(&m);
1735 			/* rte_vlan_insert() may change pointer
1736 			 * even in the case of failure
1737 			 */
1738 			tx_pkts[nb_tx] = m;
1739 
1740 			if (unlikely(error)) {
1741 				rte_errno = -error;
1742 				break;
1743 			}
1744 		}
1745 
1746 		error = rte_net_intel_cksum_prepare(m);
1747 		if (unlikely(error)) {
1748 			rte_errno = -error;
1749 			break;
1750 		}
1751 
1752 		if (m->ol_flags & PKT_TX_TCP_SEG)
1753 			virtio_tso_fix_cksum(m);
1754 	}
1755 
1756 	return nb_tx;
1757 }
1758 
1759 uint16_t
1760 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1761 			uint16_t nb_pkts)
1762 {
1763 	struct virtnet_tx *txvq = tx_queue;
1764 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1765 	struct virtio_hw *hw = vq->hw;
1766 	uint16_t hdr_size = hw->vtnet_hdr_size;
1767 	uint16_t nb_tx = 0;
1768 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1769 
1770 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1771 		return nb_tx;
1772 
1773 	if (unlikely(nb_pkts < 1))
1774 		return nb_pkts;
1775 
1776 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1777 
1778 	if (nb_pkts > vq->vq_free_cnt)
1779 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1780 					   in_order);
1781 
1782 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1783 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1784 		int can_push = 0, use_indirect = 0, slots, need;
1785 
1786 		/* optimize ring usage */
1787 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1788 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1789 		    rte_mbuf_refcnt_read(txm) == 1 &&
1790 		    RTE_MBUF_DIRECT(txm) &&
1791 		    txm->nb_segs == 1 &&
1792 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1793 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1794 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1795 			can_push = 1;
1796 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1797 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1798 			use_indirect = 1;
1799 		/* How many main ring entries are needed for this Tx?
1800 		 * indirect   => 1
1801 		 * any_layout => number of segments
1802 		 * default    => number of segments + 1
1803 		 */
1804 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1805 		need = slots - vq->vq_free_cnt;
1806 
1807 		/* A positive value indicates we still need free vring descriptors */
1808 		if (unlikely(need > 0)) {
1809 			virtio_xmit_cleanup_packed(vq, need, in_order);
1810 			need = slots - vq->vq_free_cnt;
1811 			if (unlikely(need > 0)) {
1812 				PMD_TX_LOG(ERR,
1813 					   "No free tx descriptors to transmit");
1814 				break;
1815 			}
1816 		}
1817 
1818 		/* Enqueue Packet buffers */
1819 		if (can_push)
1820 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1821 		else
1822 			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1823 						      use_indirect, 0,
1824 						      in_order);
1825 
1826 		virtio_update_packet_stats(&txvq->stats, txm);
1827 	}
1828 
1829 	txvq->stats.packets += nb_tx;
1830 
1831 	if (likely(nb_tx)) {
1832 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1833 			virtqueue_notify(vq);
1834 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1835 		}
1836 	}
1837 
1838 	return nb_tx;
1839 }
1840 
1841 uint16_t
1842 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1843 {
1844 	struct virtnet_tx *txvq = tx_queue;
1845 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1846 	struct virtio_hw *hw = vq->hw;
1847 	uint16_t hdr_size = hw->vtnet_hdr_size;
1848 	uint16_t nb_used, nb_tx = 0;
1849 
1850 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1851 		return nb_tx;
1852 
1853 	if (unlikely(nb_pkts < 1))
1854 		return nb_pkts;
1855 
1856 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1857 
1858 	nb_used = virtqueue_nused(vq);
1859 
1860 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1861 		virtio_xmit_cleanup(vq, nb_used);
1862 
1863 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1864 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1865 		int can_push = 0, use_indirect = 0, slots, need;
1866 
1867 		/* optimize ring usage */
1868 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1869 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1870 		    rte_mbuf_refcnt_read(txm) == 1 &&
1871 		    RTE_MBUF_DIRECT(txm) &&
1872 		    txm->nb_segs == 1 &&
1873 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1874 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1875 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1876 			can_push = 1;
1877 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1878 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1879 			use_indirect = 1;
1880 
1881 		/* How many main ring entries are needed for this Tx?
1882 		 * any_layout => number of segments
1883 		 * indirect   => 1
1884 		 * default    => number of segments + 1
1885 		 */
1886 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1887 		need = slots - vq->vq_free_cnt;
1888 
1889 		/* A positive value indicates we still need free vring descriptors */
1890 		if (unlikely(need > 0)) {
1891 			nb_used = virtqueue_nused(vq);
1892 
1893 			need = RTE_MIN(need, (int)nb_used);
1894 
1895 			virtio_xmit_cleanup(vq, need);
1896 			need = slots - vq->vq_free_cnt;
1897 			if (unlikely(need > 0)) {
1898 				PMD_TX_LOG(ERR,
1899 					   "No free tx descriptors to transmit");
1900 				break;
1901 			}
1902 		}
1903 
1904 		/* Enqueue Packet buffers */
1905 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1906 			can_push, 0);
1907 
1908 		virtio_update_packet_stats(&txvq->stats, txm);
1909 	}
1910 
1911 	txvq->stats.packets += nb_tx;
1912 
1913 	if (likely(nb_tx)) {
1914 		vq_update_avail_idx(vq);
1915 
1916 		if (unlikely(virtqueue_kick_prepare(vq))) {
1917 			virtqueue_notify(vq);
1918 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1919 		}
1920 	}
1921 
1922 	return nb_tx;
1923 }
1924 
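/*
 * Try to reclaim enough in-order TX descriptors to satisfy 'need'.
 * Returns the number of descriptors still missing; a value <= 0 means
 * enough descriptors are now free.
 */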
1925 static __rte_always_inline int
1926 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1927 {
1928 	uint16_t nb_used, nb_clean, nb_descs;
1929 
1930 	nb_descs = vq->vq_free_cnt + need;
1931 	nb_used = virtqueue_nused(vq);
1932 	nb_clean = RTE_MIN(need, (int)nb_used);
1933 
1934 	virtio_xmit_cleanup_inorder(vq, nb_clean);
1935 
1936 	return nb_descs - vq->vq_free_cnt;
1937 }
1938 
1939 uint16_t
1940 virtio_xmit_pkts_inorder(void *tx_queue,
1941 			struct rte_mbuf **tx_pkts,
1942 			uint16_t nb_pkts)
1943 {
1944 	struct virtnet_tx *txvq = tx_queue;
1945 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1946 	struct virtio_hw *hw = vq->hw;
1947 	uint16_t hdr_size = hw->vtnet_hdr_size;
1948 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1949 	struct rte_mbuf *inorder_pkts[nb_pkts];
1950 	int need;
1951 
1952 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1953 		return nb_tx;
1954 
1955 	if (unlikely(nb_pkts < 1))
1956 		return nb_pkts;
1957 
1958 	VIRTQUEUE_DUMP(vq);
1959 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1960 	nb_used = virtqueue_nused(vq);
1961 
1962 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1963 		virtio_xmit_cleanup_inorder(vq, nb_used);
1964 
1965 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1966 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1967 		int slots;
1968 
1969 		/* optimize ring usage */
1970 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1971 		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1972 		     rte_mbuf_refcnt_read(txm) == 1 &&
1973 		     RTE_MBUF_DIRECT(txm) &&
1974 		     txm->nb_segs == 1 &&
1975 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
1976 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1977 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1978 			inorder_pkts[nb_inorder_pkts] = txm;
1979 			nb_inorder_pkts++;
1980 
1981 			continue;
1982 		}
1983 
1984 		if (nb_inorder_pkts) {
1985 			need = nb_inorder_pkts - vq->vq_free_cnt;
1986 			if (unlikely(need > 0)) {
1987 				need = virtio_xmit_try_cleanup_inorder(vq,
1988 								       need);
1989 				if (unlikely(need > 0)) {
1990 					PMD_TX_LOG(ERR,
1991 						"No free tx descriptors to "
1992 						"transmit");
1993 					break;
1994 				}
1995 			}
1996 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1997 							nb_inorder_pkts);
1998 			nb_inorder_pkts = 0;
1999 		}
2000 
2001 		slots = txm->nb_segs + 1;
2002 		need = slots - vq->vq_free_cnt;
2003 		if (unlikely(need > 0)) {
2004 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
2005 
2006 			if (unlikely(need > 0)) {
2007 				PMD_TX_LOG(ERR,
2008 					"No free tx descriptors to transmit");
2009 				break;
2010 			}
2011 		}
2012 		/* Enqueue Packet buffers */
2013 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2014 
2015 		virtio_update_packet_stats(&txvq->stats, txm);
2016 	}
2017 
2018 	/* Transmit all inorder packets */
2019 	if (nb_inorder_pkts) {
2020 		need = nb_inorder_pkts - vq->vq_free_cnt;
2021 		if (unlikely(need > 0)) {
2022 			need = virtio_xmit_try_cleanup_inorder(vq,
2023 								  need);
2024 			if (unlikely(need > 0)) {
2025 				PMD_TX_LOG(ERR,
2026 					"No free tx descriptors to transmit");
2027 				nb_inorder_pkts = vq->vq_free_cnt;
2028 				nb_tx -= need;
2029 			}
2030 		}
2031 
2032 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2033 						nb_inorder_pkts);
2034 	}
2035 
2036 	txvq->stats.packets += nb_tx;
2037 
2038 	if (likely(nb_tx)) {
2039 		vq_update_avail_idx(vq);
2040 
2041 		if (unlikely(virtqueue_kick_prepare(vq))) {
2042 			virtqueue_notify(vq);
2043 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2044 		}
2045 	}
2046 
2047 	VIRTQUEUE_DUMP(vq);
2048 
2049 	return nb_tx;
2050 }
2051 
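/*
 * Weak no-op stubs for the vectorized packed-ring Rx/Tx paths; the
 * architecture-specific implementations override them when built.
 */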
2052 __rte_weak uint16_t
2053 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2054 			    struct rte_mbuf **rx_pkts __rte_unused,
2055 			    uint16_t nb_pkts __rte_unused)
2056 {
2057 	return 0;
2058 }
2059 
2060 __rte_weak uint16_t
2061 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2062 			    struct rte_mbuf **tx_pkts __rte_unused,
2063 			    uint16_t nb_pkts __rte_unused)
2064 {
2065 	return 0;
2066 }
2067