xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision f5057be340e44f3edc0fe90fa875eb89a4c49b4f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
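/*
 * Report whether at least @offset used entries are pending in the
 * receive virtqueue, i.e. whether that many received buffers could be
 * dequeued right away.
 */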
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45 	struct virtnet_rx *rxvq = rxq;
46 	struct virtqueue *vq = rxvq->vq;
47 
48 	return virtqueue_nused(vq) >= offset;
49 }
50 
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54 	vq->vq_free_cnt += num;
55 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57 
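/*
 * Return a (possibly chained) descriptor back to the split ring's free
 * list: walk the chain to its last descriptor, then link the chain after
 * the current free-list tail, or make it the new head when the free list
 * was empty (vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END).
 */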
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61 	struct vring_desc *dp, *dp_tail;
62 	struct vq_desc_extra *dxp;
63 	uint16_t desc_idx_last = desc_idx;
64 
65 	dp  = &vq->vq_split.ring.desc[desc_idx];
66 	dxp = &vq->vq_descx[desc_idx];
67 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 		while (dp->flags & VRING_DESC_F_NEXT) {
70 			desc_idx_last = dp->next;
71 			dp = &vq->vq_split.ring.desc[dp->next];
72 		}
73 	}
74 	dxp->ndescs = 0;
75 
76 	/*
77 	 * We must append the existing free chain, if any, to the end of
78 	 * the newly freed chain. If the virtqueue was completely used,
79 	 * the head will be VQ_RING_DESC_CHAIN_END.
80 	 */
81 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 		vq->vq_desc_head_idx = desc_idx;
83 	} else {
84 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85 		dp_tail->next = desc_idx;
86 	}
87 
88 	vq->vq_desc_tail_idx = desc_idx_last;
89 	dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91 
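/*
 * Packet size histogram used below (a sketch of the mapping implied by
 * the code): bin 0 covers < 64B, bin 1 exactly 64B, bins 2-5 cover
 * 65-127, 128-255, 256-511 and 512-1023 bytes, bin 6 covers 1024-1518B
 * and bin 7 anything larger. For 64 < s < 1024 the bin index comes from
 * the position of the highest set bit, e.g. s = 300: __builtin_clz()
 * returns 23, so bin = 32 - 23 - 5 = 4 (the 256-511 bucket).
 */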
92 void
93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
94 {
95 	uint32_t s = mbuf->pkt_len;
96 	struct rte_ether_addr *ea;
97 
98 	stats->bytes += s;
99 
100 	if (s == 64) {
101 		stats->size_bins[1]++;
102 	} else if (s > 64 && s < 1024) {
103 		uint32_t bin;
104 
105 		/* count leading zeros to index into the correct size bin */
106 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
107 		stats->size_bins[bin]++;
108 	} else {
109 		if (s < 64)
110 			stats->size_bins[0]++;
111 		else if (s < 1519)
112 			stats->size_bins[6]++;
113 		else
114 			stats->size_bins[7]++;
115 	}
116 
117 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
118 	if (rte_is_multicast_ether_addr(ea)) {
119 		if (rte_is_broadcast_ether_addr(ea))
120 			stats->broadcast++;
121 		else
122 			stats->multicast++;
123 	}
124 }
125 
126 static inline void
127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
128 {
129 	VIRTIO_DUMP_PACKET(m, m->data_len);
130 
131 	virtio_update_packet_stats(&rxvq->stats, m);
132 }
133 
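/*
 * Dequeue up to @num used descriptors from a packed ring. Each iteration
 * checks desc_is_used() against the queue's used_wrap_counter; the
 * counter is toggled whenever the consumer index wraps around
 * vq_nentries, as the packed virtqueue format requires.
 */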
134 static uint16_t
135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
136 				  struct rte_mbuf **rx_pkts,
137 				  uint32_t *len,
138 				  uint16_t num)
139 {
140 	struct rte_mbuf *cookie;
141 	uint16_t used_idx;
142 	uint16_t id;
143 	struct vring_packed_desc *desc;
144 	uint16_t i;
145 
146 	desc = vq->vq_packed.ring.desc;
147 
148 	for (i = 0; i < num; i++) {
149 		used_idx = vq->vq_used_cons_idx;
150 		/* desc_is_used() has a load-acquire or rte_io_rmb inside
151 		 * and waits for a used descriptor in the virtqueue.
152 		 */
153 		if (!desc_is_used(&desc[used_idx], vq))
154 			return i;
155 		len[i] = desc[used_idx].len;
156 		id = desc[used_idx].id;
157 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
158 		if (unlikely(cookie == NULL)) {
159 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
160 				vq->vq_used_cons_idx);
161 			break;
162 		}
163 		rte_prefetch0(cookie);
164 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
165 		rx_pkts[i] = cookie;
166 
167 		vq->vq_free_cnt++;
168 		vq->vq_used_cons_idx++;
169 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
170 			vq->vq_used_cons_idx -= vq->vq_nentries;
171 			vq->vq_packed.used_wrap_counter ^= 1;
172 		}
173 	}
174 
175 	return i;
176 }
177 
178 static uint16_t
179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
180 			   uint32_t *len, uint16_t num)
181 {
182 	struct vring_used_elem *uep;
183 	struct rte_mbuf *cookie;
184 	uint16_t used_idx, desc_idx;
185 	uint16_t i;
186 
187 	/* The caller does the availability check */
188 	for (i = 0; i < num ; i++) {
189 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
190 		uep = &vq->vq_split.ring.used->ring[used_idx];
191 		desc_idx = (uint16_t) uep->id;
192 		len[i] = uep->len;
193 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
194 
195 		if (unlikely(cookie == NULL)) {
196 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
197 				vq->vq_used_cons_idx);
198 			break;
199 		}
200 
201 		rte_prefetch0(cookie);
202 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
203 		rx_pkts[i]  = cookie;
204 		vq->vq_used_cons_idx++;
205 		vq_ring_free_chain(vq, desc_idx);
206 		vq->vq_descx[desc_idx].cookie = NULL;
207 	}
208 
209 	return i;
210 }
211 
212 static uint16_t
213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
214 			struct rte_mbuf **rx_pkts,
215 			uint32_t *len,
216 			uint16_t num)
217 {
218 	struct vring_used_elem *uep;
219 	struct rte_mbuf *cookie;
220 	uint16_t used_idx = 0;
221 	uint16_t i;
222 
223 	if (unlikely(num == 0))
224 		return 0;
225 
226 	for (i = 0; i < num; i++) {
227 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
228 		/* Desc idx same as used idx */
229 		uep = &vq->vq_split.ring.used->ring[used_idx];
230 		len[i] = uep->len;
231 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
232 
233 		if (unlikely(cookie == NULL)) {
234 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
235 				vq->vq_used_cons_idx);
236 			break;
237 		}
238 
239 		rte_prefetch0(cookie);
240 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
241 		rx_pkts[i]  = cookie;
242 		vq->vq_used_cons_idx++;
243 		vq->vq_descx[used_idx].cookie = NULL;
244 	}
245 
246 	vq_ring_free_inorder(vq, used_idx, i);
247 	return i;
248 }
249 
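/*
 * The Rx refill helpers below program each descriptor so that the device
 * writes the virtio-net header into the tail of the mbuf headroom and the
 * packet data at the usual data offset, roughly:
 *
 *   addr = buf_addr + RTE_PKTMBUF_HEADROOM - vtnet_hdr_size
 *   len  = buf_len  - RTE_PKTMBUF_HEADROOM + vtnet_hdr_size
 *
 * so the header sits immediately before the packet and no separate
 * header buffer is needed.
 */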
250 static inline int
251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
252 			struct rte_mbuf **cookies,
253 			uint16_t num)
254 {
255 	struct vq_desc_extra *dxp;
256 	struct virtio_hw *hw = vq->hw;
257 	struct vring_desc *start_dp;
258 	uint16_t head_idx, idx, i = 0;
259 
260 	if (unlikely(vq->vq_free_cnt == 0))
261 		return -ENOSPC;
262 	if (unlikely(vq->vq_free_cnt < num))
263 		return -EMSGSIZE;
264 
265 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
266 	start_dp = vq->vq_split.ring.desc;
267 
268 	while (i < num) {
269 		idx = head_idx & (vq->vq_nentries - 1);
270 		dxp = &vq->vq_descx[idx];
271 		dxp->cookie = (void *)cookies[i];
272 		dxp->ndescs = 1;
273 
274 		start_dp[idx].addr =
275 				VIRTIO_MBUF_ADDR(cookies[i], vq) +
276 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
277 		start_dp[idx].len =
278 				cookies[i]->buf_len -
279 				RTE_PKTMBUF_HEADROOM +
280 				hw->vtnet_hdr_size;
281 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
282 
283 		vq_update_avail_ring(vq, idx);
284 		head_idx++;
285 		i++;
286 	}
287 
288 	vq->vq_desc_head_idx += num;
289 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
290 	return 0;
291 }
292 
293 static inline int
294 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
295 				uint16_t num)
296 {
297 	struct vq_desc_extra *dxp;
298 	struct virtio_hw *hw = vq->hw;
299 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
300 	uint16_t idx, i;
301 
302 	if (unlikely(vq->vq_free_cnt == 0))
303 		return -ENOSPC;
304 	if (unlikely(vq->vq_free_cnt < num))
305 		return -EMSGSIZE;
306 
307 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
308 		return -EFAULT;
309 
310 	for (i = 0; i < num; i++) {
311 		idx = vq->vq_desc_head_idx;
312 		dxp = &vq->vq_descx[idx];
313 		dxp->cookie = (void *)cookie[i];
314 		dxp->ndescs = 1;
315 
316 		start_dp[idx].addr =
317 			VIRTIO_MBUF_ADDR(cookie[i], vq) +
318 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
319 		start_dp[idx].len =
320 			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
321 			hw->vtnet_hdr_size;
322 		start_dp[idx].flags = VRING_DESC_F_WRITE;
323 		vq->vq_desc_head_idx = start_dp[idx].next;
324 		vq_update_avail_ring(vq, idx);
325 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
326 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
327 			break;
328 		}
329 	}
330 
331 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
332 
333 	return 0;
334 }
335 
336 static inline int
337 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
338 				     struct rte_mbuf **cookie, uint16_t num)
339 {
340 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
341 	uint16_t flags = vq->vq_packed.cached_flags;
342 	struct virtio_hw *hw = vq->hw;
343 	struct vq_desc_extra *dxp;
344 	uint16_t idx;
345 	int i;
346 
347 	if (unlikely(vq->vq_free_cnt == 0))
348 		return -ENOSPC;
349 	if (unlikely(vq->vq_free_cnt < num))
350 		return -EMSGSIZE;
351 
352 	for (i = 0; i < num; i++) {
353 		idx = vq->vq_avail_idx;
354 		dxp = &vq->vq_descx[idx];
355 		dxp->cookie = (void *)cookie[i];
356 		dxp->ndescs = 1;
357 
358 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
359 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
360 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
361 					+ hw->vtnet_hdr_size;
362 
363 		vq->vq_desc_head_idx = dxp->next;
364 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
365 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
366 
367 		virtqueue_store_flags_packed(&start_dp[idx], flags,
368 					     hw->weak_barriers);
369 
370 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
371 			vq->vq_avail_idx -= vq->vq_nentries;
372 			vq->vq_packed.cached_flags ^=
373 				VRING_PACKED_DESC_F_AVAIL_USED;
374 			flags = vq->vq_packed.cached_flags;
375 		}
376 	}
377 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
378 	return 0;
379 }
380 
381 /* When doing TSO, the IP length is not included in the pseudo-header
382  * checksum of the packet given to the PMD, but virtio expects it to be
383  * included, so adjust the checksum here.
384  */
385 static void
386 virtio_tso_fix_cksum(struct rte_mbuf *m)
387 {
388 	/* common case: header is not fragmented */
389 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
390 			m->l4_len)) {
391 		struct rte_ipv4_hdr *iph;
392 		struct rte_ipv6_hdr *ip6h;
393 		struct rte_tcp_hdr *th;
394 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
395 		uint32_t tmp;
396 
397 		iph = rte_pktmbuf_mtod_offset(m,
398 					struct rte_ipv4_hdr *, m->l2_len);
399 		th = RTE_PTR_ADD(iph, m->l3_len);
400 		if ((iph->version_ihl >> 4) == 4) {
401 			iph->hdr_checksum = 0;
402 			iph->hdr_checksum = rte_ipv4_cksum(iph);
403 			ip_len = iph->total_length;
404 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
405 				m->l3_len);
406 		} else {
407 			ip6h = (struct rte_ipv6_hdr *)iph;
408 			ip_paylen = ip6h->payload_len;
409 		}
410 
411 		/* fold ip_paylen into the pseudo-header checksum (one's complement add) */
412 		prev_cksum = th->cksum;
413 		tmp = prev_cksum;
414 		tmp += ip_paylen;
415 		tmp = (tmp & 0xffff) + (tmp >> 16);
416 		new_cksum = tmp;
417 
418 		/* replace it in the packet */
419 		th->cksum = new_cksum;
420 	}
421 }
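/*
 * Illustrative example of the adjustment above (numbers are hypothetical):
 * if the application stored a pseudo-header checksum of 0x1c5e (length
 * excluded) and the L3 payload length is 0x0100 bytes, the one's
 * complement add yields 0x1d5e in th->cksum, which is the form the host
 * side expects before segmentation.
 */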
422 
423 
424 
425 
426 static inline void
427 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
428 			struct rte_mbuf **cookies,
429 			uint16_t num)
430 {
431 	struct vq_desc_extra *dxp;
432 	struct virtqueue *vq = txvq->vq;
433 	struct vring_desc *start_dp;
434 	struct virtio_net_hdr *hdr;
435 	uint16_t idx;
436 	int16_t head_size = vq->hw->vtnet_hdr_size;
437 	uint16_t i = 0;
438 
439 	idx = vq->vq_desc_head_idx;
440 	start_dp = vq->vq_split.ring.desc;
441 
442 	while (i < num) {
443 		idx = idx & (vq->vq_nentries - 1);
444 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
445 		dxp->cookie = (void *)cookies[i];
446 		dxp->ndescs = 1;
447 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
448 
449 		hdr = rte_pktmbuf_mtod_offset(cookies[i],
450 				struct virtio_net_hdr *, -head_size);
451 
452 		/* if offload disabled, hdr is not zeroed yet, do it now */
453 		if (!vq->hw->has_tx_offload)
454 			virtqueue_clear_net_hdr(hdr);
455 		else
456 			virtqueue_xmit_offload(hdr, cookies[i], true);
457 
458 		start_dp[idx].addr  =
459 			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
460 		start_dp[idx].len   = cookies[i]->data_len + head_size;
461 		start_dp[idx].flags = 0;
462 
463 
464 		vq_update_avail_ring(vq, idx);
465 
466 		idx++;
467 		i++;
468 	}
469 
470 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
471 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
472 }
473 
474 static inline void
475 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
476 				   struct rte_mbuf *cookie,
477 				   int in_order)
478 {
479 	struct virtqueue *vq = txvq->vq;
480 	struct vring_packed_desc *dp;
481 	struct vq_desc_extra *dxp;
482 	uint16_t idx, id, flags;
483 	int16_t head_size = vq->hw->vtnet_hdr_size;
484 	struct virtio_net_hdr *hdr;
485 
486 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
487 	idx = vq->vq_avail_idx;
488 	dp = &vq->vq_packed.ring.desc[idx];
489 
490 	dxp = &vq->vq_descx[id];
491 	dxp->ndescs = 1;
492 	dxp->cookie = cookie;
493 
494 	flags = vq->vq_packed.cached_flags;
495 
496 	/* prepend cannot fail, checked by caller */
497 	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
498 				      -head_size);
499 
500 	/* if offload disabled, hdr is not zeroed yet, do it now */
501 	if (!vq->hw->has_tx_offload)
502 		virtqueue_clear_net_hdr(hdr);
503 	else
504 		virtqueue_xmit_offload(hdr, cookie, true);
505 
506 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
507 	dp->len  = cookie->data_len + head_size;
508 	dp->id   = id;
509 
510 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
511 		vq->vq_avail_idx -= vq->vq_nentries;
512 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
513 	}
514 
515 	vq->vq_free_cnt--;
516 
517 	if (!in_order) {
518 		vq->vq_desc_head_idx = dxp->next;
519 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
520 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
521 	}
522 
523 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
524 }
525 
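/*
 * Split-ring transmit enqueue. Three descriptor layouts are used,
 * mirroring the can_push/use_indirect flags computed by the callers:
 *  - can_push:     the virtio-net header is written into the mbuf
 *                  headroom, so the packet needs nb_segs descriptors;
 *  - use_indirect: a single ring slot points to a per-slot indirect
 *                  table in the reserved virtio_tx_region;
 *  - otherwise:    one extra descriptor references the header kept in
 *                  the reserved region, followed by the data segments.
 */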
526 static inline void
527 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
528 			uint16_t needed, int use_indirect, int can_push,
529 			int in_order)
530 {
531 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
532 	struct vq_desc_extra *dxp;
533 	struct virtqueue *vq = txvq->vq;
534 	struct vring_desc *start_dp;
535 	uint16_t seg_num = cookie->nb_segs;
536 	uint16_t head_idx, idx;
537 	int16_t head_size = vq->hw->vtnet_hdr_size;
538 	bool prepend_header = false;
539 	struct virtio_net_hdr *hdr;
540 
541 	head_idx = vq->vq_desc_head_idx;
542 	idx = head_idx;
543 	if (in_order)
544 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
545 	else
546 		dxp = &vq->vq_descx[idx];
547 	dxp->cookie = (void *)cookie;
548 	dxp->ndescs = needed;
549 
550 	start_dp = vq->vq_split.ring.desc;
551 
552 	if (can_push) {
553 		/* prepend cannot fail, checked by caller */
554 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
555 					      -head_size);
556 		prepend_header = true;
557 
558 		/* if offload disabled, it is not zeroed below, do it now */
559 		if (!vq->hw->has_tx_offload)
560 			virtqueue_clear_net_hdr(hdr);
561 	} else if (use_indirect) {
562 		/* set up the tx ring slot to point to the indirect
563 		 * descriptor list stored in the reserved region.
564 		 *
565 		 * the first slot of the indirect ring is already preset
566 		 * to point to the header in the reserved region.
567 		 */
568 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
569 			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
570 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
571 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
572 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
573 
574 		/* the loop below will fill in the rest of the indirect elements */
575 		start_dp = txr[idx].tx_indir;
576 		idx = 1;
577 	} else {
578 		/* set up the first tx ring slot to point to the header
579 		 * stored in the reserved region.
580 		 */
581 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
582 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
583 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
584 		start_dp[idx].flags = VRING_DESC_F_NEXT;
585 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
586 
587 		idx = start_dp[idx].next;
588 	}
589 
590 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
591 
592 	do {
593 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
594 		start_dp[idx].len   = cookie->data_len;
595 		if (prepend_header) {
596 			start_dp[idx].addr -= head_size;
597 			start_dp[idx].len += head_size;
598 			prepend_header = false;
599 		}
600 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
601 		idx = start_dp[idx].next;
602 	} while ((cookie = cookie->next) != NULL);
603 
604 	if (use_indirect)
605 		idx = vq->vq_split.ring.desc[head_idx].next;
606 
607 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
608 
609 	vq->vq_desc_head_idx = idx;
610 	vq_update_avail_ring(vq, head_idx);
611 
612 	if (!in_order) {
613 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
614 			vq->vq_desc_tail_idx = idx;
615 	}
616 }
617 
618 void
619 virtio_dev_cq_start(struct rte_eth_dev *dev)
620 {
621 	struct virtio_hw *hw = dev->data->dev_private;
622 
623 	if (hw->cvq && hw->cvq->vq) {
624 		rte_spinlock_init(&hw->cvq->lock);
625 		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
626 	}
627 }
628 
629 int
630 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
631 			uint16_t queue_idx,
632 			uint16_t nb_desc,
633 			unsigned int socket_id __rte_unused,
634 			const struct rte_eth_rxconf *rx_conf,
635 			struct rte_mempool *mp)
636 {
637 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
638 	struct virtio_hw *hw = dev->data->dev_private;
639 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
640 	struct virtnet_rx *rxvq;
641 	uint16_t rx_free_thresh;
642 
643 	PMD_INIT_FUNC_TRACE();
644 
645 	if (rx_conf->rx_deferred_start) {
646 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
647 		return -EINVAL;
648 	}
649 
650 	rx_free_thresh = rx_conf->rx_free_thresh;
651 	if (rx_free_thresh == 0)
652 		rx_free_thresh =
653 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
654 
655 	if (rx_free_thresh & 0x3) {
656 		RTE_LOG(ERR, PMD, "rx_free_thresh must be multiples of four."
657 			" (rx_free_thresh=%u port=%u queue=%u)\n",
658 			rx_free_thresh, dev->data->port_id, queue_idx);
659 		return -EINVAL;
660 	}
661 
662 	if (rx_free_thresh >= vq->vq_nentries) {
663 		RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
664 			"number of RX entries (%u)."
665 			" (rx_free_thresh=%u port=%u queue=%u)\n",
666 			vq->vq_nentries,
667 			rx_free_thresh, dev->data->port_id, queue_idx);
668 		return -EINVAL;
669 	}
670 	vq->vq_free_thresh = rx_free_thresh;
671 
672 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
673 		nb_desc = vq->vq_nentries;
674 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
675 
676 	rxvq = &vq->rxq;
677 	rxvq->queue_id = queue_idx;
678 	rxvq->mpool = mp;
679 	dev->data->rx_queues[queue_idx] = rxvq;
680 
681 	return 0;
682 }
683 
684 int
685 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
686 {
687 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
688 	struct virtio_hw *hw = dev->data->dev_private;
689 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
690 	struct virtnet_rx *rxvq = &vq->rxq;
691 	struct rte_mbuf *m;
692 	uint16_t desc_idx;
693 	int error, nbufs, i;
694 	bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
695 
696 	PMD_INIT_FUNC_TRACE();
697 
698 	/* Allocate blank mbufs for each rx descriptor */
699 	nbufs = 0;
700 
701 	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
702 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
703 		     desc_idx++) {
704 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
705 			vq->vq_split.ring.desc[desc_idx].flags =
706 				VRING_DESC_F_WRITE;
707 		}
708 
709 		virtio_rxq_vec_setup(rxvq);
710 	}
711 
712 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
713 	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
714 	     desc_idx++) {
715 		vq->sw_ring[vq->vq_nentries + desc_idx] =
716 			&rxvq->fake_mbuf;
717 	}
718 
719 	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
720 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
721 			virtio_rxq_rearm_vec(rxvq);
722 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
723 		}
724 	} else if (!vtpci_packed_queue(vq->hw) && in_order) {
725 		if ((!virtqueue_full(vq))) {
726 			uint16_t free_cnt = vq->vq_free_cnt;
727 			struct rte_mbuf *pkts[free_cnt];
728 
729 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
730 				free_cnt)) {
731 				error = virtqueue_enqueue_refill_inorder(vq,
732 						pkts,
733 						free_cnt);
734 				if (unlikely(error)) {
735 					for (i = 0; i < free_cnt; i++)
736 						rte_pktmbuf_free(pkts[i]);
737 				}
738 			}
739 
740 			nbufs += free_cnt;
741 			vq_update_avail_idx(vq);
742 		}
743 	} else {
744 		while (!virtqueue_full(vq)) {
745 			m = rte_mbuf_raw_alloc(rxvq->mpool);
746 			if (m == NULL)
747 				break;
748 
749 			/* Enqueue allocated buffers */
750 			if (vtpci_packed_queue(vq->hw))
751 				error = virtqueue_enqueue_recv_refill_packed(vq,
752 						&m, 1);
753 			else
754 				error = virtqueue_enqueue_recv_refill(vq,
755 						&m, 1);
756 			if (error) {
757 				rte_pktmbuf_free(m);
758 				break;
759 			}
760 			nbufs++;
761 		}
762 
763 		if (!vtpci_packed_queue(vq->hw))
764 			vq_update_avail_idx(vq);
765 	}
766 
767 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
768 
769 	VIRTQUEUE_DUMP(vq);
770 
771 	return 0;
772 }
773 
774 /*
775  * struct rte_eth_dev *dev: device whose Tx queue is being set up
776  * uint16_t queue_idx: index into the device's Tx queue list
777  * uint16_t nb_desc: ring size; 0 means use the value read from config space
778  * unsigned int socket_id: socket used for memzone allocation (unused here)
779  * const struct rte_eth_txconf *tx_conf: Tx engine configuration
780  */
781 int
782 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
783 			uint16_t queue_idx,
784 			uint16_t nb_desc,
785 			unsigned int socket_id __rte_unused,
786 			const struct rte_eth_txconf *tx_conf)
787 {
788 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
789 	struct virtio_hw *hw = dev->data->dev_private;
790 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
791 	struct virtnet_tx *txvq;
792 	uint16_t tx_free_thresh;
793 
794 	PMD_INIT_FUNC_TRACE();
795 
796 	if (tx_conf->tx_deferred_start) {
797 		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
798 		return -EINVAL;
799 	}
800 
801 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
802 		nb_desc = vq->vq_nentries;
803 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
804 
805 	txvq = &vq->txq;
806 	txvq->queue_id = queue_idx;
807 
808 	tx_free_thresh = tx_conf->tx_free_thresh;
809 	if (tx_free_thresh == 0)
810 		tx_free_thresh =
811 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
812 
813 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
814 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
815 			"number of TX entries minus 3 (%u)."
816 			" (tx_free_thresh=%u port=%u queue=%u)\n",
817 			vq->vq_nentries - 3,
818 			tx_free_thresh, dev->data->port_id, queue_idx);
819 		return -EINVAL;
820 	}
821 
822 	vq->vq_free_thresh = tx_free_thresh;
823 
824 	dev->data->tx_queues[queue_idx] = txvq;
825 	return 0;
826 }
827 
828 int
829 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
830 				uint16_t queue_idx)
831 {
832 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
833 	struct virtio_hw *hw = dev->data->dev_private;
834 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
835 
836 	PMD_INIT_FUNC_TRACE();
837 
838 	if (!vtpci_packed_queue(hw)) {
839 		if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
840 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
841 	}
842 
843 	VIRTQUEUE_DUMP(vq);
844 
845 	return 0;
846 }
847 
848 static inline void
849 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
850 {
851 	int error;
852 	/*
853 	 * Requeue the discarded mbuf. This should always be
854 	 * successful since it was just dequeued.
855 	 */
856 	if (vtpci_packed_queue(vq->hw))
857 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
858 	else
859 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
860 
861 	if (unlikely(error)) {
862 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
863 		rte_pktmbuf_free(m);
864 	}
865 }
866 
867 static inline void
868 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
869 {
870 	int error;
871 
872 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
873 	if (unlikely(error)) {
874 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
875 		rte_pktmbuf_free(m);
876 	}
877 }
878 
879 /* Optionally fill offload information in the mbuf from the virtio-net header */
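/*
 * When VIRTIO_NET_HDR_F_NEEDS_CSUM is set the header carries csum_start
 * and csum_offset; for example, for TCP over IPv4 one would expect
 * csum_start = l2_len + l3_len and csum_offset = 16 (the offset of the
 * checksum field inside the TCP header). If csum_start falls inside the
 * parsed L2/L3/L4 headers the checksum is left for the application
 * (CKSUM_NONE); otherwise it is completed in software below.
 */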
880 static inline int
881 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
882 {
883 	struct rte_net_hdr_lens hdr_lens;
884 	uint32_t hdrlen, ptype;
885 	int l4_supported = 0;
886 
887 	/* nothing to do */
888 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
889 		return 0;
890 
891 	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
892 
893 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
894 	m->packet_type = ptype;
895 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
896 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
897 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
898 		l4_supported = 1;
899 
900 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
901 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
902 		if (hdr->csum_start <= hdrlen && l4_supported) {
903 			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
904 		} else {
905 			/* Unknown proto or tunnel, do sw cksum. We can assume
906 			 * the cksum field is in the first segment since the
907 			 * buffers we provided to the host are large enough.
908 			 * In case of SCTP, this will be wrong since it's a CRC
909 			 * but there's nothing we can do.
910 			 */
911 			uint16_t csum = 0, off;
912 
913 			rte_raw_cksum_mbuf(m, hdr->csum_start,
914 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
915 				&csum);
916 			if (likely(csum != 0xffff))
917 				csum = ~csum;
918 			off = hdr->csum_offset + hdr->csum_start;
919 			if (rte_pktmbuf_data_len(m) >= off + 1)
920 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
921 					off) = csum;
922 		}
923 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
924 		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
925 	}
926 
927 	/* GSO request, save required information in mbuf */
928 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
929 		/* Check unsupported modes */
930 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
931 		    (hdr->gso_size == 0)) {
932 			return -EINVAL;
933 		}
934 
935 		/* Update mss length in mbuf */
936 		m->tso_segsz = hdr->gso_size;
937 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
938 			case VIRTIO_NET_HDR_GSO_TCPV4:
939 			case VIRTIO_NET_HDR_GSO_TCPV6:
940 				m->ol_flags |= PKT_RX_LRO | \
941 					PKT_RX_L4_CKSUM_NONE;
942 				break;
943 			default:
944 				return -EINVAL;
945 		}
946 	}
947 
948 	return 0;
949 }
950 
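/*
 * The receive paths below differ only in ring format and feature set:
 * virtio_recv_pkts() handles the split ring without mergeable buffers,
 * virtio_recv_pkts_packed() the packed ring, and the *_inorder and
 * *_mergeable variants additionally honour VIRTIO_F_IN_ORDER and
 * VIRTIO_NET_F_MRG_RXBUF. Which one is installed as the ethdev
 * rx_pkt_burst callback is decided elsewhere (see virtio_ethdev.c),
 * based on the negotiated features.
 */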
951 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
952 uint16_t
953 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
954 {
955 	struct virtnet_rx *rxvq = rx_queue;
956 	struct virtqueue *vq = rxvq->vq;
957 	struct virtio_hw *hw = vq->hw;
958 	struct rte_mbuf *rxm;
959 	uint16_t nb_used, num, nb_rx;
960 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
961 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
962 	int error;
963 	uint32_t i, nb_enqueued;
964 	uint32_t hdr_size;
965 	struct virtio_net_hdr *hdr;
966 
967 	nb_rx = 0;
968 	if (unlikely(hw->started == 0))
969 		return nb_rx;
970 
971 	nb_used = virtqueue_nused(vq);
972 
973 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
974 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
975 		num = VIRTIO_MBUF_BURST_SZ;
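	/* Trim the burst so it ends on a descriptor cache-line boundary;
	 * the next burst then starts cache-line aligned.
	 */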
976 	if (likely(num > DESC_PER_CACHELINE))
977 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
978 
979 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
980 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
981 
982 	nb_enqueued = 0;
983 	hdr_size = hw->vtnet_hdr_size;
984 
985 	for (i = 0; i < num ; i++) {
986 		rxm = rcv_pkts[i];
987 
988 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
989 
990 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
991 			PMD_RX_LOG(ERR, "Packet drop");
992 			nb_enqueued++;
993 			virtio_discard_rxbuf(vq, rxm);
994 			rxvq->stats.errors++;
995 			continue;
996 		}
997 
998 		rxm->port = rxvq->port_id;
999 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1000 		rxm->ol_flags = 0;
1001 		rxm->vlan_tci = 0;
1002 
1003 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1004 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1005 
1006 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1007 			RTE_PKTMBUF_HEADROOM - hdr_size);
1008 
1009 		if (hw->vlan_strip)
1010 			rte_vlan_strip(rxm);
1011 
1012 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1013 			virtio_discard_rxbuf(vq, rxm);
1014 			rxvq->stats.errors++;
1015 			continue;
1016 		}
1017 
1018 		virtio_rx_stats_updated(rxvq, rxm);
1019 
1020 		rx_pkts[nb_rx++] = rxm;
1021 	}
1022 
1023 	rxvq->stats.packets += nb_rx;
1024 
1025 	/* Allocate new mbufs to refill the used descriptors */
1026 	if (likely(!virtqueue_full(vq))) {
1027 		uint16_t free_cnt = vq->vq_free_cnt;
1028 		struct rte_mbuf *new_pkts[free_cnt];
1029 
1030 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1031 						free_cnt) == 0)) {
1032 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1033 					free_cnt);
1034 			if (unlikely(error)) {
1035 				for (i = 0; i < free_cnt; i++)
1036 					rte_pktmbuf_free(new_pkts[i]);
1037 			}
1038 			nb_enqueued += free_cnt;
1039 		} else {
1040 			struct rte_eth_dev *dev =
1041 				&rte_eth_devices[rxvq->port_id];
1042 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1043 		}
1044 	}
1045 
1046 	if (likely(nb_enqueued)) {
1047 		vq_update_avail_idx(vq);
1048 
1049 		if (unlikely(virtqueue_kick_prepare(vq))) {
1050 			virtqueue_notify(vq);
1051 			PMD_RX_LOG(DEBUG, "Notified");
1052 		}
1053 	}
1054 
1055 	return nb_rx;
1056 }
1057 
1058 uint16_t
1059 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1060 			uint16_t nb_pkts)
1061 {
1062 	struct virtnet_rx *rxvq = rx_queue;
1063 	struct virtqueue *vq = rxvq->vq;
1064 	struct virtio_hw *hw = vq->hw;
1065 	struct rte_mbuf *rxm;
1066 	uint16_t num, nb_rx;
1067 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1068 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1069 	int error;
1070 	uint32_t i, nb_enqueued;
1071 	uint32_t hdr_size;
1072 	struct virtio_net_hdr *hdr;
1073 
1074 	nb_rx = 0;
1075 	if (unlikely(hw->started == 0))
1076 		return nb_rx;
1077 
1078 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1079 	if (likely(num > DESC_PER_CACHELINE))
1080 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1081 
1082 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1083 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1084 
1085 	nb_enqueued = 0;
1086 	hdr_size = hw->vtnet_hdr_size;
1087 
1088 	for (i = 0; i < num; i++) {
1089 		rxm = rcv_pkts[i];
1090 
1091 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1092 
1093 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1094 			PMD_RX_LOG(ERR, "Packet drop");
1095 			nb_enqueued++;
1096 			virtio_discard_rxbuf(vq, rxm);
1097 			rxvq->stats.errors++;
1098 			continue;
1099 		}
1100 
1101 		rxm->port = rxvq->port_id;
1102 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1103 		rxm->ol_flags = 0;
1104 		rxm->vlan_tci = 0;
1105 
1106 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1107 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1108 
1109 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1110 			RTE_PKTMBUF_HEADROOM - hdr_size);
1111 
1112 		if (hw->vlan_strip)
1113 			rte_vlan_strip(rxm);
1114 
1115 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1116 			virtio_discard_rxbuf(vq, rxm);
1117 			rxvq->stats.errors++;
1118 			continue;
1119 		}
1120 
1121 		virtio_rx_stats_updated(rxvq, rxm);
1122 
1123 		rx_pkts[nb_rx++] = rxm;
1124 	}
1125 
1126 	rxvq->stats.packets += nb_rx;
1127 
1128 	/* Allocate new mbufs to refill the used descriptors */
1129 	if (likely(!virtqueue_full(vq))) {
1130 		uint16_t free_cnt = vq->vq_free_cnt;
1131 		struct rte_mbuf *new_pkts[free_cnt];
1132 
1133 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1134 						free_cnt) == 0)) {
1135 			error = virtqueue_enqueue_recv_refill_packed(vq,
1136 					new_pkts, free_cnt);
1137 			if (unlikely(error)) {
1138 				for (i = 0; i < free_cnt; i++)
1139 					rte_pktmbuf_free(new_pkts[i]);
1140 			}
1141 			nb_enqueued += free_cnt;
1142 		} else {
1143 			struct rte_eth_dev *dev =
1144 				&rte_eth_devices[rxvq->port_id];
1145 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1146 		}
1147 	}
1148 
1149 	if (likely(nb_enqueued)) {
1150 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1151 			virtqueue_notify(vq);
1152 			PMD_RX_LOG(DEBUG, "Notified");
1153 		}
1154 	}
1155 
1156 	return nb_rx;
1157 }
1158 
1159 
1160 uint16_t
1161 virtio_recv_pkts_inorder(void *rx_queue,
1162 			struct rte_mbuf **rx_pkts,
1163 			uint16_t nb_pkts)
1164 {
1165 	struct virtnet_rx *rxvq = rx_queue;
1166 	struct virtqueue *vq = rxvq->vq;
1167 	struct virtio_hw *hw = vq->hw;
1168 	struct rte_mbuf *rxm;
1169 	struct rte_mbuf *prev = NULL;
1170 	uint16_t nb_used, num, nb_rx;
1171 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1172 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1173 	int error;
1174 	uint32_t nb_enqueued;
1175 	uint32_t seg_num;
1176 	uint32_t seg_res;
1177 	uint32_t hdr_size;
1178 	int32_t i;
1179 
1180 	nb_rx = 0;
1181 	if (unlikely(hw->started == 0))
1182 		return nb_rx;
1183 
1184 	nb_used = virtqueue_nused(vq);
1185 	nb_used = RTE_MIN(nb_used, nb_pkts);
1186 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1187 
1188 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1189 
1190 	nb_enqueued = 0;
1191 	seg_num = 1;
1192 	seg_res = 0;
1193 	hdr_size = hw->vtnet_hdr_size;
1194 
1195 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1196 
1197 	for (i = 0; i < num; i++) {
1198 		struct virtio_net_hdr_mrg_rxbuf *header;
1199 
1200 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1201 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1202 
1203 		rxm = rcv_pkts[i];
1204 
1205 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1206 			PMD_RX_LOG(ERR, "Packet drop");
1207 			nb_enqueued++;
1208 			virtio_discard_rxbuf_inorder(vq, rxm);
1209 			rxvq->stats.errors++;
1210 			continue;
1211 		}
1212 
1213 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1214 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1215 			 - hdr_size);
1216 
1217 		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1218 			seg_num = header->num_buffers;
1219 			if (seg_num == 0)
1220 				seg_num = 1;
1221 		} else {
1222 			seg_num = 1;
1223 		}
1224 
1225 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1226 		rxm->nb_segs = seg_num;
1227 		rxm->ol_flags = 0;
1228 		rxm->vlan_tci = 0;
1229 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1230 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1231 
1232 		rxm->port = rxvq->port_id;
1233 
1234 		rx_pkts[nb_rx] = rxm;
1235 		prev = rxm;
1236 
1237 		if (vq->hw->has_rx_offload &&
1238 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1239 			virtio_discard_rxbuf_inorder(vq, rxm);
1240 			rxvq->stats.errors++;
1241 			continue;
1242 		}
1243 
1244 		if (hw->vlan_strip)
1245 			rte_vlan_strip(rx_pkts[nb_rx]);
1246 
1247 		seg_res = seg_num - 1;
1248 
1249 		/* Merge remaining segments */
1250 		while (seg_res != 0 && i < (num - 1)) {
1251 			i++;
1252 
1253 			rxm = rcv_pkts[i];
1254 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1255 			rxm->pkt_len = (uint32_t)(len[i]);
1256 			rxm->data_len = (uint16_t)(len[i]);
1257 
1258 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1259 
1260 			prev->next = rxm;
1261 			prev = rxm;
1262 			seg_res -= 1;
1263 		}
1264 
1265 		if (!seg_res) {
1266 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1267 			nb_rx++;
1268 		}
1269 	}
1270 
1271 	/* The last packet may still have segments left to merge */
1272 	while (seg_res != 0) {
1273 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1274 					VIRTIO_MBUF_BURST_SZ);
1275 
1276 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1277 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1278 							   rcv_cnt);
1279 			uint16_t extra_idx = 0;
1280 
1281 			rcv_cnt = num;
1282 			while (extra_idx < rcv_cnt) {
1283 				rxm = rcv_pkts[extra_idx];
1284 				rxm->data_off =
1285 					RTE_PKTMBUF_HEADROOM - hdr_size;
1286 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1287 				rxm->data_len = (uint16_t)(len[extra_idx]);
1288 				prev->next = rxm;
1289 				prev = rxm;
1290 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1291 				extra_idx += 1;
1292 			}
1293 			seg_res -= rcv_cnt;
1294 
1295 			if (!seg_res) {
1296 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1297 				nb_rx++;
1298 			}
1299 		} else {
1300 			PMD_RX_LOG(ERR,
1301 					"Not enough segments for packet.");
1302 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1303 			rxvq->stats.errors++;
1304 			break;
1305 		}
1306 	}
1307 
1308 	rxvq->stats.packets += nb_rx;
1309 
1310 	/* Allocate new mbufs to refill the used descriptors */
1311 
1312 	if (likely(!virtqueue_full(vq))) {
1313 		/* free_cnt may include mrg descs */
1314 		uint16_t free_cnt = vq->vq_free_cnt;
1315 		struct rte_mbuf *new_pkts[free_cnt];
1316 
1317 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1318 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1319 					free_cnt);
1320 			if (unlikely(error)) {
1321 				for (i = 0; i < free_cnt; i++)
1322 					rte_pktmbuf_free(new_pkts[i]);
1323 			}
1324 			nb_enqueued += free_cnt;
1325 		} else {
1326 			struct rte_eth_dev *dev =
1327 				&rte_eth_devices[rxvq->port_id];
1328 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1329 		}
1330 	}
1331 
1332 	if (likely(nb_enqueued)) {
1333 		vq_update_avail_idx(vq);
1334 
1335 		if (unlikely(virtqueue_kick_prepare(vq))) {
1336 			virtqueue_notify(vq);
1337 			PMD_RX_LOG(DEBUG, "Notified");
1338 		}
1339 	}
1340 
1341 	return nb_rx;
1342 }
1343 
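/*
 * Mergeable receive: with VIRTIO_NET_F_MRG_RXBUF the device may spread a
 * single packet over several receive buffers and reports the count in the
 * virtio-net header's num_buffers field. The loops below dequeue the
 * extra buffers and chain them onto the head mbuf, growing pkt_len as
 * segments are appended.
 */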
1344 uint16_t
1345 virtio_recv_mergeable_pkts(void *rx_queue,
1346 			struct rte_mbuf **rx_pkts,
1347 			uint16_t nb_pkts)
1348 {
1349 	struct virtnet_rx *rxvq = rx_queue;
1350 	struct virtqueue *vq = rxvq->vq;
1351 	struct virtio_hw *hw = vq->hw;
1352 	struct rte_mbuf *rxm;
1353 	struct rte_mbuf *prev = NULL;
1354 	uint16_t nb_used, num, nb_rx = 0;
1355 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1356 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1357 	int error;
1358 	uint32_t nb_enqueued = 0;
1359 	uint32_t seg_num = 0;
1360 	uint32_t seg_res = 0;
1361 	uint32_t hdr_size = hw->vtnet_hdr_size;
1362 	int32_t i;
1363 
1364 	if (unlikely(hw->started == 0))
1365 		return nb_rx;
1366 
1367 	nb_used = virtqueue_nused(vq);
1368 
1369 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1370 
1371 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1372 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1373 		num = VIRTIO_MBUF_BURST_SZ;
1374 	if (likely(num > DESC_PER_CACHELINE))
1375 		num = num - ((vq->vq_used_cons_idx + num) %
1376 				DESC_PER_CACHELINE);
1377 
1378 
1379 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1380 
1381 	for (i = 0; i < num; i++) {
1382 		struct virtio_net_hdr_mrg_rxbuf *header;
1383 
1384 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1385 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1386 
1387 		rxm = rcv_pkts[i];
1388 
1389 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1390 			PMD_RX_LOG(ERR, "Packet drop");
1391 			nb_enqueued++;
1392 			virtio_discard_rxbuf(vq, rxm);
1393 			rxvq->stats.errors++;
1394 			continue;
1395 		}
1396 
1397 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1398 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1399 			 - hdr_size);
1400 		seg_num = header->num_buffers;
1401 		if (seg_num == 0)
1402 			seg_num = 1;
1403 
1404 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1405 		rxm->nb_segs = seg_num;
1406 		rxm->ol_flags = 0;
1407 		rxm->vlan_tci = 0;
1408 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1409 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1410 
1411 		rxm->port = rxvq->port_id;
1412 
1413 		rx_pkts[nb_rx] = rxm;
1414 		prev = rxm;
1415 
1416 		if (hw->has_rx_offload &&
1417 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1418 			virtio_discard_rxbuf(vq, rxm);
1419 			rxvq->stats.errors++;
1420 			continue;
1421 		}
1422 
1423 		if (hw->vlan_strip)
1424 			rte_vlan_strip(rx_pkts[nb_rx]);
1425 
1426 		seg_res = seg_num - 1;
1427 
1428 		/* Merge remaining segments */
1429 		while (seg_res != 0 && i < (num - 1)) {
1430 			i++;
1431 
1432 			rxm = rcv_pkts[i];
1433 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1434 			rxm->pkt_len = (uint32_t)(len[i]);
1435 			rxm->data_len = (uint16_t)(len[i]);
1436 
1437 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1438 
1439 			prev->next = rxm;
1440 			prev = rxm;
1441 			seg_res -= 1;
1442 		}
1443 
1444 		if (!seg_res) {
1445 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1446 			nb_rx++;
1447 		}
1448 	}
1449 
1450 	/* The last packet may still have segments left to merge */
1451 	while (seg_res != 0) {
1452 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1453 					VIRTIO_MBUF_BURST_SZ);
1454 
1455 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1456 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1457 							   rcv_cnt);
1458 			uint16_t extra_idx = 0;
1459 
1460 			rcv_cnt = num;
1461 			while (extra_idx < rcv_cnt) {
1462 				rxm = rcv_pkts[extra_idx];
1463 				rxm->data_off =
1464 					RTE_PKTMBUF_HEADROOM - hdr_size;
1465 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1466 				rxm->data_len = (uint16_t)(len[extra_idx]);
1467 				prev->next = rxm;
1468 				prev = rxm;
1469 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1470 				extra_idx += 1;
1471 			}
1472 			seg_res -= rcv_cnt;
1473 
1474 			if (!seg_res) {
1475 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1476 				nb_rx++;
1477 			}
1478 		} else {
1479 			PMD_RX_LOG(ERR,
1480 					"Not enough segments for packet.");
1481 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1482 			rxvq->stats.errors++;
1483 			break;
1484 		}
1485 	}
1486 
1487 	rxvq->stats.packets += nb_rx;
1488 
1489 	/* Allocate new mbufs to refill the used descriptors */
1490 	if (likely(!virtqueue_full(vq))) {
1491 		/* free_cnt may include mrg descs */
1492 		uint16_t free_cnt = vq->vq_free_cnt;
1493 		struct rte_mbuf *new_pkts[free_cnt];
1494 
1495 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1496 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1497 					free_cnt);
1498 			if (unlikely(error)) {
1499 				for (i = 0; i < free_cnt; i++)
1500 					rte_pktmbuf_free(new_pkts[i]);
1501 			}
1502 			nb_enqueued += free_cnt;
1503 		} else {
1504 			struct rte_eth_dev *dev =
1505 				&rte_eth_devices[rxvq->port_id];
1506 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1507 		}
1508 	}
1509 
1510 	if (likely(nb_enqueued)) {
1511 		vq_update_avail_idx(vq);
1512 
1513 		if (unlikely(virtqueue_kick_prepare(vq))) {
1514 			virtqueue_notify(vq);
1515 			PMD_RX_LOG(DEBUG, "Notified");
1516 		}
1517 	}
1518 
1519 	return nb_rx;
1520 }
1521 
1522 uint16_t
1523 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1524 			struct rte_mbuf **rx_pkts,
1525 			uint16_t nb_pkts)
1526 {
1527 	struct virtnet_rx *rxvq = rx_queue;
1528 	struct virtqueue *vq = rxvq->vq;
1529 	struct virtio_hw *hw = vq->hw;
1530 	struct rte_mbuf *rxm;
1531 	struct rte_mbuf *prev = NULL;
1532 	uint16_t num, nb_rx = 0;
1533 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1534 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1535 	uint32_t nb_enqueued = 0;
1536 	uint32_t seg_num = 0;
1537 	uint32_t seg_res = 0;
1538 	uint32_t hdr_size = hw->vtnet_hdr_size;
1539 	int32_t i;
1540 	int error;
1541 
1542 	if (unlikely(hw->started == 0))
1543 		return nb_rx;
1544 
1545 
1546 	num = nb_pkts;
1547 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1548 		num = VIRTIO_MBUF_BURST_SZ;
1549 	if (likely(num > DESC_PER_CACHELINE))
1550 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1551 
1552 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1553 
1554 	for (i = 0; i < num; i++) {
1555 		struct virtio_net_hdr_mrg_rxbuf *header;
1556 
1557 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1558 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1559 
1560 		rxm = rcv_pkts[i];
1561 
1562 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1563 			PMD_RX_LOG(ERR, "Packet drop");
1564 			nb_enqueued++;
1565 			virtio_discard_rxbuf(vq, rxm);
1566 			rxvq->stats.errors++;
1567 			continue;
1568 		}
1569 
1570 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1571 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1572 		seg_num = header->num_buffers;
1573 
1574 		if (seg_num == 0)
1575 			seg_num = 1;
1576 
1577 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1578 		rxm->nb_segs = seg_num;
1579 		rxm->ol_flags = 0;
1580 		rxm->vlan_tci = 0;
1581 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1582 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1583 
1584 		rxm->port = rxvq->port_id;
1585 		rx_pkts[nb_rx] = rxm;
1586 		prev = rxm;
1587 
1588 		if (hw->has_rx_offload &&
1589 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1590 			virtio_discard_rxbuf(vq, rxm);
1591 			rxvq->stats.errors++;
1592 			continue;
1593 		}
1594 
1595 		if (hw->vlan_strip)
1596 			rte_vlan_strip(rx_pkts[nb_rx]);
1597 
1598 		seg_res = seg_num - 1;
1599 
1600 		/* Merge remaining segments */
1601 		while (seg_res != 0 && i < (num - 1)) {
1602 			i++;
1603 
1604 			rxm = rcv_pkts[i];
1605 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1606 			rxm->pkt_len = (uint32_t)(len[i]);
1607 			rxm->data_len = (uint16_t)(len[i]);
1608 
1609 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1610 
1611 			prev->next = rxm;
1612 			prev = rxm;
1613 			seg_res -= 1;
1614 		}
1615 
1616 		if (!seg_res) {
1617 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1618 			nb_rx++;
1619 		}
1620 	}
1621 
1622 	/* The last packet may still have segments left to merge */
1623 	while (seg_res != 0) {
1624 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1625 					VIRTIO_MBUF_BURST_SZ);
1626 		uint16_t extra_idx = 0;
1627 
1628 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1629 				len, rcv_cnt);
1630 		if (unlikely(rcv_cnt == 0)) {
1631 			PMD_RX_LOG(ERR, "Not enough segments for packet.");
1632 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1633 			rxvq->stats.errors++;
1634 			break;
1635 		}
1636 
1637 		while (extra_idx < rcv_cnt) {
1638 			rxm = rcv_pkts[extra_idx];
1639 
1640 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1641 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1642 			rxm->data_len = (uint16_t)(len[extra_idx]);
1643 
1644 			prev->next = rxm;
1645 			prev = rxm;
1646 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1647 			extra_idx += 1;
1648 		}
1649 		seg_res -= rcv_cnt;
1650 		if (!seg_res) {
1651 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1652 			nb_rx++;
1653 		}
1654 	}
1655 
1656 	rxvq->stats.packets += nb_rx;
1657 
1658 	/* Allocate new mbufs to refill the used descriptors */
1659 	if (likely(!virtqueue_full(vq))) {
1660 		/* free_cnt may include mrg descs */
1661 		uint16_t free_cnt = vq->vq_free_cnt;
1662 		struct rte_mbuf *new_pkts[free_cnt];
1663 
1664 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1665 			error = virtqueue_enqueue_recv_refill_packed(vq,
1666 					new_pkts, free_cnt);
1667 			if (unlikely(error)) {
1668 				for (i = 0; i < free_cnt; i++)
1669 					rte_pktmbuf_free(new_pkts[i]);
1670 			}
1671 			nb_enqueued += free_cnt;
1672 		} else {
1673 			struct rte_eth_dev *dev =
1674 				&rte_eth_devices[rxvq->port_id];
1675 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1676 		}
1677 	}
1678 
1679 	if (likely(nb_enqueued)) {
1680 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1681 			virtqueue_notify(vq);
1682 			PMD_RX_LOG(DEBUG, "Notified");
1683 		}
1684 	}
1685 
1686 	return nb_rx;
1687 }
1688 
1689 uint16_t
1690 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1691 			uint16_t nb_pkts)
1692 {
1693 	uint16_t nb_tx;
1694 	int error;
1695 
1696 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1697 		struct rte_mbuf *m = tx_pkts[nb_tx];
1698 
1699 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1700 		error = rte_validate_tx_offload(m);
1701 		if (unlikely(error)) {
1702 			rte_errno = -error;
1703 			break;
1704 		}
1705 #endif
1706 
1707 		/* Do VLAN tag insertion */
1708 		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1709 			error = rte_vlan_insert(&m);
1710 			/* rte_vlan_insert() may change pointer
1711 			 * even in the case of failure
1712 			 */
1713 			tx_pkts[nb_tx] = m;
1714 
1715 			if (unlikely(error)) {
1716 				rte_errno = -error;
1717 				break;
1718 			}
1719 		}
1720 
1721 		error = rte_net_intel_cksum_prepare(m);
1722 		if (unlikely(error)) {
1723 			rte_errno = -error;
1724 			break;
1725 		}
1726 
1727 		if (m->ol_flags & PKT_TX_TCP_SEG)
1728 			virtio_tso_fix_cksum(m);
1729 	}
1730 
1731 	return nb_tx;
1732 }
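/*
 * Illustrative usage (not part of the driver): applications reach the
 * prepare/transmit callbacks in this file through the generic ethdev API,
 * e.g. assuming an already configured port and queue:
 *
 *   uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *   n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *
 * rte_eth_tx_prepare() dispatches to virtio_xmit_pkts_prepare() and
 * rte_eth_tx_burst() to one of the virtio_xmit_pkts* variants selected
 * for the negotiated ring format.
 */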
1733 
1734 uint16_t
1735 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1736 			uint16_t nb_pkts)
1737 {
1738 	struct virtnet_tx *txvq = tx_queue;
1739 	struct virtqueue *vq = txvq->vq;
1740 	struct virtio_hw *hw = vq->hw;
1741 	uint16_t hdr_size = hw->vtnet_hdr_size;
1742 	uint16_t nb_tx = 0;
1743 	bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
1744 
1745 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1746 		return nb_tx;
1747 
1748 	if (unlikely(nb_pkts < 1))
1749 		return nb_pkts;
1750 
1751 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1752 
1753 	if (nb_pkts > vq->vq_free_cnt)
1754 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1755 					   in_order);
1756 
1757 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1758 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1759 		int can_push = 0, use_indirect = 0, slots, need;
1760 
1761 		/* optimize ring usage */
1762 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1763 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1764 		    rte_mbuf_refcnt_read(txm) == 1 &&
1765 		    RTE_MBUF_DIRECT(txm) &&
1766 		    txm->nb_segs == 1 &&
1767 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1768 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1769 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1770 			can_push = 1;
1771 		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1772 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1773 			use_indirect = 1;
1774 		/* How many main ring entries are needed for this Tx?
1775 		 * indirect   => 1
1776 		 * any_layout => number of segments
1777 		 * default    => number of segments + 1
1778 		 */
1779 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1780 		need = slots - vq->vq_free_cnt;
1781 
1782 		/* A positive value means more free vring descriptors are needed */
1783 		if (unlikely(need > 0)) {
1784 			virtio_xmit_cleanup_packed(vq, need, in_order);
1785 			need = slots - vq->vq_free_cnt;
1786 			if (unlikely(need > 0)) {
1787 				PMD_TX_LOG(ERR,
1788 					   "No free tx descriptors to transmit");
1789 				break;
1790 			}
1791 		}
1792 
1793 		/* Enqueue Packet buffers */
1794 		if (can_push)
1795 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1796 		else
1797 			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1798 						      use_indirect, 0,
1799 						      in_order);
1800 
1801 		virtio_update_packet_stats(&txvq->stats, txm);
1802 	}
1803 
1804 	txvq->stats.packets += nb_tx;
1805 
1806 	if (likely(nb_tx)) {
1807 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1808 			virtqueue_notify(vq);
1809 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1810 		}
1811 	}
1812 
1813 	return nb_tx;
1814 }
1815 
1816 uint16_t
1817 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1818 {
1819 	struct virtnet_tx *txvq = tx_queue;
1820 	struct virtqueue *vq = txvq->vq;
1821 	struct virtio_hw *hw = vq->hw;
1822 	uint16_t hdr_size = hw->vtnet_hdr_size;
1823 	uint16_t nb_used, nb_tx = 0;
1824 
1825 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1826 		return nb_tx;
1827 
1828 	if (unlikely(nb_pkts < 1))
1829 		return nb_pkts;
1830 
1831 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1832 
1833 	nb_used = virtqueue_nused(vq);
1834 
1835 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1836 		virtio_xmit_cleanup(vq, nb_used);
1837 
1838 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1839 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1840 		int can_push = 0, use_indirect = 0, slots, need;
1841 
1842 		/* optimize ring usage */
1843 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1844 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1845 		    rte_mbuf_refcnt_read(txm) == 1 &&
1846 		    RTE_MBUF_DIRECT(txm) &&
1847 		    txm->nb_segs == 1 &&
1848 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1849 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1850 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1851 			can_push = 1;
1852 		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1853 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1854 			use_indirect = 1;
1855 
1856 		/* How many main ring entries are needed for this Tx?
1857 		 * any_layout => number of segments
1858 		 * indirect   => 1
1859 		 * default    => number of segments + 1
1860 		 */
1861 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1862 		need = slots - vq->vq_free_cnt;
1863 
1864 		/* A positive value means more free vring descriptors are needed */
1865 		if (unlikely(need > 0)) {
1866 			nb_used = virtqueue_nused(vq);
1867 
1868 			need = RTE_MIN(need, (int)nb_used);
1869 
1870 			virtio_xmit_cleanup(vq, need);
1871 			need = slots - vq->vq_free_cnt;
1872 			if (unlikely(need > 0)) {
1873 				PMD_TX_LOG(ERR,
1874 					   "No free tx descriptors to transmit");
1875 				break;
1876 			}
1877 		}
1878 
1879 		/* Enqueue Packet buffers */
1880 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1881 			can_push, 0);
1882 
1883 		virtio_update_packet_stats(&txvq->stats, txm);
1884 	}
1885 
1886 	txvq->stats.packets += nb_tx;
1887 
1888 	if (likely(nb_tx)) {
1889 		vq_update_avail_idx(vq);
1890 
1891 		if (unlikely(virtqueue_kick_prepare(vq))) {
1892 			virtqueue_notify(vq);
1893 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1894 		}
1895 	}
1896 
1897 	return nb_tx;
1898 }
1899 
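/*
 * Reclaim as many used in-order descriptors as currently possible
 * (bounded by the number actually marked used) and return how many of
 * the @need descriptors are still missing; a result > 0 means the caller
 * cannot enqueue yet.
 */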
1900 static __rte_always_inline int
1901 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1902 {
1903 	uint16_t nb_used, nb_clean, nb_descs;
1904 
1905 	nb_descs = vq->vq_free_cnt + need;
1906 	nb_used = virtqueue_nused(vq);
1907 	nb_clean = RTE_MIN(need, (int)nb_used);
1908 
1909 	virtio_xmit_cleanup_inorder(vq, nb_clean);
1910 
1911 	return nb_descs - vq->vq_free_cnt;
1912 }
1913 
1914 uint16_t
1915 virtio_xmit_pkts_inorder(void *tx_queue,
1916 			struct rte_mbuf **tx_pkts,
1917 			uint16_t nb_pkts)
1918 {
1919 	struct virtnet_tx *txvq = tx_queue;
1920 	struct virtqueue *vq = txvq->vq;
1921 	struct virtio_hw *hw = vq->hw;
1922 	uint16_t hdr_size = hw->vtnet_hdr_size;
1923 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1924 	struct rte_mbuf *inorder_pkts[nb_pkts];
1925 	int need;
1926 
1927 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1928 		return nb_tx;
1929 
1930 	if (unlikely(nb_pkts < 1))
1931 		return nb_pkts;
1932 
1933 	VIRTQUEUE_DUMP(vq);
1934 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1935 	nb_used = virtqueue_nused(vq);
1936 
1937 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1938 		virtio_xmit_cleanup_inorder(vq, nb_used);
1939 
1940 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1941 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1942 		int slots;
1943 
1944 		/* optimize ring usage */
1945 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1946 		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1947 		     rte_mbuf_refcnt_read(txm) == 1 &&
1948 		     RTE_MBUF_DIRECT(txm) &&
1949 		     txm->nb_segs == 1 &&
1950 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
1951 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1952 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1953 			inorder_pkts[nb_inorder_pkts] = txm;
1954 			nb_inorder_pkts++;
1955 
1956 			continue;
1957 		}
1958 
1959 		if (nb_inorder_pkts) {
1960 			need = nb_inorder_pkts - vq->vq_free_cnt;
1961 			if (unlikely(need > 0)) {
1962 				need = virtio_xmit_try_cleanup_inorder(vq,
1963 								       need);
1964 				if (unlikely(need > 0)) {
1965 					PMD_TX_LOG(ERR,
1966 						"No free tx descriptors to "
1967 						"transmit");
1968 					break;
1969 				}
1970 			}
1971 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1972 							nb_inorder_pkts);
1973 			nb_inorder_pkts = 0;
1974 		}
1975 
1976 		slots = txm->nb_segs + 1;
1977 		need = slots - vq->vq_free_cnt;
1978 		if (unlikely(need > 0)) {
1979 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
1980 
1981 			if (unlikely(need > 0)) {
1982 				PMD_TX_LOG(ERR,
1983 					"No free tx descriptors to transmit");
1984 				break;
1985 			}
1986 		}
1987 		/* Enqueue Packet buffers */
1988 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
1989 
1990 		virtio_update_packet_stats(&txvq->stats, txm);
1991 	}
1992 
1993 	/* Transmit all inorder packets */
1994 	if (nb_inorder_pkts) {
1995 		need = nb_inorder_pkts - vq->vq_free_cnt;
1996 		if (unlikely(need > 0)) {
1997 			need = virtio_xmit_try_cleanup_inorder(vq,
1998 								  need);
1999 			if (unlikely(need > 0)) {
2000 				PMD_TX_LOG(ERR,
2001 					"No free tx descriptors to transmit");
2002 				nb_inorder_pkts = vq->vq_free_cnt;
2003 				nb_tx -= need;
2004 			}
2005 		}
2006 
2007 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2008 						nb_inorder_pkts);
2009 	}
2010 
2011 	txvq->stats.packets += nb_tx;
2012 
2013 	if (likely(nb_tx)) {
2014 		vq_update_avail_idx(vq);
2015 
2016 		if (unlikely(virtqueue_kick_prepare(vq))) {
2017 			virtqueue_notify(vq);
2018 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2019 		}
2020 	}
2021 
2022 	VIRTQUEUE_DUMP(vq);
2023 
2024 	return nb_tx;
2025 }
2026 
2027 #ifndef CC_AVX512_SUPPORT
2028 uint16_t
2029 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2030 			    struct rte_mbuf **rx_pkts __rte_unused,
2031 			    uint16_t nb_pkts __rte_unused)
2032 {
2033 	return 0;
2034 }
2035 
2036 uint16_t
2037 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2038 			    struct rte_mbuf **tx_pkts __rte_unused,
2039 			    uint16_t nb_pkts __rte_unused)
2040 {
2041 	return 0;
2042 }
2043 #endif /* ifndef CC_AVX512_SUPPORT */
2044