xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdalign.h>
6 #include <stdint.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 
12 #include <rte_cycles.h>
13 #include <rte_memory.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_mempool.h>
16 #include <rte_malloc.h>
17 #include <rte_mbuf.h>
18 #include <rte_ether.h>
19 #include <ethdev_driver.h>
20 #include <rte_prefetch.h>
21 #include <rte_string_fns.h>
22 #include <rte_errno.h>
23 #include <rte_byteorder.h>
24 #include <rte_net.h>
25 #include <rte_ip.h>
26 #include <rte_udp.h>
27 #include <rte_tcp.h>
28 
29 #include "virtio_logs.h"
30 #include "virtio_ethdev.h"
31 #include "virtio.h"
32 #include "virtqueue.h"
33 #include "virtio_rxtx.h"
34 #include "virtio_rxtx_simple.h"
35 #include "virtio_ring.h"
36 
37 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
38 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
39 #else
40 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
41 #endif
42 
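/*
 * In-order free: descriptors are consumed sequentially, so releasing a
 * chain only needs to credit the free counter and advance the tail index.
 */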
43 void
44 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
45 {
46 	vq->vq_free_cnt += num;
47 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
48 }
49 
50 void
51 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
52 {
53 	struct vring_desc *dp, *dp_tail;
54 	struct vq_desc_extra *dxp;
55 	uint16_t desc_idx_last = desc_idx;
56 
57 	dp  = &vq->vq_split.ring.desc[desc_idx];
58 	dxp = &vq->vq_descx[desc_idx];
59 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
60 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
61 		while (dp->flags & VRING_DESC_F_NEXT) {
62 			desc_idx_last = dp->next;
63 			dp = &vq->vq_split.ring.desc[dp->next];
64 		}
65 	}
66 	dxp->ndescs = 0;
67 
68 	/*
69 	 * Append the newly freed chain to the tail of the existing free
70 	 * list, if any. If the virtqueue was completely used, the free list
71 	 * is empty and vq_desc_tail_idx is VQ_RING_DESC_CHAIN_END.
72 	 */
73 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
74 		vq->vq_desc_head_idx = desc_idx;
75 	} else {
76 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
77 		dp_tail->next = desc_idx;
78 	}
79 
80 	vq->vq_desc_tail_idx = desc_idx_last;
81 	dp->next = VQ_RING_DESC_CHAIN_END;
82 }
83 
84 void
85 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
86 {
87 	uint32_t s = mbuf->pkt_len;
88 	struct rte_ether_addr *ea;
89 
90 	stats->bytes += s;
91 
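	/*
	 * Size bins: [0] < 64, [1] == 64, [2]..[5] cover the power-of-two
	 * ranges 65-127, 128-255, 256-511 and 512-1023, [6] 1024-1518,
	 * [7] >= 1519 bytes.
	 */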
92 	if (s == 64) {
93 		stats->size_bins[1]++;
94 	} else if (s > 64 && s < 1024) {
95 		uint32_t bin;
96 
97 		/* count leading zeros, and offset into the correct bin */
98 		bin = (sizeof(s) * 8) - rte_clz32(s) - 5;
99 		stats->size_bins[bin]++;
100 	} else {
101 		if (s < 64)
102 			stats->size_bins[0]++;
103 		else if (s < 1519)
104 			stats->size_bins[6]++;
105 		else
106 			stats->size_bins[7]++;
107 	}
108 
109 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
110 	if (rte_is_multicast_ether_addr(ea)) {
111 		if (rte_is_broadcast_ether_addr(ea))
112 			stats->broadcast++;
113 		else
114 			stats->multicast++;
115 	}
116 }
117 
118 static inline void
119 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
120 {
121 	VIRTIO_DUMP_PACKET(m, m->data_len);
122 
123 	virtio_update_packet_stats(&rxvq->stats, m);
124 }
125 
126 static uint16_t
127 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
128 				  struct rte_mbuf **rx_pkts,
129 				  uint32_t *len,
130 				  uint16_t num)
131 {
132 	struct rte_mbuf *cookie;
133 	uint16_t used_idx;
134 	uint16_t id;
135 	struct vring_packed_desc *desc;
136 	uint16_t i;
137 
138 	desc = vq->vq_packed.ring.desc;
139 
140 	for (i = 0; i < num; i++) {
141 		used_idx = vq->vq_used_cons_idx;
142 		/* desc_is_used() has a load-acquire or rte_io_rmb inside
143 		 * and waits for a used descriptor in the virtqueue.
144 		 */
145 		if (!desc_is_used(&desc[used_idx], vq))
146 			return i;
147 		len[i] = desc[used_idx].len;
148 		id = desc[used_idx].id;
149 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
150 		if (unlikely(cookie == NULL)) {
151 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
152 				vq->vq_used_cons_idx);
153 			break;
154 		}
155 		rte_prefetch0(cookie);
156 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
157 		rx_pkts[i] = cookie;
158 
159 		vq->vq_free_cnt++;
160 		vq->vq_used_cons_idx++;
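		/*
		 * Crossing the end of the ring toggles the used wrap counter,
		 * which desc_is_used() checks against the descriptor flags.
		 */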
161 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
162 			vq->vq_used_cons_idx -= vq->vq_nentries;
163 			vq->vq_packed.used_wrap_counter ^= 1;
164 		}
165 	}
166 
167 	return i;
168 }
169 
170 static uint16_t
171 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
172 			   uint32_t *len, uint16_t num)
173 {
174 	struct vring_used_elem *uep;
175 	struct rte_mbuf *cookie;
176 	uint16_t used_idx, desc_idx;
177 	uint16_t i;
178 
179 	/* Caller has already checked that 'num' used entries are available */
180 	for (i = 0; i < num ; i++) {
181 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
182 		uep = &vq->vq_split.ring.used->ring[used_idx];
183 		desc_idx = (uint16_t) uep->id;
184 		len[i] = uep->len;
185 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
186 
187 		if (unlikely(cookie == NULL)) {
188 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
189 				vq->vq_used_cons_idx);
190 			break;
191 		}
192 
193 		rte_prefetch0(cookie);
194 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
195 		rx_pkts[i]  = cookie;
196 		vq->vq_used_cons_idx++;
197 		vq_ring_free_chain(vq, desc_idx);
198 		vq->vq_descx[desc_idx].cookie = NULL;
199 	}
200 
201 	return i;
202 }
203 
204 static uint16_t
205 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
206 			struct rte_mbuf **rx_pkts,
207 			uint32_t *len,
208 			uint16_t num)
209 {
210 	struct vring_used_elem *uep;
211 	struct rte_mbuf *cookie;
212 	uint16_t used_idx = 0;
213 	uint16_t i;
214 
215 	if (unlikely(num == 0))
216 		return 0;
217 
218 	for (i = 0; i < num; i++) {
219 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
220 		/* Desc idx same as used idx */
221 		uep = &vq->vq_split.ring.used->ring[used_idx];
222 		len[i] = uep->len;
223 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
224 
225 		if (unlikely(cookie == NULL)) {
226 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
227 				vq->vq_used_cons_idx);
228 			break;
229 		}
230 
231 		rte_prefetch0(cookie);
232 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
233 		rx_pkts[i]  = cookie;
234 		vq->vq_used_cons_idx++;
235 		vq->vq_descx[used_idx].cookie = NULL;
236 	}
237 
238 	vq_ring_free_inorder(vq, used_idx, i);
239 	return i;
240 }
241 
242 static inline int
243 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
244 			struct rte_mbuf **cookies,
245 			uint16_t num)
246 {
247 	struct vq_desc_extra *dxp;
248 	struct virtio_hw *hw = vq->hw;
249 	struct vring_desc *start_dp;
250 	uint16_t head_idx, idx, i = 0;
251 
252 	if (unlikely(vq->vq_free_cnt == 0))
253 		return -ENOSPC;
254 	if (unlikely(vq->vq_free_cnt < num))
255 		return -EMSGSIZE;
256 
257 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
258 	start_dp = vq->vq_split.ring.desc;
259 
260 	while (i < num) {
261 		idx = head_idx & (vq->vq_nentries - 1);
262 		dxp = &vq->vq_descx[idx];
263 		dxp->cookie = (void *)cookies[i];
264 		dxp->ndescs = 1;
265 
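		/*
		 * Point the descriptor just in front of the mbuf data area so
		 * the device writes the virtio-net header into the headroom.
		 */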
266 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
267 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
268 		start_dp[idx].len = cookies[i]->buf_len -
269 			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
270 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
271 
272 		vq_update_avail_ring(vq, idx);
273 		head_idx++;
274 		i++;
275 	}
276 
277 	vq->vq_desc_head_idx += num;
278 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
279 	return 0;
280 }
281 
282 static inline int
283 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
284 				uint16_t num)
285 {
286 	struct vq_desc_extra *dxp;
287 	struct virtio_hw *hw = vq->hw;
288 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
289 	uint16_t idx, i;
290 
291 	if (unlikely(vq->vq_free_cnt == 0))
292 		return -ENOSPC;
293 	if (unlikely(vq->vq_free_cnt < num))
294 		return -EMSGSIZE;
295 
296 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
297 		return -EFAULT;
298 
299 	for (i = 0; i < num; i++) {
300 		idx = vq->vq_desc_head_idx;
301 		dxp = &vq->vq_descx[idx];
302 		dxp->cookie = (void *)cookie[i];
303 		dxp->ndescs = 1;
304 
305 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
306 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
307 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
308 			hw->vtnet_hdr_size;
309 		start_dp[idx].flags = VRING_DESC_F_WRITE;
310 		vq->vq_desc_head_idx = start_dp[idx].next;
311 		vq_update_avail_ring(vq, idx);
312 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
313 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
314 			break;
315 		}
316 	}
317 
318 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
319 
320 	return 0;
321 }
322 
323 static inline void
324 virtqueue_refill_single_packed(struct virtqueue *vq,
325 			       struct vring_packed_desc *dp,
326 			       struct rte_mbuf *cookie)
327 {
328 	uint16_t flags = vq->vq_packed.cached_flags;
329 	struct virtio_hw *hw = vq->hw;
330 
331 	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
332 	dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
333 
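	/*
	 * The flags are written last: the helper uses a store-release (or a
	 * write barrier) so the address and length are visible to the device
	 * before the descriptor is marked available.
	 */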
334 	virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
335 
336 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
337 		vq->vq_avail_idx -= vq->vq_nentries;
338 		vq->vq_packed.cached_flags ^=
339 			VRING_PACKED_DESC_F_AVAIL_USED;
340 		flags = vq->vq_packed.cached_flags;
341 	}
342 }
343 
344 static inline int
345 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
346 				     struct rte_mbuf **cookie, uint16_t num)
347 {
348 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
349 	struct vq_desc_extra *dxp;
350 	uint16_t idx;
351 	int i;
352 
353 	if (unlikely(vq->vq_free_cnt == 0))
354 		return -ENOSPC;
355 	if (unlikely(vq->vq_free_cnt < num))
356 		return -EMSGSIZE;
357 
358 	for (i = 0; i < num; i++) {
359 		idx = vq->vq_avail_idx;
360 		dxp = &vq->vq_descx[idx];
361 		dxp->cookie = (void *)cookie[i];
362 		dxp->ndescs = 1;
363 
364 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
365 	}
366 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
367 	return 0;
368 }
369 
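/*
 * Refill after the ring is live: reuse the buffer id already present in
 * the descriptor (written back by the device) rather than the ring index.
 */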
370 static inline int
371 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
372 				     struct rte_mbuf **cookie, uint16_t num)
373 {
374 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
375 	struct vq_desc_extra *dxp;
376 	uint16_t idx, did;
377 	int i;
378 
379 	if (unlikely(vq->vq_free_cnt == 0))
380 		return -ENOSPC;
381 	if (unlikely(vq->vq_free_cnt < num))
382 		return -EMSGSIZE;
383 
384 	for (i = 0; i < num; i++) {
385 		idx = vq->vq_avail_idx;
386 		did = start_dp[idx].id;
387 		dxp = &vq->vq_descx[did];
388 		dxp->cookie = (void *)cookie[i];
389 		dxp->ndescs = 1;
390 
391 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
392 	}
393 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
394 	return 0;
395 }
396 
397 /* When doing TSO, the IP length is not included in the pseudo header
398  * checksum of the packet given to the PMD, but virtio expects it to be
399  * included.
400  */
401 static void
402 virtio_tso_fix_cksum(struct rte_mbuf *m)
403 {
404 	/* common case: header is not fragmented */
405 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
406 			m->l4_len)) {
407 		struct rte_ipv4_hdr *iph;
408 		struct rte_tcp_hdr *th;
409 		uint16_t prev_cksum, new_cksum;
410 		uint32_t ip_paylen;
411 		uint32_t tmp;
412 
413 		iph = rte_pktmbuf_mtod_offset(m,
414 					struct rte_ipv4_hdr *, m->l2_len);
415 		th = RTE_PTR_ADD(iph, m->l3_len);
416 
417 		/*
418 		 * Recompute the IPv4 header checksum with the current total
419 		 * length value (whatever it is), so the checksum remains
420 		 * correct after the length edits done for TSO.
421 		 */
422 		if ((iph->version_ihl >> 4) == 4) {
423 			iph->hdr_checksum = 0;
424 			iph->hdr_checksum = rte_ipv4_cksum(iph);
425 		}
426 
427 		/*
428 		 * Do not use the IPv4 total length or IPv6 payload length fields
429 		 * to get the TSO payload length, since it may not fit into 16 bits.
430 		 */
431 		ip_paylen = rte_cpu_to_be_32(rte_pktmbuf_pkt_len(m) - m->l2_len -
432 					m->l3_len);
433 
434 		/* add ip_paylen into the pseudo header checksum, folding the carry */
435 		prev_cksum = th->cksum;
436 		tmp = prev_cksum;
437 		tmp += (ip_paylen & 0xffff) + (ip_paylen >> 16);
438 		tmp = (tmp & 0xffff) + (tmp >> 16);
439 		new_cksum = tmp;
440 
441 		/* replace it in the packet */
442 		th->cksum = new_cksum;
443 	}
444 }
445 
449 static inline void
450 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
451 			struct rte_mbuf **cookies,
452 			uint16_t num)
453 {
454 	struct vq_desc_extra *dxp;
455 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
456 	struct vring_desc *start_dp;
457 	struct virtio_net_hdr *hdr;
458 	uint16_t idx;
459 	int16_t head_size = vq->hw->vtnet_hdr_size;
460 	uint16_t i = 0;
461 
462 	idx = vq->vq_desc_head_idx;
463 	start_dp = vq->vq_split.ring.desc;
464 
465 	while (i < num) {
466 		idx = idx & (vq->vq_nentries - 1);
467 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
468 		dxp->cookie = (void *)cookies[i];
469 		dxp->ndescs = 1;
470 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
471 
472 		hdr = rte_pktmbuf_mtod_offset(cookies[i],
473 				struct virtio_net_hdr *, -head_size);
474 
475 		/* if offload disabled, hdr is not zeroed yet, do it now */
476 		if (!vq->hw->has_tx_offload)
477 			virtqueue_clear_net_hdr(hdr);
478 		else
479 			virtqueue_xmit_offload(hdr, cookies[i]);
480 
481 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
482 		start_dp[idx].len = cookies[i]->data_len + head_size;
483 		start_dp[idx].flags = 0;
484 
485 
486 		vq_update_avail_ring(vq, idx);
487 
488 		idx++;
489 		i++;
490 	}
491 
492 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
493 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
494 }
495 
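/*
 * Fast path for a single-segment packet whose virtio-net header can be
 * pushed into the mbuf headroom: exactly one packed descriptor is used.
 */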
496 static inline void
497 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
498 				   struct rte_mbuf *cookie,
499 				   int in_order)
500 {
501 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
502 	struct vring_packed_desc *dp;
503 	struct vq_desc_extra *dxp;
504 	uint16_t idx, id, flags;
505 	int16_t head_size = vq->hw->vtnet_hdr_size;
506 	struct virtio_net_hdr *hdr;
507 
508 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
509 	idx = vq->vq_avail_idx;
510 	dp = &vq->vq_packed.ring.desc[idx];
511 
512 	dxp = &vq->vq_descx[id];
513 	dxp->ndescs = 1;
514 	dxp->cookie = cookie;
515 
516 	flags = vq->vq_packed.cached_flags;
517 
518 	/* prepend cannot fail, checked by caller */
519 	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
520 				      -head_size);
521 
522 	/* if offload disabled, hdr is not zeroed yet, do it now */
523 	if (!vq->hw->has_tx_offload)
524 		virtqueue_clear_net_hdr(hdr);
525 	else
526 		virtqueue_xmit_offload(hdr, cookie);
527 
528 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
529 	dp->len = cookie->data_len + head_size;
530 	dp->id = id;
531 
532 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
533 		vq->vq_avail_idx -= vq->vq_nentries;
534 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
535 	}
536 
537 	vq->vq_free_cnt--;
538 
539 	if (!in_order) {
540 		vq->vq_desc_head_idx = dxp->next;
541 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
542 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
543 	}
544 
545 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
546 }
547 
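/*
 * Split-ring transmit enqueue. Three descriptor layouts are possible:
 *  - can_push: the virtio-net header is prepended into the mbuf headroom
 *    and the packet is described by the mbuf chain alone;
 *  - use_indirect: a single ring slot points to an indirect descriptor
 *    table kept in the per-queue header memzone;
 *  - default: the first slot points to the header in the reserved region
 *    and the mbuf chain is linked to it with VRING_DESC_F_NEXT.
 */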
548 static inline void
549 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
550 			uint16_t needed, int use_indirect, int can_push,
551 			int in_order)
552 {
553 	struct virtio_tx_region *txr = txvq->hdr_mz->addr;
554 	struct vq_desc_extra *dxp;
555 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
556 	struct vring_desc *start_dp;
557 	uint16_t seg_num = cookie->nb_segs;
558 	uint16_t head_idx, idx;
559 	int16_t head_size = vq->hw->vtnet_hdr_size;
560 	bool prepend_header = false;
561 	struct virtio_net_hdr *hdr;
562 
563 	head_idx = vq->vq_desc_head_idx;
564 	idx = head_idx;
565 	if (in_order)
566 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
567 	else
568 		dxp = &vq->vq_descx[idx];
569 	dxp->cookie = (void *)cookie;
570 	dxp->ndescs = needed;
571 
572 	start_dp = vq->vq_split.ring.desc;
573 
574 	if (can_push) {
575 		/* prepend cannot fail, checked by caller */
576 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
577 					      -head_size);
578 		prepend_header = true;
579 
580 		/* if offload disabled, it is not zeroed below, do it now */
581 		if (!vq->hw->has_tx_offload)
582 			virtqueue_clear_net_hdr(hdr);
583 	} else if (use_indirect) {
584 		/* setup tx ring slot to point to indirect
585 		 * descriptor list stored in reserved region.
586 		 *
587 		 * the first slot in indirect ring is already preset
588 		 * to point to the header in reserved region
589 		 */
590 		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
591 		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
592 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
593 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
594 
595 		/* loop below will fill in rest of the indirect elements */
596 		start_dp = txr[idx].tx_indir;
597 		idx = 1;
598 	} else {
599 		/* setup first tx ring slot to point to header
600 		 * stored in reserved region.
601 		 */
602 		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
603 		start_dp[idx].len = vq->hw->vtnet_hdr_size;
604 		start_dp[idx].flags = VRING_DESC_F_NEXT;
605 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
606 
607 		idx = start_dp[idx].next;
608 	}
609 
610 	if (vq->hw->has_tx_offload)
611 		virtqueue_xmit_offload(hdr, cookie);
612 
613 	do {
614 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
615 		start_dp[idx].len = cookie->data_len;
616 		if (prepend_header) {
617 			start_dp[idx].addr -= head_size;
618 			start_dp[idx].len += head_size;
619 			prepend_header = false;
620 		}
621 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
622 		idx = start_dp[idx].next;
623 	} while ((cookie = cookie->next) != NULL);
624 
625 	if (use_indirect)
626 		idx = vq->vq_split.ring.desc[head_idx].next;
627 
628 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
629 
630 	vq->vq_desc_head_idx = idx;
631 	vq_update_avail_ring(vq, head_idx);
632 
633 	if (!in_order) {
634 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
635 			vq->vq_desc_tail_idx = idx;
636 	}
637 }
638 
639 void
640 virtio_dev_cq_start(struct rte_eth_dev *dev)
641 {
642 	struct virtio_hw *hw = dev->data->dev_private;
643 
644 	if (hw->cvq) {
645 		rte_spinlock_init(&hw->cvq->lock);
646 		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
647 	}
648 }
649 
650 int
651 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
652 			uint16_t queue_idx,
653 			uint16_t nb_desc,
654 			unsigned int socket_id __rte_unused,
655 			const struct rte_eth_rxconf *rx_conf,
656 			struct rte_mempool *mp)
657 {
658 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
659 	struct virtio_hw *hw = dev->data->dev_private;
660 	struct virtqueue *vq = hw->vqs[vq_idx];
661 	struct virtnet_rx *rxvq;
662 	uint16_t rx_free_thresh;
663 	uint16_t buf_size;
664 	const char *error;
665 
666 	PMD_INIT_FUNC_TRACE();
667 
668 	if (rx_conf->rx_deferred_start) {
669 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
670 		return -EINVAL;
671 	}
672 
673 	buf_size = virtio_rx_mem_pool_buf_size(mp);
674 	if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
675 				     hw->rx_ol_scatter, &error)) {
676 		PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
677 			     queue_idx, error);
678 		return -EINVAL;
679 	}
680 
681 	rx_free_thresh = rx_conf->rx_free_thresh;
682 	if (rx_free_thresh == 0)
683 		rx_free_thresh =
684 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
685 
686 	if (rx_free_thresh & 0x3) {
687 		PMD_INIT_LOG(ERR, "rx_free_thresh must be a multiple of four."
688 			" (rx_free_thresh=%u port=%u queue=%u)",
689 			rx_free_thresh, dev->data->port_id, queue_idx);
690 		return -EINVAL;
691 	}
692 
693 	if (rx_free_thresh >= vq->vq_nentries) {
694 		PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
695 			"number of RX entries (%u)."
696 			" (rx_free_thresh=%u port=%u queue=%u)",
697 			vq->vq_nentries,
698 			rx_free_thresh, dev->data->port_id, queue_idx);
699 		return -EINVAL;
700 	}
701 	vq->vq_free_thresh = rx_free_thresh;
702 
703 	/*
704 	 * For the split ring vectorized path, the number of descriptors
705 	 * must be equal to the ring size.
706 	 */
707 	if (nb_desc > vq->vq_nentries ||
708 	    (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
709 		nb_desc = vq->vq_nentries;
710 	}
711 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
712 
713 	rxvq = &vq->rxq;
714 	rxvq->mpool = mp;
715 	dev->data->rx_queues[queue_idx] = rxvq;
716 
717 	return 0;
718 }
719 
720 int
721 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
722 {
723 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
724 	struct virtio_hw *hw = dev->data->dev_private;
725 	struct virtqueue *vq = hw->vqs[vq_idx];
726 	struct virtnet_rx *rxvq = &vq->rxq;
727 	struct rte_mbuf *m;
728 	uint16_t desc_idx;
729 	int error, nbufs, i;
730 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
731 
732 	PMD_INIT_FUNC_TRACE();
733 
734 	/* Allocate blank mbufs for each rx descriptor */
735 	nbufs = 0;
736 
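	/*
	 * For the split-ring vectorized path the avail ring is an identity
	 * mapping that never changes, so it is initialized once here.
	 */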
737 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
738 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
739 		     desc_idx++) {
740 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
741 			vq->vq_split.ring.desc[desc_idx].flags =
742 				VRING_DESC_F_WRITE;
743 		}
744 
745 		virtio_rxq_vec_setup(rxvq);
746 	}
747 
748 	if (hw->use_vec_rx) {
749 		memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
750 		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
751 			vq->rxq.sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
752 	}
753 
754 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
755 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
756 			virtio_rxq_rearm_vec(rxvq);
757 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
758 		}
759 	} else if (!virtio_with_packed_queue(vq->hw) && in_order) {
760 		if ((!virtqueue_full(vq))) {
761 			uint16_t free_cnt = vq->vq_free_cnt;
762 			struct rte_mbuf *pkts[free_cnt];
763 
764 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
765 				free_cnt)) {
766 				error = virtqueue_enqueue_refill_inorder(vq,
767 						pkts,
768 						free_cnt);
769 				if (unlikely(error)) {
770 					for (i = 0; i < free_cnt; i++)
771 						rte_pktmbuf_free(pkts[i]);
772 				} else {
773 					nbufs += free_cnt;
774 				}
775 			}
776 
777 			vq_update_avail_idx(vq);
778 		}
779 	} else {
780 		while (!virtqueue_full(vq)) {
781 			m = rte_mbuf_raw_alloc(rxvq->mpool);
782 			if (m == NULL)
783 				break;
784 
785 			/* Enqueue allocated buffers */
786 			if (virtio_with_packed_queue(vq->hw))
787 				error = virtqueue_enqueue_recv_refill_packed_init(vq,
788 						&m, 1);
789 			else
790 				error = virtqueue_enqueue_recv_refill(vq,
791 						&m, 1);
792 			if (error) {
793 				rte_pktmbuf_free(m);
794 				break;
795 			}
796 			nbufs++;
797 		}
798 
799 		if (!virtio_with_packed_queue(vq->hw))
800 			vq_update_avail_idx(vq);
801 	}
802 
803 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs (port=%u queue=%u)", nbufs,
804 		     dev->data->port_id, queue_idx);
805 
806 	VIRTQUEUE_DUMP(vq);
807 
808 	return 0;
809 }
810 
811 /*
812  * struct rte_eth_dev *dev: Used to update dev
813  * uint16_t queue_idx: Just used as an index in dev txq list
814  * uint16_t nb_desc: Defaults to values read from config space
815  * unsigned int socket_id: Used to allocate memzone
816  * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
817  */
818 int
819 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
820 			uint16_t queue_idx,
821 			uint16_t nb_desc,
822 			unsigned int socket_id __rte_unused,
823 			const struct rte_eth_txconf *tx_conf)
824 {
825 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
826 	struct virtio_hw *hw = dev->data->dev_private;
827 	struct virtqueue *vq = hw->vqs[vq_idx];
828 	struct virtnet_tx *txvq;
829 	uint16_t tx_free_thresh;
830 
831 	PMD_INIT_FUNC_TRACE();
832 
833 	if (tx_conf->tx_deferred_start) {
834 		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
835 		return -EINVAL;
836 	}
837 
838 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
839 		nb_desc = vq->vq_nentries;
840 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
841 
842 	txvq = &vq->txq;
843 
844 	tx_free_thresh = tx_conf->tx_free_thresh;
845 	if (tx_free_thresh == 0)
846 		tx_free_thresh =
847 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
848 
849 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
850 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
851 			"number of TX entries minus 3 (%u)."
852 			" (tx_free_thresh=%u port=%u queue=%u)",
853 			vq->vq_nentries - 3,
854 			tx_free_thresh, dev->data->port_id, queue_idx);
855 		return -EINVAL;
856 	}
857 
858 	vq->vq_free_thresh = tx_free_thresh;
859 
860 	dev->data->tx_queues[queue_idx] = txvq;
861 	return 0;
862 }
863 
864 int
865 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
866 				uint16_t queue_idx)
867 {
868 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
869 	struct virtio_hw *hw = dev->data->dev_private;
870 	struct virtqueue *vq = hw->vqs[vq_idx];
871 
872 	PMD_INIT_FUNC_TRACE();
873 
874 	if (!virtio_with_packed_queue(hw)) {
875 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
876 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
877 	}
878 
879 	VIRTQUEUE_DUMP(vq);
880 
881 	return 0;
882 }
883 
884 static inline void
885 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
886 {
887 	int error;
888 	/*
889 	 * Requeue the discarded mbuf. This should always be
890 	 * successful since it was just dequeued.
891 	 */
892 	if (virtio_with_packed_queue(vq->hw))
893 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
894 	else
895 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
896 
897 	if (unlikely(error)) {
898 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
899 		rte_pktmbuf_free(m);
900 	}
901 }
902 
903 static inline void
904 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
905 {
906 	int error;
907 
908 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
909 	if (unlikely(error)) {
910 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
911 		rte_pktmbuf_free(m);
912 	}
913 }
914 
915 /* Parse the virtio-net header and optionally fill Rx offload flags in the mbuf */
916 static inline int
917 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
918 {
919 	struct rte_net_hdr_lens hdr_lens;
920 	uint32_t hdrlen, ptype;
921 	int l4_supported = 0;
922 
923 	/* nothing to do */
924 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
925 		return 0;
926 
927 	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
928 
929 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
930 	m->packet_type = ptype;
931 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
932 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
933 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
934 		l4_supported = 1;
935 
936 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
937 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
938 		if (hdr->csum_start <= hdrlen && l4_supported) {
939 			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
940 		} else {
941 			/* Unknown proto or tunnel, do sw cksum. We can assume
942 			 * the cksum field is in the first segment since the
943 			 * buffers we provided to the host are large enough.
944 			 * In case of SCTP, this will be wrong since it's a CRC
945 			 * but there's nothing we can do.
946 			 */
947 			uint16_t csum = 0, off;
948 
949 			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
950 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
951 				&csum) < 0)
952 				return -EINVAL;
953 			if (likely(csum != 0xffff))
954 				csum = ~csum;
955 			off = hdr->csum_offset + hdr->csum_start;
956 			if (rte_pktmbuf_data_len(m) >= off + 1)
957 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
958 					off) = csum;
959 		}
960 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
961 		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
962 	}
963 
964 	/* GSO request, save required information in mbuf */
965 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
966 		/* Check unsupported modes */
967 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
968 		    (hdr->gso_size == 0)) {
969 			return -EINVAL;
970 		}
971 
972 		/* Update mss lengths in mbuf */
973 		m->tso_segsz = hdr->gso_size;
974 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
975 			case VIRTIO_NET_HDR_GSO_TCPV4:
976 			case VIRTIO_NET_HDR_GSO_TCPV6:
977 				m->ol_flags |= RTE_MBUF_F_RX_LRO |
978 					RTE_MBUF_F_RX_L4_CKSUM_NONE;
979 				break;
980 			default:
981 				return -EINVAL;
982 		}
983 	}
984 
985 	return 0;
986 }
987 
988 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
989 uint16_t
990 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
991 {
992 	struct virtnet_rx *rxvq = rx_queue;
993 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
994 	struct virtio_hw *hw = vq->hw;
995 	struct rte_mbuf *rxm;
996 	uint16_t nb_used, num, nb_rx;
997 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
998 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
999 	int error;
1000 	uint32_t i, nb_enqueued;
1001 	uint32_t hdr_size;
1002 	struct virtio_net_hdr *hdr;
1003 
1004 	nb_rx = 0;
1005 	if (unlikely(hw->started == 0))
1006 		return nb_rx;
1007 
1008 	nb_used = virtqueue_nused(vq);
1009 
1010 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1011 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1012 		num = VIRTIO_MBUF_BURST_SZ;
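	/*
	 * Trim the burst so that (used_cons_idx + num) is a multiple of
	 * DESC_PER_CACHELINE, i.e. the burst ends on a descriptor cache
	 * line boundary.
	 */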
1013 	if (likely(num > DESC_PER_CACHELINE))
1014 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1015 
1016 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1017 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1018 
1019 	nb_enqueued = 0;
1020 	hdr_size = hw->vtnet_hdr_size;
1021 
1022 	for (i = 0; i < num ; i++) {
1023 		rxm = rcv_pkts[i];
1024 
1025 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1026 
1027 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1028 			PMD_RX_LOG(ERR, "Packet drop");
1029 			nb_enqueued++;
1030 			virtio_discard_rxbuf(vq, rxm);
1031 			rxvq->stats.errors++;
1032 			continue;
1033 		}
1034 
1035 		rxm->port = hw->port_id;
1036 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1037 		rxm->ol_flags = 0;
1038 		rxm->vlan_tci = 0;
1039 
1040 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1041 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1042 
1043 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1044 			RTE_PKTMBUF_HEADROOM - hdr_size);
1045 
1046 		if (hw->vlan_strip)
1047 			rte_vlan_strip(rxm);
1048 
1049 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1050 			virtio_discard_rxbuf(vq, rxm);
1051 			rxvq->stats.errors++;
1052 			continue;
1053 		}
1054 
1055 		virtio_rx_stats_updated(rxvq, rxm);
1056 
1057 		rx_pkts[nb_rx++] = rxm;
1058 	}
1059 
1060 	rxvq->stats.packets += nb_rx;
1061 
1062 	/* Allocate new mbufs for the used descriptors */
1063 	if (likely(!virtqueue_full(vq))) {
1064 		uint16_t free_cnt = vq->vq_free_cnt;
1065 		struct rte_mbuf *new_pkts[free_cnt];
1066 
1067 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1068 						free_cnt) == 0)) {
1069 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1070 					free_cnt);
1071 			if (unlikely(error)) {
1072 				for (i = 0; i < free_cnt; i++)
1073 					rte_pktmbuf_free(new_pkts[i]);
1074 			}
1075 			nb_enqueued += free_cnt;
1076 		} else {
1077 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1078 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1079 		}
1080 	}
1081 
1082 	if (likely(nb_enqueued)) {
1083 		vq_update_avail_idx(vq);
1084 
1085 		if (unlikely(virtqueue_kick_prepare(vq))) {
1086 			virtqueue_notify(vq);
1087 			PMD_RX_LOG(DEBUG, "Notified");
1088 		}
1089 	}
1090 
1091 	return nb_rx;
1092 }
1093 
1094 uint16_t
1095 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1096 			uint16_t nb_pkts)
1097 {
1098 	struct virtnet_rx *rxvq = rx_queue;
1099 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1100 	struct virtio_hw *hw = vq->hw;
1101 	struct rte_mbuf *rxm;
1102 	uint16_t num, nb_rx;
1103 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1104 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1105 	int error;
1106 	uint32_t i, nb_enqueued;
1107 	uint32_t hdr_size;
1108 	struct virtio_net_hdr *hdr;
1109 
1110 	nb_rx = 0;
1111 	if (unlikely(hw->started == 0))
1112 		return nb_rx;
1113 
1114 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1115 	if (likely(num > DESC_PER_CACHELINE))
1116 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1117 
1118 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1119 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1120 
1121 	nb_enqueued = 0;
1122 	hdr_size = hw->vtnet_hdr_size;
1123 
1124 	for (i = 0; i < num; i++) {
1125 		rxm = rcv_pkts[i];
1126 
1127 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1128 
1129 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1130 			PMD_RX_LOG(ERR, "Packet drop");
1131 			nb_enqueued++;
1132 			virtio_discard_rxbuf(vq, rxm);
1133 			rxvq->stats.errors++;
1134 			continue;
1135 		}
1136 
1137 		rxm->port = hw->port_id;
1138 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1139 		rxm->ol_flags = 0;
1140 		rxm->vlan_tci = 0;
1141 
1142 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1143 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1144 
1145 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1146 			RTE_PKTMBUF_HEADROOM - hdr_size);
1147 
1148 		if (hw->vlan_strip)
1149 			rte_vlan_strip(rxm);
1150 
1151 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1152 			virtio_discard_rxbuf(vq, rxm);
1153 			rxvq->stats.errors++;
1154 			continue;
1155 		}
1156 
1157 		virtio_rx_stats_updated(rxvq, rxm);
1158 
1159 		rx_pkts[nb_rx++] = rxm;
1160 	}
1161 
1162 	rxvq->stats.packets += nb_rx;
1163 
1164 	/* Allocate new mbufs for the used descriptors */
1165 	if (likely(!virtqueue_full(vq))) {
1166 		uint16_t free_cnt = vq->vq_free_cnt;
1167 		struct rte_mbuf *new_pkts[free_cnt];
1168 
1169 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1170 						free_cnt) == 0)) {
1171 			error = virtqueue_enqueue_recv_refill_packed(vq,
1172 					new_pkts, free_cnt);
1173 			if (unlikely(error)) {
1174 				for (i = 0; i < free_cnt; i++)
1175 					rte_pktmbuf_free(new_pkts[i]);
1176 			}
1177 			nb_enqueued += free_cnt;
1178 		} else {
1179 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1180 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1181 		}
1182 	}
1183 
1184 	if (likely(nb_enqueued)) {
1185 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1186 			virtqueue_notify(vq);
1187 			PMD_RX_LOG(DEBUG, "Notified");
1188 		}
1189 	}
1190 
1191 	return nb_rx;
1192 }
1193 
1194 
1195 uint16_t
1196 virtio_recv_pkts_inorder(void *rx_queue,
1197 			struct rte_mbuf **rx_pkts,
1198 			uint16_t nb_pkts)
1199 {
1200 	struct virtnet_rx *rxvq = rx_queue;
1201 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1202 	struct virtio_hw *hw = vq->hw;
1203 	struct rte_mbuf *rxm;
1204 	struct rte_mbuf *prev = NULL;
1205 	uint16_t nb_used, num, nb_rx;
1206 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1207 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1208 	int error;
1209 	uint32_t nb_enqueued;
1210 	uint32_t seg_num;
1211 	uint32_t seg_res;
1212 	uint32_t hdr_size;
1213 	int32_t i;
1214 
1215 	nb_rx = 0;
1216 	if (unlikely(hw->started == 0))
1217 		return nb_rx;
1218 
1219 	nb_used = virtqueue_nused(vq);
1220 	nb_used = RTE_MIN(nb_used, nb_pkts);
1221 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1222 
1223 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1224 
1225 	nb_enqueued = 0;
1226 	seg_num = 1;
1227 	seg_res = 0;
1228 	hdr_size = hw->vtnet_hdr_size;
1229 
1230 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1231 
1232 	for (i = 0; i < num; i++) {
1233 		struct virtio_net_hdr_mrg_rxbuf *header;
1234 
1235 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1236 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1237 
1238 		rxm = rcv_pkts[i];
1239 
1240 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1241 			PMD_RX_LOG(ERR, "Packet drop");
1242 			nb_enqueued++;
1243 			virtio_discard_rxbuf_inorder(vq, rxm);
1244 			rxvq->stats.errors++;
1245 			continue;
1246 		}
1247 
1248 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1249 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1250 			 - hdr_size);
1251 
1252 		if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1253 			seg_num = header->num_buffers;
1254 			if (seg_num == 0)
1255 				seg_num = 1;
1256 		} else {
1257 			seg_num = 1;
1258 		}
1259 
1260 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1261 		rxm->nb_segs = seg_num;
1262 		rxm->ol_flags = 0;
1263 		rxm->vlan_tci = 0;
1264 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1265 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1266 
1267 		rxm->port = hw->port_id;
1268 
1269 		rx_pkts[nb_rx] = rxm;
1270 		prev = rxm;
1271 
1272 		if (vq->hw->has_rx_offload &&
1273 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1274 			virtio_discard_rxbuf_inorder(vq, rxm);
1275 			rxvq->stats.errors++;
1276 			continue;
1277 		}
1278 
1279 		if (hw->vlan_strip)
1280 			rte_vlan_strip(rx_pkts[nb_rx]);
1281 
1282 		seg_res = seg_num - 1;
1283 
1284 		/* Merge remaining segments */
1285 		while (seg_res != 0 && i < (num - 1)) {
1286 			i++;
1287 
1288 			rxm = rcv_pkts[i];
1289 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1290 			rxm->pkt_len = (uint32_t)(len[i]);
1291 			rxm->data_len = (uint16_t)(len[i]);
1292 
1293 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1294 
1295 			prev->next = rxm;
1296 			prev = rxm;
1297 			seg_res -= 1;
1298 		}
1299 
1300 		if (!seg_res) {
1301 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1302 			nb_rx++;
1303 		}
1304 	}
1305 
1306 	/* The last packet may still need more segments merged */
1307 	while (seg_res != 0) {
1308 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1309 					VIRTIO_MBUF_BURST_SZ);
1310 
1311 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1312 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1313 							   rcv_cnt);
1314 			uint16_t extra_idx = 0;
1315 
1316 			rcv_cnt = num;
1317 			while (extra_idx < rcv_cnt) {
1318 				rxm = rcv_pkts[extra_idx];
1319 				rxm->data_off =
1320 					RTE_PKTMBUF_HEADROOM - hdr_size;
1321 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1322 				rxm->data_len = (uint16_t)(len[extra_idx]);
1323 				prev->next = rxm;
1324 				prev = rxm;
1325 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1326 				extra_idx += 1;
1327 			}
1328 			seg_res -= rcv_cnt;
1329 
1330 			if (!seg_res) {
1331 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1332 				nb_rx++;
1333 			}
1334 		} else {
1335 			PMD_RX_LOG(ERR,
1336 					"Not enough segments for packet.");
1337 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1338 			rxvq->stats.errors++;
1339 			break;
1340 		}
1341 	}
1342 
1343 	rxvq->stats.packets += nb_rx;
1344 
1345 	/* Allocate new mbufs for the used descriptors */
1346 
1347 	if (likely(!virtqueue_full(vq))) {
1348 		/* free_cnt may include mrg descs */
1349 		uint16_t free_cnt = vq->vq_free_cnt;
1350 		struct rte_mbuf *new_pkts[free_cnt];
1351 
1352 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1353 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1354 					free_cnt);
1355 			if (unlikely(error)) {
1356 				for (i = 0; i < free_cnt; i++)
1357 					rte_pktmbuf_free(new_pkts[i]);
1358 			}
1359 			nb_enqueued += free_cnt;
1360 		} else {
1361 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1362 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1363 		}
1364 	}
1365 
1366 	if (likely(nb_enqueued)) {
1367 		vq_update_avail_idx(vq);
1368 
1369 		if (unlikely(virtqueue_kick_prepare(vq))) {
1370 			virtqueue_notify(vq);
1371 			PMD_RX_LOG(DEBUG, "Notified");
1372 		}
1373 	}
1374 
1375 	return nb_rx;
1376 }
1377 
1378 uint16_t
1379 virtio_recv_mergeable_pkts(void *rx_queue,
1380 			struct rte_mbuf **rx_pkts,
1381 			uint16_t nb_pkts)
1382 {
1383 	struct virtnet_rx *rxvq = rx_queue;
1384 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1385 	struct virtio_hw *hw = vq->hw;
1386 	struct rte_mbuf *rxm;
1387 	struct rte_mbuf *prev = NULL;
1388 	uint16_t nb_used, num, nb_rx = 0;
1389 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1390 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1391 	int error;
1392 	uint32_t nb_enqueued = 0;
1393 	uint32_t seg_num = 0;
1394 	uint32_t seg_res = 0;
1395 	uint32_t hdr_size = hw->vtnet_hdr_size;
1396 	int32_t i;
1397 
1398 	if (unlikely(hw->started == 0))
1399 		return nb_rx;
1400 
1401 	nb_used = virtqueue_nused(vq);
1402 
1403 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1404 
1405 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1406 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1407 		num = VIRTIO_MBUF_BURST_SZ;
1408 	if (likely(num > DESC_PER_CACHELINE))
1409 		num = num - ((vq->vq_used_cons_idx + num) %
1410 				DESC_PER_CACHELINE);
1411 
1412 
1413 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1414 
1415 	for (i = 0; i < num; i++) {
1416 		struct virtio_net_hdr_mrg_rxbuf *header;
1417 
1418 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1419 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1420 
1421 		rxm = rcv_pkts[i];
1422 
1423 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1424 			PMD_RX_LOG(ERR, "Packet drop");
1425 			nb_enqueued++;
1426 			virtio_discard_rxbuf(vq, rxm);
1427 			rxvq->stats.errors++;
1428 			continue;
1429 		}
1430 
1431 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1432 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1433 			 - hdr_size);
1434 		seg_num = header->num_buffers;
1435 		if (seg_num == 0)
1436 			seg_num = 1;
1437 
1438 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1439 		rxm->nb_segs = seg_num;
1440 		rxm->ol_flags = 0;
1441 		rxm->vlan_tci = 0;
1442 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1443 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1444 
1445 		rxm->port = hw->port_id;
1446 
1447 		rx_pkts[nb_rx] = rxm;
1448 		prev = rxm;
1449 
1450 		if (hw->has_rx_offload &&
1451 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1452 			virtio_discard_rxbuf(vq, rxm);
1453 			rxvq->stats.errors++;
1454 			continue;
1455 		}
1456 
1457 		if (hw->vlan_strip)
1458 			rte_vlan_strip(rx_pkts[nb_rx]);
1459 
1460 		seg_res = seg_num - 1;
1461 
1462 		/* Merge remaining segments */
1463 		while (seg_res != 0 && i < (num - 1)) {
1464 			i++;
1465 
1466 			rxm = rcv_pkts[i];
1467 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1468 			rxm->pkt_len = (uint32_t)(len[i]);
1469 			rxm->data_len = (uint16_t)(len[i]);
1470 
1471 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1472 
1473 			prev->next = rxm;
1474 			prev = rxm;
1475 			seg_res -= 1;
1476 		}
1477 
1478 		if (!seg_res) {
1479 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1480 			nb_rx++;
1481 		}
1482 	}
1483 
1484 	/* The last packet may still need more segments merged */
1485 	while (seg_res != 0) {
1486 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1487 					VIRTIO_MBUF_BURST_SZ);
1488 
1489 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1490 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1491 							   rcv_cnt);
1492 			uint16_t extra_idx = 0;
1493 
1494 			rcv_cnt = num;
1495 			while (extra_idx < rcv_cnt) {
1496 				rxm = rcv_pkts[extra_idx];
1497 				rxm->data_off =
1498 					RTE_PKTMBUF_HEADROOM - hdr_size;
1499 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1500 				rxm->data_len = (uint16_t)(len[extra_idx]);
1501 				prev->next = rxm;
1502 				prev = rxm;
1503 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1504 				extra_idx += 1;
1505 			}
1506 			seg_res -= rcv_cnt;
1507 
1508 			if (!seg_res) {
1509 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1510 				nb_rx++;
1511 			}
1512 		} else {
1513 			PMD_RX_LOG(ERR,
1514 					"Not enough segments for packet.");
1515 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1516 			rxvq->stats.errors++;
1517 			break;
1518 		}
1519 	}
1520 
1521 	rxvq->stats.packets += nb_rx;
1522 
1523 	/* Allocate new mbufs for the used descriptors */
1524 	if (likely(!virtqueue_full(vq))) {
1525 		/* free_cnt may include mrg descs */
1526 		uint16_t free_cnt = vq->vq_free_cnt;
1527 		struct rte_mbuf *new_pkts[free_cnt];
1528 
1529 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1530 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1531 					free_cnt);
1532 			if (unlikely(error)) {
1533 				for (i = 0; i < free_cnt; i++)
1534 					rte_pktmbuf_free(new_pkts[i]);
1535 			}
1536 			nb_enqueued += free_cnt;
1537 		} else {
1538 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1539 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1540 		}
1541 	}
1542 
1543 	if (likely(nb_enqueued)) {
1544 		vq_update_avail_idx(vq);
1545 
1546 		if (unlikely(virtqueue_kick_prepare(vq))) {
1547 			virtqueue_notify(vq);
1548 			PMD_RX_LOG(DEBUG, "Notified");
1549 		}
1550 	}
1551 
1552 	return nb_rx;
1553 }
1554 
1555 uint16_t
1556 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1557 			struct rte_mbuf **rx_pkts,
1558 			uint16_t nb_pkts)
1559 {
1560 	struct virtnet_rx *rxvq = rx_queue;
1561 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1562 	struct virtio_hw *hw = vq->hw;
1563 	struct rte_mbuf *rxm;
1564 	struct rte_mbuf *prev = NULL;
1565 	uint16_t num, nb_rx = 0;
1566 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1567 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1568 	uint32_t nb_enqueued = 0;
1569 	uint32_t seg_num = 0;
1570 	uint32_t seg_res = 0;
1571 	uint32_t hdr_size = hw->vtnet_hdr_size;
1572 	int32_t i;
1573 	int error;
1574 
1575 	if (unlikely(hw->started == 0))
1576 		return nb_rx;
1577 
1578 
1579 	num = nb_pkts;
1580 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1581 		num = VIRTIO_MBUF_BURST_SZ;
1582 	if (likely(num > DESC_PER_CACHELINE))
1583 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1584 
1585 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1586 
1587 	for (i = 0; i < num; i++) {
1588 		struct virtio_net_hdr_mrg_rxbuf *header;
1589 
1590 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1591 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1592 
1593 		rxm = rcv_pkts[i];
1594 
1595 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1596 			PMD_RX_LOG(ERR, "Packet drop");
1597 			nb_enqueued++;
1598 			virtio_discard_rxbuf(vq, rxm);
1599 			rxvq->stats.errors++;
1600 			continue;
1601 		}
1602 
1603 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1604 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1605 		seg_num = header->num_buffers;
1606 
1607 		if (seg_num == 0)
1608 			seg_num = 1;
1609 
1610 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1611 		rxm->nb_segs = seg_num;
1612 		rxm->ol_flags = 0;
1613 		rxm->vlan_tci = 0;
1614 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1615 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1616 
1617 		rxm->port = hw->port_id;
1618 		rx_pkts[nb_rx] = rxm;
1619 		prev = rxm;
1620 
1621 		if (hw->has_rx_offload &&
1622 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1623 			virtio_discard_rxbuf(vq, rxm);
1624 			rxvq->stats.errors++;
1625 			continue;
1626 		}
1627 
1628 		if (hw->vlan_strip)
1629 			rte_vlan_strip(rx_pkts[nb_rx]);
1630 
1631 		seg_res = seg_num - 1;
1632 
1633 		/* Merge remaining segments */
1634 		while (seg_res != 0 && i < (num - 1)) {
1635 			i++;
1636 
1637 			rxm = rcv_pkts[i];
1638 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1639 			rxm->pkt_len = (uint32_t)(len[i]);
1640 			rxm->data_len = (uint16_t)(len[i]);
1641 
1642 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1643 
1644 			prev->next = rxm;
1645 			prev = rxm;
1646 			seg_res -= 1;
1647 		}
1648 
1649 		if (!seg_res) {
1650 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1651 			nb_rx++;
1652 		}
1653 	}
1654 
1655 	/* The last packet may still need more segments merged */
1656 	while (seg_res != 0) {
1657 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1658 					VIRTIO_MBUF_BURST_SZ);
1659 		uint16_t extra_idx = 0;
1660 
1661 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1662 				len, rcv_cnt);
1663 		if (unlikely(rcv_cnt == 0)) {
1664 			PMD_RX_LOG(ERR, "Not enough segments for packet.");
1665 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1666 			rxvq->stats.errors++;
1667 			break;
1668 		}
1669 
1670 		while (extra_idx < rcv_cnt) {
1671 			rxm = rcv_pkts[extra_idx];
1672 
1673 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1674 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1675 			rxm->data_len = (uint16_t)(len[extra_idx]);
1676 
1677 			prev->next = rxm;
1678 			prev = rxm;
1679 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1680 			extra_idx += 1;
1681 		}
1682 		seg_res -= rcv_cnt;
1683 		if (!seg_res) {
1684 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1685 			nb_rx++;
1686 		}
1687 	}
1688 
1689 	rxvq->stats.packets += nb_rx;
1690 
1691 	/* Allocate new mbufs for the used descriptors */
1692 	if (likely(!virtqueue_full(vq))) {
1693 		/* free_cnt may include mrg descs */
1694 		uint16_t free_cnt = vq->vq_free_cnt;
1695 		struct rte_mbuf *new_pkts[free_cnt];
1696 
1697 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1698 			error = virtqueue_enqueue_recv_refill_packed(vq,
1699 					new_pkts, free_cnt);
1700 			if (unlikely(error)) {
1701 				for (i = 0; i < free_cnt; i++)
1702 					rte_pktmbuf_free(new_pkts[i]);
1703 			}
1704 			nb_enqueued += free_cnt;
1705 		} else {
1706 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1707 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1708 		}
1709 	}
1710 
1711 	if (likely(nb_enqueued)) {
1712 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1713 			virtqueue_notify(vq);
1714 			PMD_RX_LOG(DEBUG, "Notified");
1715 		}
1716 	}
1717 
1718 	return nb_rx;
1719 }
1720 
1721 uint16_t
1722 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1723 			uint16_t nb_pkts)
1724 {
1725 	uint16_t nb_tx;
1726 	int error;
1727 
1728 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1729 		struct rte_mbuf *m = tx_pkts[nb_tx];
1730 
1731 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1732 		error = rte_validate_tx_offload(m);
1733 		if (unlikely(error)) {
1734 			rte_errno = -error;
1735 			break;
1736 		}
1737 #endif
1738 
1739 		/* Do VLAN tag insertion */
1740 		if (unlikely(m->ol_flags & RTE_MBUF_F_TX_VLAN)) {
1741 			error = rte_vlan_insert(&m);
1742 			/* rte_vlan_insert() may change pointer
1743 			 * even in the case of failure
1744 			 */
1745 			tx_pkts[nb_tx] = m;
1746 
1747 			if (unlikely(error)) {
1748 				rte_errno = -error;
1749 				break;
1750 			}
1751 		}
1752 
1753 		error = rte_net_intel_cksum_prepare(m);
1754 		if (unlikely(error)) {
1755 			rte_errno = -error;
1756 			break;
1757 		}
1758 
1759 		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
1760 			virtio_tso_fix_cksum(m);
1761 	}
1762 
1763 	return nb_tx;
1764 }
1765 
1766 uint16_t
1767 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1768 			uint16_t nb_pkts)
1769 {
1770 	struct virtnet_tx *txvq = tx_queue;
1771 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1772 	struct virtio_hw *hw = vq->hw;
1773 	uint16_t hdr_size = hw->vtnet_hdr_size;
1774 	uint16_t nb_tx = 0;
1775 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1776 
1777 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1778 		return nb_tx;
1779 
1780 	if (unlikely(nb_pkts < 1))
1781 		return nb_pkts;
1782 
1783 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1784 
1785 	if (nb_pkts > vq->vq_free_cnt)
1786 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1787 					   in_order);
1788 
1789 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1790 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1791 		int can_push = 0, use_indirect = 0, slots, need;
1792 
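		/*
		 * The header can be pushed into the mbuf headroom only for a
		 * single-segment, directly-attached mbuf with refcnt 1, enough
		 * headroom and suitable alignment, and only when ANY_LAYOUT or
		 * VERSION_1 is negotiated.
		 */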
1793 		/* optimize ring usage */
1794 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1795 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1796 		    rte_mbuf_refcnt_read(txm) == 1 &&
1797 		    RTE_MBUF_DIRECT(txm) &&
1798 		    txm->nb_segs == 1 &&
1799 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1800 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1801 			   alignof(struct virtio_net_hdr_mrg_rxbuf)))
1802 			can_push = 1;
1803 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1804 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1805 			use_indirect = 1;
1806 		/* How many main ring entries are needed for this Tx?
1807 		 * indirect   => 1
1808 		 * any_layout => number of segments
1809 		 * default    => number of segments + 1
1810 		 */
1811 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1812 		need = slots - vq->vq_free_cnt;
1813 
1814 		/* A positive value means more free vring descriptors are needed */
1815 		if (unlikely(need > 0)) {
1816 			virtio_xmit_cleanup_packed(vq, need, in_order);
1817 			need = slots - vq->vq_free_cnt;
1818 			if (unlikely(need > 0)) {
1819 				PMD_TX_LOG(ERR,
1820 					   "No free tx descriptors to transmit");
1821 				break;
1822 			}
1823 		}
1824 
1825 		/* Enqueue Packet buffers */
1826 		if (can_push)
1827 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1828 		else
1829 			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1830 						      use_indirect, 0,
1831 						      in_order);
1832 
1833 		virtio_update_packet_stats(&txvq->stats, txm);
1834 	}
1835 
1836 	txvq->stats.packets += nb_tx;
1837 
1838 	if (likely(nb_tx)) {
1839 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1840 			virtqueue_notify(vq);
1841 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1842 		}
1843 	}
1844 
1845 	return nb_tx;
1846 }
1847 
1848 uint16_t
1849 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1850 {
1851 	struct virtnet_tx *txvq = tx_queue;
1852 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1853 	struct virtio_hw *hw = vq->hw;
1854 	uint16_t hdr_size = hw->vtnet_hdr_size;
1855 	uint16_t nb_used, nb_tx = 0;
1856 
1857 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1858 		return nb_tx;
1859 
1860 	if (unlikely(nb_pkts < 1))
1861 		return nb_pkts;
1862 
1863 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1864 
1865 	nb_used = virtqueue_nused(vq);
1866 
1867 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1868 		virtio_xmit_cleanup(vq, nb_used);
1869 
1870 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1871 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1872 		int can_push = 0, use_indirect = 0, slots, need;
1873 
1874 		/* optimize ring usage */
1875 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1876 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1877 		    rte_mbuf_refcnt_read(txm) == 1 &&
1878 		    RTE_MBUF_DIRECT(txm) &&
1879 		    txm->nb_segs == 1 &&
1880 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1881 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1882 				   alignof(struct virtio_net_hdr_mrg_rxbuf)))
1883 			can_push = 1;
1884 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1885 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1886 			use_indirect = 1;
1887 
1888 		/* How many main ring entries are needed for this Tx?
1889 		 * any_layout => number of segments
1890 		 * indirect   => 1
1891 		 * default    => number of segments + 1
1892 		 */
1893 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1894 		need = slots - vq->vq_free_cnt;
1895 
1896 		/* A positive value means more free vring descriptors are needed */
1897 		if (unlikely(need > 0)) {
1898 			nb_used = virtqueue_nused(vq);
1899 
1900 			need = RTE_MIN(need, (int)nb_used);
1901 
1902 			virtio_xmit_cleanup(vq, need);
1903 			need = slots - vq->vq_free_cnt;
1904 			if (unlikely(need > 0)) {
1905 				PMD_TX_LOG(ERR,
1906 					   "No free tx descriptors to transmit");
1907 				break;
1908 			}
1909 		}
1910 
1911 		/* Enqueue Packet buffers */
1912 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1913 			can_push, 0);
1914 
1915 		virtio_update_packet_stats(&txvq->stats, txm);
1916 	}
1917 
1918 	txvq->stats.packets += nb_tx;
1919 
1920 	if (likely(nb_tx)) {
1921 		vq_update_avail_idx(vq);
1922 
1923 		if (unlikely(virtqueue_kick_prepare(vq))) {
1924 			virtqueue_notify(vq);
1925 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1926 		}
1927 	}
1928 
1929 	return nb_tx;
1930 }
1931 
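/*
 * Clean at most 'need' used entries and report how many descriptors are
 * still missing; a value <= 0 means enough entries are now free.
 */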
1932 static __rte_always_inline int
1933 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1934 {
1935 	uint16_t nb_used, nb_clean, nb_descs;
1936 
1937 	nb_descs = vq->vq_free_cnt + need;
1938 	nb_used = virtqueue_nused(vq);
1939 	nb_clean = RTE_MIN(need, (int)nb_used);
1940 
1941 	virtio_xmit_cleanup_inorder(vq, nb_clean);
1942 
1943 	return nb_descs - vq->vq_free_cnt;
1944 }
1945 
1946 uint16_t
1947 virtio_xmit_pkts_inorder(void *tx_queue,
1948 			struct rte_mbuf **tx_pkts,
1949 			uint16_t nb_pkts)
1950 {
1951 	struct virtnet_tx *txvq = tx_queue;
1952 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1953 	struct virtio_hw *hw = vq->hw;
1954 	uint16_t hdr_size = hw->vtnet_hdr_size;
1955 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1956 	struct rte_mbuf *inorder_pkts[nb_pkts];
1957 	int need;
1958 
1959 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1960 		return nb_tx;
1961 
1962 	if (unlikely(nb_pkts < 1))
1963 		return nb_pkts;
1964 
1965 	VIRTQUEUE_DUMP(vq);
1966 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1967 	nb_used = virtqueue_nused(vq);
1968 
1969 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1970 		virtio_xmit_cleanup_inorder(vq, nb_used);
1971 
1972 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1973 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1974 		int slots;
1975 
1976 		/* optimize ring usage */
1977 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1978 		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1979 		     rte_mbuf_refcnt_read(txm) == 1 &&
1980 		     RTE_MBUF_DIRECT(txm) &&
1981 		     txm->nb_segs == 1 &&
1982 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
1983 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1984 				alignof(struct virtio_net_hdr_mrg_rxbuf))) {
1985 			inorder_pkts[nb_inorder_pkts] = txm;
1986 			nb_inorder_pkts++;
1987 
1988 			continue;
1989 		}
1990 
1991 		if (nb_inorder_pkts) {
1992 			need = nb_inorder_pkts - vq->vq_free_cnt;
1993 			if (unlikely(need > 0)) {
1994 				need = virtio_xmit_try_cleanup_inorder(vq,
1995 								       need);
1996 				if (unlikely(need > 0)) {
1997 					PMD_TX_LOG(ERR,
1998 						"No free tx descriptors to "
1999 						"transmit");
2000 					break;
2001 				}
2002 			}
2003 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2004 							nb_inorder_pkts);
2005 			nb_inorder_pkts = 0;
2006 		}
2007 
2008 		slots = txm->nb_segs + 1;
2009 		need = slots - vq->vq_free_cnt;
2010 		if (unlikely(need > 0)) {
2011 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
2012 
2013 			if (unlikely(need > 0)) {
2014 				PMD_TX_LOG(ERR,
2015 					"No free tx descriptors to transmit");
2016 				break;
2017 			}
2018 		}
2019 		/* Enqueue Packet buffers */
2020 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2021 
2022 		virtio_update_packet_stats(&txvq->stats, txm);
2023 	}
2024 
2025 	/* Transmit all inorder packets */
2026 	if (nb_inorder_pkts) {
2027 		need = nb_inorder_pkts - vq->vq_free_cnt;
2028 		if (unlikely(need > 0)) {
2029 			need = virtio_xmit_try_cleanup_inorder(vq,
2030 								  need);
2031 			if (unlikely(need > 0)) {
2032 				PMD_TX_LOG(ERR,
2033 					"No free tx descriptors to transmit");
2034 				nb_inorder_pkts = vq->vq_free_cnt;
2035 				nb_tx -= need;
2036 			}
2037 		}
2038 
2039 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2040 						nb_inorder_pkts);
2041 	}
2042 
2043 	txvq->stats.packets += nb_tx;
2044 
2045 	if (likely(nb_tx)) {
2046 		vq_update_avail_idx(vq);
2047 
2048 		if (unlikely(virtqueue_kick_prepare(vq))) {
2049 			virtqueue_notify(vq);
2050 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2051 		}
2052 	}
2053 
2054 	VIRTQUEUE_DUMP(vq);
2055 
2056 	return nb_tx;
2057 }
2058 
2059 __rte_weak uint16_t
2060 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2061 			    struct rte_mbuf **rx_pkts __rte_unused,
2062 			    uint16_t nb_pkts __rte_unused)
2063 {
2064 	return 0;
2065 }
2066 
2067 __rte_weak uint16_t
2068 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2069 			    struct rte_mbuf **tx_pkts __rte_unused,
2070 			    uint16_t nb_pkts __rte_unused)
2071 {
2072 	return 0;
2073 }
2074