xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision 4b53e9802b6b6040ad5622b1414aaa93d9581d0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
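/*
 * In-order variant: descriptors are consumed in ring order, so freeing only
 * advances the free counter and the tail index.
 */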
42 void
43 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
44 {
45 	vq->vq_free_cnt += num;
46 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
47 }
48 
49 void
50 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
51 {
52 	struct vring_desc *dp, *dp_tail;
53 	struct vq_desc_extra *dxp;
54 	uint16_t desc_idx_last = desc_idx;
55 
56 	dp  = &vq->vq_split.ring.desc[desc_idx];
57 	dxp = &vq->vq_descx[desc_idx];
58 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
59 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
60 		while (dp->flags & VRING_DESC_F_NEXT) {
61 			desc_idx_last = dp->next;
62 			dp = &vq->vq_split.ring.desc[dp->next];
63 		}
64 	}
65 	dxp->ndescs = 0;
66 
67 	/*
68 	 * We must append the existing free chain, if any, to the end of
69 	 * the newly freed chain. If the virtqueue was completely used, then
70 	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
71 	 */
72 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
73 		vq->vq_desc_head_idx = desc_idx;
74 	} else {
75 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
76 		dp_tail->next = desc_idx;
77 	}
78 
79 	vq->vq_desc_tail_idx = desc_idx_last;
80 	dp->next = VQ_RING_DESC_CHAIN_END;
81 }
82 
83 void
84 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
85 {
86 	uint32_t s = mbuf->pkt_len;
87 	struct rte_ether_addr *ea;
88 
89 	stats->bytes += s;
90 
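	/* size_bins: [0] < 64, [1] == 64, [2] 65-127, [3] 128-255,
	 * [4] 256-511, [5] 512-1023, [6] 1024-1518, [7] >= 1519 bytes
	 */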
91 	if (s == 64) {
92 		stats->size_bins[1]++;
93 	} else if (s > 64 && s < 1024) {
94 		uint32_t bin;
95 
96 		/* count leading zeros, and offset into the correct bin */
97 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
98 		stats->size_bins[bin]++;
99 	} else {
100 		if (s < 64)
101 			stats->size_bins[0]++;
102 		else if (s < 1519)
103 			stats->size_bins[6]++;
104 		else
105 			stats->size_bins[7]++;
106 	}
107 
108 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
109 	if (rte_is_multicast_ether_addr(ea)) {
110 		if (rte_is_broadcast_ether_addr(ea))
111 			stats->broadcast++;
112 		else
113 			stats->multicast++;
114 	}
115 }
116 
117 static inline void
118 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
119 {
120 	VIRTIO_DUMP_PACKET(m, m->data_len);
121 
122 	virtio_update_packet_stats(&rxvq->stats, m);
123 }
124 
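/*
 * Dequeue up to 'num' used descriptors from a packed ring. Returns the number
 * of mbufs written to rx_pkts; stops early on a not-yet-used descriptor or a
 * descriptor without an mbuf cookie.
 */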
125 static uint16_t
126 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
127 				  struct rte_mbuf **rx_pkts,
128 				  uint32_t *len,
129 				  uint16_t num)
130 {
131 	struct rte_mbuf *cookie;
132 	uint16_t used_idx;
133 	uint16_t id;
134 	struct vring_packed_desc *desc;
135 	uint16_t i;
136 
137 	desc = vq->vq_packed.ring.desc;
138 
139 	for (i = 0; i < num; i++) {
140 		used_idx = vq->vq_used_cons_idx;
141 		/* desc_is_used has a load-acquire or rte_io_rmb inside
142 		 * and waits for a used descriptor in the virtqueue.
143 		 */
144 		if (!desc_is_used(&desc[used_idx], vq))
145 			return i;
146 		len[i] = desc[used_idx].len;
147 		id = desc[used_idx].id;
148 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
149 		if (unlikely(cookie == NULL)) {
150 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
151 				vq->vq_used_cons_idx);
152 			break;
153 		}
154 		rte_prefetch0(cookie);
155 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
156 		rx_pkts[i] = cookie;
157 
158 		vq->vq_free_cnt++;
159 		vq->vq_used_cons_idx++;
160 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
161 			vq->vq_used_cons_idx -= vq->vq_nentries;
162 			vq->vq_packed.used_wrap_counter ^= 1;
163 		}
164 	}
165 
166 	return i;
167 }
168 
169 static uint16_t
170 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
171 			   uint32_t *len, uint16_t num)
172 {
173 	struct vring_used_elem *uep;
174 	struct rte_mbuf *cookie;
175 	uint16_t used_idx, desc_idx;
176 	uint16_t i;
177 
178 	/*  Caller does the check */
179 	for (i = 0; i < num ; i++) {
180 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
181 		uep = &vq->vq_split.ring.used->ring[used_idx];
182 		desc_idx = (uint16_t) uep->id;
183 		len[i] = uep->len;
184 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
185 
186 		if (unlikely(cookie == NULL)) {
187 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
188 				vq->vq_used_cons_idx);
189 			break;
190 		}
191 
192 		rte_prefetch0(cookie);
193 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
194 		rx_pkts[i]  = cookie;
195 		vq->vq_used_cons_idx++;
196 		vq_ring_free_chain(vq, desc_idx);
197 		vq->vq_descx[desc_idx].cookie = NULL;
198 	}
199 
200 	return i;
201 }
202 
203 static uint16_t
204 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
205 			struct rte_mbuf **rx_pkts,
206 			uint32_t *len,
207 			uint16_t num)
208 {
209 	struct vring_used_elem *uep;
210 	struct rte_mbuf *cookie;
211 	uint16_t used_idx = 0;
212 	uint16_t i;
213 
214 	if (unlikely(num == 0))
215 		return 0;
216 
217 	for (i = 0; i < num; i++) {
218 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
219 		/* Desc idx same as used idx */
220 		uep = &vq->vq_split.ring.used->ring[used_idx];
221 		len[i] = uep->len;
222 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
223 
224 		if (unlikely(cookie == NULL)) {
225 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
226 				vq->vq_used_cons_idx);
227 			break;
228 		}
229 
230 		rte_prefetch0(cookie);
231 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
232 		rx_pkts[i]  = cookie;
233 		vq->vq_used_cons_idx++;
234 		vq->vq_descx[used_idx].cookie = NULL;
235 	}
236 
237 	vq_ring_free_inorder(vq, used_idx, i);
238 	return i;
239 }
240 
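/*
 * Post 'num' receive buffers on a split ring in in-order mode. Each buffer
 * spans the mbuf from just before its data area (leaving room for the
 * virtio-net header) to the end of the buffer.
 */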
241 static inline int
242 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
243 			struct rte_mbuf **cookies,
244 			uint16_t num)
245 {
246 	struct vq_desc_extra *dxp;
247 	struct virtio_hw *hw = vq->hw;
248 	struct vring_desc *start_dp;
249 	uint16_t head_idx, idx, i = 0;
250 
251 	if (unlikely(vq->vq_free_cnt == 0))
252 		return -ENOSPC;
253 	if (unlikely(vq->vq_free_cnt < num))
254 		return -EMSGSIZE;
255 
256 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
257 	start_dp = vq->vq_split.ring.desc;
258 
259 	while (i < num) {
260 		idx = head_idx & (vq->vq_nentries - 1);
261 		dxp = &vq->vq_descx[idx];
262 		dxp->cookie = (void *)cookies[i];
263 		dxp->ndescs = 1;
264 
265 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
266 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
267 		start_dp[idx].len = cookies[i]->buf_len -
268 			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
269 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
270 
271 		vq_update_avail_ring(vq, idx);
272 		head_idx++;
273 		i++;
274 	}
275 
276 	vq->vq_desc_head_idx += num;
277 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
278 	return 0;
279 }
280 
281 static inline int
282 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
283 				uint16_t num)
284 {
285 	struct vq_desc_extra *dxp;
286 	struct virtio_hw *hw = vq->hw;
287 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
288 	uint16_t idx, i;
289 
290 	if (unlikely(vq->vq_free_cnt == 0))
291 		return -ENOSPC;
292 	if (unlikely(vq->vq_free_cnt < num))
293 		return -EMSGSIZE;
294 
295 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
296 		return -EFAULT;
297 
298 	for (i = 0; i < num; i++) {
299 		idx = vq->vq_desc_head_idx;
300 		dxp = &vq->vq_descx[idx];
301 		dxp->cookie = (void *)cookie[i];
302 		dxp->ndescs = 1;
303 
304 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
305 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
306 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
307 			hw->vtnet_hdr_size;
308 		start_dp[idx].flags = VRING_DESC_F_WRITE;
309 		vq->vq_desc_head_idx = start_dp[idx].next;
310 		vq_update_avail_ring(vq, idx);
311 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
312 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
313 			break;
314 		}
315 	}
316 
317 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
318 
319 	return 0;
320 }
321 
322 static inline void
323 virtqueue_refill_single_packed(struct virtqueue *vq,
324 			       struct vring_packed_desc *dp,
325 			       struct rte_mbuf *cookie)
326 {
327 	uint16_t flags = vq->vq_packed.cached_flags;
328 	struct virtio_hw *hw = vq->hw;
329 
330 	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
331 	dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
332 
333 	virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
334 
335 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
336 		vq->vq_avail_idx -= vq->vq_nentries;
337 		vq->vq_packed.cached_flags ^=
338 			VRING_PACKED_DESC_F_AVAIL_USED;
339 		flags = vq->vq_packed.cached_flags;
340 	}
341 }
342 
343 static inline int
344 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
345 				     struct rte_mbuf **cookie, uint16_t num)
346 {
347 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
348 	struct vq_desc_extra *dxp;
349 	uint16_t idx;
350 	int i;
351 
352 	if (unlikely(vq->vq_free_cnt == 0))
353 		return -ENOSPC;
354 	if (unlikely(vq->vq_free_cnt < num))
355 		return -EMSGSIZE;
356 
357 	for (i = 0; i < num; i++) {
358 		idx = vq->vq_avail_idx;
359 		dxp = &vq->vq_descx[idx];
360 		dxp->cookie = (void *)cookie[i];
361 		dxp->ndescs = 1;
362 
363 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
364 	}
365 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
366 	return 0;
367 }
368 
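/*
 * Runtime refill for the packed ring: unlike the _init variant above, the
 * buffer id already present in the descriptor is reused to select the
 * vq_descx slot.
 */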
369 static inline int
370 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
371 				     struct rte_mbuf **cookie, uint16_t num)
372 {
373 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
374 	struct vq_desc_extra *dxp;
375 	uint16_t idx, did;
376 	int i;
377 
378 	if (unlikely(vq->vq_free_cnt == 0))
379 		return -ENOSPC;
380 	if (unlikely(vq->vq_free_cnt < num))
381 		return -EMSGSIZE;
382 
383 	for (i = 0; i < num; i++) {
384 		idx = vq->vq_avail_idx;
385 		did = start_dp[idx].id;
386 		dxp = &vq->vq_descx[did];
387 		dxp->cookie = (void *)cookie[i];
388 		dxp->ndescs = 1;
389 
390 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
391 	}
392 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
393 	return 0;
394 }
395 
396 /* When doing TSO, the IP payload length is not included in the TCP
397  * pseudo-header checksum of the packet given to the PMD, but virtio
398  * expects it to be.
399  */
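/* In one's-complement terms the fix below is simply:
 *   new_cksum = old_pseudo_cksum + ip_payload_len (carry folded back in)
 */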
400 static void
401 virtio_tso_fix_cksum(struct rte_mbuf *m)
402 {
403 	/* common case: header is not fragmented */
404 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
405 			m->l4_len)) {
406 		struct rte_ipv4_hdr *iph;
407 		struct rte_ipv6_hdr *ip6h;
408 		struct rte_tcp_hdr *th;
409 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
410 		uint32_t tmp;
411 
412 		iph = rte_pktmbuf_mtod_offset(m,
413 					struct rte_ipv4_hdr *, m->l2_len);
414 		th = RTE_PTR_ADD(iph, m->l3_len);
415 		if ((iph->version_ihl >> 4) == 4) {
416 			iph->hdr_checksum = 0;
417 			iph->hdr_checksum = rte_ipv4_cksum(iph);
418 			ip_len = iph->total_length;
419 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
420 				m->l3_len);
421 		} else {
422 			ip6h = (struct rte_ipv6_hdr *)iph;
423 			ip_paylen = ip6h->payload_len;
424 		}
425 
426 		/* recompute the phdr checksum, this time including ip_paylen */
427 		prev_cksum = th->cksum;
428 		tmp = prev_cksum;
429 		tmp += ip_paylen;
430 		tmp = (tmp & 0xffff) + (tmp >> 16);
431 		new_cksum = tmp;
432 
433 		/* replace it in the packet */
434 		th->cksum = new_cksum;
435 	}
436 }
437 
438 
439 
440 
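/*
 * In-order Tx on a split ring: one descriptor per packet, with the virtio-net
 * header written into the mbuf headroom directly in front of the data.
 */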
441 static inline void
442 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
443 			struct rte_mbuf **cookies,
444 			uint16_t num)
445 {
446 	struct vq_desc_extra *dxp;
447 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
448 	struct vring_desc *start_dp;
449 	struct virtio_net_hdr *hdr;
450 	uint16_t idx;
451 	int16_t head_size = vq->hw->vtnet_hdr_size;
452 	uint16_t i = 0;
453 
454 	idx = vq->vq_desc_head_idx;
455 	start_dp = vq->vq_split.ring.desc;
456 
457 	while (i < num) {
458 		idx = idx & (vq->vq_nentries - 1);
459 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
460 		dxp->cookie = (void *)cookies[i];
461 		dxp->ndescs = 1;
462 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
463 
464 		hdr = rte_pktmbuf_mtod_offset(cookies[i],
465 				struct virtio_net_hdr *, -head_size);
466 
467 		/* if offload disabled, hdr is not zeroed yet, do it now */
468 		if (!vq->hw->has_tx_offload)
469 			virtqueue_clear_net_hdr(hdr);
470 		else
471 			virtqueue_xmit_offload(hdr, cookies[i]);
472 
473 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
474 		start_dp[idx].len = cookies[i]->data_len + head_size;
475 		start_dp[idx].flags = 0;
476 
477 
478 		vq_update_avail_ring(vq, idx);
479 
480 		idx++;
481 		i++;
482 	}
483 
484 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
485 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
486 }
487 
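/*
 * Packed-ring fast path: the caller has already checked that the header can
 * be prepended into the mbuf headroom, so the packet fits in one descriptor.
 */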
488 static inline void
489 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
490 				   struct rte_mbuf *cookie,
491 				   int in_order)
492 {
493 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
494 	struct vring_packed_desc *dp;
495 	struct vq_desc_extra *dxp;
496 	uint16_t idx, id, flags;
497 	int16_t head_size = vq->hw->vtnet_hdr_size;
498 	struct virtio_net_hdr *hdr;
499 
500 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
501 	idx = vq->vq_avail_idx;
502 	dp = &vq->vq_packed.ring.desc[idx];
503 
504 	dxp = &vq->vq_descx[id];
505 	dxp->ndescs = 1;
506 	dxp->cookie = cookie;
507 
508 	flags = vq->vq_packed.cached_flags;
509 
510 	/* prepend cannot fail, checked by caller */
511 	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
512 				      -head_size);
513 
514 	/* if offload disabled, hdr is not zeroed yet, do it now */
515 	if (!vq->hw->has_tx_offload)
516 		virtqueue_clear_net_hdr(hdr);
517 	else
518 		virtqueue_xmit_offload(hdr, cookie);
519 
520 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
521 	dp->len = cookie->data_len + head_size;
522 	dp->id = id;
523 
524 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
525 		vq->vq_avail_idx -= vq->vq_nentries;
526 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
527 	}
528 
529 	vq->vq_free_cnt--;
530 
531 	if (!in_order) {
532 		vq->vq_desc_head_idx = dxp->next;
533 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
534 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
535 	}
536 
537 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
538 }
539 
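/*
 * Generic split-ring Tx enqueue. The virtio-net header is either pushed into
 * the mbuf headroom (can_push), kept in the per-slot reserved region with the
 * chain described by an indirect table (use_indirect), or kept in the
 * reserved region as the first descriptor of a chained request.
 */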
540 static inline void
541 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
542 			uint16_t needed, int use_indirect, int can_push,
543 			int in_order)
544 {
545 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
546 	struct vq_desc_extra *dxp;
547 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
548 	struct vring_desc *start_dp;
549 	uint16_t seg_num = cookie->nb_segs;
550 	uint16_t head_idx, idx;
551 	int16_t head_size = vq->hw->vtnet_hdr_size;
552 	bool prepend_header = false;
553 	struct virtio_net_hdr *hdr;
554 
555 	head_idx = vq->vq_desc_head_idx;
556 	idx = head_idx;
557 	if (in_order)
558 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
559 	else
560 		dxp = &vq->vq_descx[idx];
561 	dxp->cookie = (void *)cookie;
562 	dxp->ndescs = needed;
563 
564 	start_dp = vq->vq_split.ring.desc;
565 
566 	if (can_push) {
567 		/* prepend cannot fail, checked by caller */
568 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
569 					      -head_size);
570 		prepend_header = true;
571 
572 		/* if offload disabled, hdr is not zeroed below, do it now */
573 		if (!vq->hw->has_tx_offload)
574 			virtqueue_clear_net_hdr(hdr);
575 	} else if (use_indirect) {
576 		/* setup tx ring slot to point to indirect
577 		 * descriptor list stored in reserved region.
578 		 *
579 		 * the first slot in indirect ring is already preset
580 		 * to point to the header in reserved region
581 		 */
582 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
583 			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
584 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
585 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
586 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
587 
588 		/* loop below will fill in rest of the indirect elements */
589 		start_dp = txr[idx].tx_indir;
590 		idx = 1;
591 	} else {
592 		/* setup first tx ring slot to point to header
593 		 * stored in reserved region.
594 		 */
595 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
596 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
597 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
598 		start_dp[idx].flags = VRING_DESC_F_NEXT;
599 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
600 
601 		idx = start_dp[idx].next;
602 	}
603 
604 	if (vq->hw->has_tx_offload)
605 		virtqueue_xmit_offload(hdr, cookie);
606 
607 	do {
608 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
609 		start_dp[idx].len = cookie->data_len;
610 		if (prepend_header) {
611 			start_dp[idx].addr -= head_size;
612 			start_dp[idx].len += head_size;
613 			prepend_header = false;
614 		}
615 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
616 		idx = start_dp[idx].next;
617 	} while ((cookie = cookie->next) != NULL);
618 
619 	if (use_indirect)
620 		idx = vq->vq_split.ring.desc[head_idx].next;
621 
622 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
623 
624 	vq->vq_desc_head_idx = idx;
625 	vq_update_avail_ring(vq, head_idx);
626 
627 	if (!in_order) {
628 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
629 			vq->vq_desc_tail_idx = idx;
630 	}
631 }
632 
633 void
634 virtio_dev_cq_start(struct rte_eth_dev *dev)
635 {
636 	struct virtio_hw *hw = dev->data->dev_private;
637 
638 	if (hw->cvq) {
639 		rte_spinlock_init(&hw->cvq->lock);
640 		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
641 	}
642 }
643 
644 int
645 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
646 			uint16_t queue_idx,
647 			uint16_t nb_desc,
648 			unsigned int socket_id __rte_unused,
649 			const struct rte_eth_rxconf *rx_conf,
650 			struct rte_mempool *mp)
651 {
652 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
653 	struct virtio_hw *hw = dev->data->dev_private;
654 	struct virtqueue *vq = hw->vqs[vq_idx];
655 	struct virtnet_rx *rxvq;
656 	uint16_t rx_free_thresh;
657 	uint16_t buf_size;
658 	const char *error;
659 
660 	PMD_INIT_FUNC_TRACE();
661 
662 	if (rx_conf->rx_deferred_start) {
663 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
664 		return -EINVAL;
665 	}
666 
667 	buf_size = virtio_rx_mem_pool_buf_size(mp);
668 	if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
669 				     hw->rx_ol_scatter, &error)) {
670 		PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
671 			     queue_idx, error);
672 		return -EINVAL;
673 	}
674 
675 	rx_free_thresh = rx_conf->rx_free_thresh;
676 	if (rx_free_thresh == 0)
677 		rx_free_thresh =
678 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
679 
680 	if (rx_free_thresh & 0x3) {
681 		PMD_INIT_LOG(ERR, "rx_free_thresh must be a multiple of four."
682 			" (rx_free_thresh=%u port=%u queue=%u)",
683 			rx_free_thresh, dev->data->port_id, queue_idx);
684 		return -EINVAL;
685 	}
686 
687 	if (rx_free_thresh >= vq->vq_nentries) {
688 		PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
689 			"number of RX entries (%u)."
690 			" (rx_free_thresh=%u port=%u queue=%u)",
691 			vq->vq_nentries,
692 			rx_free_thresh, dev->data->port_id, queue_idx);
693 		return -EINVAL;
694 	}
695 	vq->vq_free_thresh = rx_free_thresh;
696 
697 	/*
698 	 * For the split ring vectorized path, the number of descriptors
699 	 * must be equal to the ring size.
700 	 */
701 	if (nb_desc > vq->vq_nentries ||
702 	    (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
703 		nb_desc = vq->vq_nentries;
704 	}
705 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
706 
707 	rxvq = &vq->rxq;
708 	rxvq->queue_id = queue_idx;
709 	rxvq->mpool = mp;
710 	dev->data->rx_queues[queue_idx] = rxvq;
711 
712 	return 0;
713 }
714 
715 int
716 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
717 {
718 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
719 	struct virtio_hw *hw = dev->data->dev_private;
720 	struct virtqueue *vq = hw->vqs[vq_idx];
721 	struct virtnet_rx *rxvq = &vq->rxq;
722 	struct rte_mbuf *m;
723 	uint16_t desc_idx;
724 	int error, nbufs, i;
725 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
726 
727 	PMD_INIT_FUNC_TRACE();
728 
729 	/* Allocate blank mbufs for each rx descriptor */
730 	nbufs = 0;
731 
732 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
733 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
734 		     desc_idx++) {
735 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
736 			vq->vq_split.ring.desc[desc_idx].flags =
737 				VRING_DESC_F_WRITE;
738 		}
739 
740 		virtio_rxq_vec_setup(rxvq);
741 	}
742 
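	/* Pad the tail of sw_ring with a zeroed fake mbuf so the vectorized Rx
	 * path can safely read up to RTE_PMD_VIRTIO_RX_MAX_BURST entries past
	 * the end of the ring.
	 */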
743 	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
744 	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
745 		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
746 
747 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
748 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
749 			virtio_rxq_rearm_vec(rxvq);
750 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
751 		}
752 	} else if (!virtio_with_packed_queue(vq->hw) && in_order) {
753 		if ((!virtqueue_full(vq))) {
754 			uint16_t free_cnt = vq->vq_free_cnt;
755 			struct rte_mbuf *pkts[free_cnt];
756 
757 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
758 				free_cnt)) {
759 				error = virtqueue_enqueue_refill_inorder(vq,
760 						pkts,
761 						free_cnt);
762 				if (unlikely(error)) {
763 					for (i = 0; i < free_cnt; i++)
764 						rte_pktmbuf_free(pkts[i]);
765 				} else {
766 					nbufs += free_cnt;
767 				}
768 			}
769 
770 			vq_update_avail_idx(vq);
771 		}
772 	} else {
773 		while (!virtqueue_full(vq)) {
774 			m = rte_mbuf_raw_alloc(rxvq->mpool);
775 			if (m == NULL)
776 				break;
777 
778 			/* Enqueue allocated buffers */
779 			if (virtio_with_packed_queue(vq->hw))
780 				error = virtqueue_enqueue_recv_refill_packed_init(vq,
781 						&m, 1);
782 			else
783 				error = virtqueue_enqueue_recv_refill(vq,
784 						&m, 1);
785 			if (error) {
786 				rte_pktmbuf_free(m);
787 				break;
788 			}
789 			nbufs++;
790 		}
791 
792 		if (!virtio_with_packed_queue(vq->hw))
793 			vq_update_avail_idx(vq);
794 	}
795 
796 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs (port=%u queue=%u)", nbufs,
797 		     dev->data->port_id, queue_idx);
798 
799 	VIRTQUEUE_DUMP(vq);
800 
801 	return 0;
802 }
803 
804 /*
805  * struct rte_eth_dev *dev: device whose Tx queue list is updated
806  * uint16_t nb_desc: defaults to the value read from config space
807  * unsigned int socket_id: used to allocate the memzone
808  * const struct rte_eth_txconf *tx_conf: used to set up the Tx engine
809  * uint16_t queue_idx: index into the device Tx queue list
810  */
811 int
812 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
813 			uint16_t queue_idx,
814 			uint16_t nb_desc,
815 			unsigned int socket_id __rte_unused,
816 			const struct rte_eth_txconf *tx_conf)
817 {
818 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
819 	struct virtio_hw *hw = dev->data->dev_private;
820 	struct virtqueue *vq = hw->vqs[vq_idx];
821 	struct virtnet_tx *txvq;
822 	uint16_t tx_free_thresh;
823 
824 	PMD_INIT_FUNC_TRACE();
825 
826 	if (tx_conf->tx_deferred_start) {
827 		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
828 		return -EINVAL;
829 	}
830 
831 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
832 		nb_desc = vq->vq_nentries;
833 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
834 
835 	txvq = &vq->txq;
836 	txvq->queue_id = queue_idx;
837 
838 	tx_free_thresh = tx_conf->tx_free_thresh;
839 	if (tx_free_thresh == 0)
840 		tx_free_thresh =
841 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
842 
843 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
844 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
845 			"number of TX entries minus 3 (%u)."
846 			" (tx_free_thresh=%u port=%u queue=%u)",
847 			vq->vq_nentries - 3,
848 			tx_free_thresh, dev->data->port_id, queue_idx);
849 		return -EINVAL;
850 	}
851 
852 	vq->vq_free_thresh = tx_free_thresh;
853 
854 	dev->data->tx_queues[queue_idx] = txvq;
855 	return 0;
856 }
857 
858 int
859 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
860 				uint16_t queue_idx)
861 {
862 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
863 	struct virtio_hw *hw = dev->data->dev_private;
864 	struct virtqueue *vq = hw->vqs[vq_idx];
865 
866 	PMD_INIT_FUNC_TRACE();
867 
868 	if (!virtio_with_packed_queue(hw)) {
869 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
870 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
871 	}
872 
873 	VIRTQUEUE_DUMP(vq);
874 
875 	return 0;
876 }
877 
878 static inline void
879 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
880 {
881 	int error;
882 	/*
883 	 * Requeue the discarded mbuf. This should always be
884 	 * successful since it was just dequeued.
885 	 */
886 	if (virtio_with_packed_queue(vq->hw))
887 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
888 	else
889 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
890 
891 	if (unlikely(error)) {
892 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
893 		rte_pktmbuf_free(m);
894 	}
895 }
896 
897 static inline void
898 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
899 {
900 	int error;
901 
902 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
903 	if (unlikely(error)) {
904 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
905 		rte_pktmbuf_free(m);
906 	}
907 }
908 
909 /* Optionally fill offload information in structure */
910 static inline int
911 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
912 {
913 	struct rte_net_hdr_lens hdr_lens;
914 	uint32_t hdrlen, ptype;
915 	int l4_supported = 0;
916 
917 	/* nothing to do */
918 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
919 		return 0;
920 
921 	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
922 
923 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
924 	m->packet_type = ptype;
925 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
926 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
927 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
928 		l4_supported = 1;
929 
930 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
931 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
932 		if (hdr->csum_start <= hdrlen && l4_supported) {
933 			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
934 		} else {
935 			/* Unknown proto or tunnel, do sw cksum. We can assume
936 			 * the cksum field is in the first segment since the
937 			 * buffers we provided to the host are large enough.
938 			 * In case of SCTP, this will be wrong since it's a CRC
939 			 * but there's nothing we can do.
940 			 */
941 			uint16_t csum = 0, off;
942 
943 			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
944 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
945 				&csum) < 0)
946 				return -EINVAL;
947 			if (likely(csum != 0xffff))
948 				csum = ~csum;
949 			off = hdr->csum_offset + hdr->csum_start;
950 			if (rte_pktmbuf_data_len(m) >= off + 1)
951 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
952 					off) = csum;
953 		}
954 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
955 		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
956 	}
957 
958 	/* GSO request, save required information in mbuf */
959 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
960 		/* Check unsupported modes */
961 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
962 		    (hdr->gso_size == 0)) {
963 			return -EINVAL;
964 		}
965 
966 		/* Update mss lengths in mbuf */
967 		m->tso_segsz = hdr->gso_size;
968 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
969 			case VIRTIO_NET_HDR_GSO_TCPV4:
970 			case VIRTIO_NET_HDR_GSO_TCPV6:
971 				m->ol_flags |= RTE_MBUF_F_RX_LRO |
972 					RTE_MBUF_F_RX_L4_CKSUM_NONE;
973 				break;
974 			default:
975 				return -EINVAL;
976 		}
977 	}
978 
979 	return 0;
980 }
981 
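/* Rx bursts below are trimmed so that they end on a descriptor cache-line
 * boundary, keeping each burst from sharing a cache line with the next one.
 */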
982 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
983 uint16_t
984 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
985 {
986 	struct virtnet_rx *rxvq = rx_queue;
987 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
988 	struct virtio_hw *hw = vq->hw;
989 	struct rte_mbuf *rxm;
990 	uint16_t nb_used, num, nb_rx;
991 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
992 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
993 	int error;
994 	uint32_t i, nb_enqueued;
995 	uint32_t hdr_size;
996 	struct virtio_net_hdr *hdr;
997 
998 	nb_rx = 0;
999 	if (unlikely(hw->started == 0))
1000 		return nb_rx;
1001 
1002 	nb_used = virtqueue_nused(vq);
1003 
1004 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1005 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1006 		num = VIRTIO_MBUF_BURST_SZ;
1007 	if (likely(num > DESC_PER_CACHELINE))
1008 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1009 
1010 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1011 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1012 
1013 	nb_enqueued = 0;
1014 	hdr_size = hw->vtnet_hdr_size;
1015 
1016 	for (i = 0; i < num ; i++) {
1017 		rxm = rcv_pkts[i];
1018 
1019 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1020 
1021 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1022 			PMD_RX_LOG(ERR, "Packet drop");
1023 			nb_enqueued++;
1024 			virtio_discard_rxbuf(vq, rxm);
1025 			rxvq->stats.errors++;
1026 			continue;
1027 		}
1028 
1029 		rxm->port = rxvq->port_id;
1030 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1031 		rxm->ol_flags = 0;
1032 		rxm->vlan_tci = 0;
1033 
1034 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1035 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1036 
1037 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1038 			RTE_PKTMBUF_HEADROOM - hdr_size);
1039 
1040 		if (hw->vlan_strip)
1041 			rte_vlan_strip(rxm);
1042 
1043 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1044 			virtio_discard_rxbuf(vq, rxm);
1045 			rxvq->stats.errors++;
1046 			continue;
1047 		}
1048 
1049 		virtio_rx_stats_updated(rxvq, rxm);
1050 
1051 		rx_pkts[nb_rx++] = rxm;
1052 	}
1053 
1054 	rxvq->stats.packets += nb_rx;
1055 
1056 	/* Allocate new mbuf for the used descriptor */
1057 	if (likely(!virtqueue_full(vq))) {
1058 		uint16_t free_cnt = vq->vq_free_cnt;
1059 		struct rte_mbuf *new_pkts[free_cnt];
1060 
1061 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1062 						free_cnt) == 0)) {
1063 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1064 					free_cnt);
1065 			if (unlikely(error)) {
1066 				for (i = 0; i < free_cnt; i++)
1067 					rte_pktmbuf_free(new_pkts[i]);
1068 			}
1069 			nb_enqueued += free_cnt;
1070 		} else {
1071 			struct rte_eth_dev *dev =
1072 				&rte_eth_devices[rxvq->port_id];
1073 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1074 		}
1075 	}
1076 
1077 	if (likely(nb_enqueued)) {
1078 		vq_update_avail_idx(vq);
1079 
1080 		if (unlikely(virtqueue_kick_prepare(vq))) {
1081 			virtqueue_notify(vq);
1082 			PMD_RX_LOG(DEBUG, "Notified");
1083 		}
1084 	}
1085 
1086 	return nb_rx;
1087 }
1088 
1089 uint16_t
1090 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1091 			uint16_t nb_pkts)
1092 {
1093 	struct virtnet_rx *rxvq = rx_queue;
1094 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1095 	struct virtio_hw *hw = vq->hw;
1096 	struct rte_mbuf *rxm;
1097 	uint16_t num, nb_rx;
1098 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1099 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1100 	int error;
1101 	uint32_t i, nb_enqueued;
1102 	uint32_t hdr_size;
1103 	struct virtio_net_hdr *hdr;
1104 
1105 	nb_rx = 0;
1106 	if (unlikely(hw->started == 0))
1107 		return nb_rx;
1108 
1109 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1110 	if (likely(num > DESC_PER_CACHELINE))
1111 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1112 
1113 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1114 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1115 
1116 	nb_enqueued = 0;
1117 	hdr_size = hw->vtnet_hdr_size;
1118 
1119 	for (i = 0; i < num; i++) {
1120 		rxm = rcv_pkts[i];
1121 
1122 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1123 
1124 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1125 			PMD_RX_LOG(ERR, "Packet drop");
1126 			nb_enqueued++;
1127 			virtio_discard_rxbuf(vq, rxm);
1128 			rxvq->stats.errors++;
1129 			continue;
1130 		}
1131 
1132 		rxm->port = rxvq->port_id;
1133 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1134 		rxm->ol_flags = 0;
1135 		rxm->vlan_tci = 0;
1136 
1137 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1138 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1139 
1140 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1141 			RTE_PKTMBUF_HEADROOM - hdr_size);
1142 
1143 		if (hw->vlan_strip)
1144 			rte_vlan_strip(rxm);
1145 
1146 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1147 			virtio_discard_rxbuf(vq, rxm);
1148 			rxvq->stats.errors++;
1149 			continue;
1150 		}
1151 
1152 		virtio_rx_stats_updated(rxvq, rxm);
1153 
1154 		rx_pkts[nb_rx++] = rxm;
1155 	}
1156 
1157 	rxvq->stats.packets += nb_rx;
1158 
1159 	/* Allocate new mbuf for the used descriptor */
1160 	if (likely(!virtqueue_full(vq))) {
1161 		uint16_t free_cnt = vq->vq_free_cnt;
1162 		struct rte_mbuf *new_pkts[free_cnt];
1163 
1164 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1165 						free_cnt) == 0)) {
1166 			error = virtqueue_enqueue_recv_refill_packed(vq,
1167 					new_pkts, free_cnt);
1168 			if (unlikely(error)) {
1169 				for (i = 0; i < free_cnt; i++)
1170 					rte_pktmbuf_free(new_pkts[i]);
1171 			}
1172 			nb_enqueued += free_cnt;
1173 		} else {
1174 			struct rte_eth_dev *dev =
1175 				&rte_eth_devices[rxvq->port_id];
1176 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1177 		}
1178 	}
1179 
1180 	if (likely(nb_enqueued)) {
1181 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1182 			virtqueue_notify(vq);
1183 			PMD_RX_LOG(DEBUG, "Notified");
1184 		}
1185 	}
1186 
1187 	return nb_rx;
1188 }
1189 
1190 
1191 uint16_t
1192 virtio_recv_pkts_inorder(void *rx_queue,
1193 			struct rte_mbuf **rx_pkts,
1194 			uint16_t nb_pkts)
1195 {
1196 	struct virtnet_rx *rxvq = rx_queue;
1197 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1198 	struct virtio_hw *hw = vq->hw;
1199 	struct rte_mbuf *rxm;
1200 	struct rte_mbuf *prev = NULL;
1201 	uint16_t nb_used, num, nb_rx;
1202 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1203 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1204 	int error;
1205 	uint32_t nb_enqueued;
1206 	uint32_t seg_num;
1207 	uint32_t seg_res;
1208 	uint32_t hdr_size;
1209 	int32_t i;
1210 
1211 	nb_rx = 0;
1212 	if (unlikely(hw->started == 0))
1213 		return nb_rx;
1214 
1215 	nb_used = virtqueue_nused(vq);
1216 	nb_used = RTE_MIN(nb_used, nb_pkts);
1217 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1218 
1219 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1220 
1221 	nb_enqueued = 0;
1222 	seg_num = 1;
1223 	seg_res = 0;
1224 	hdr_size = hw->vtnet_hdr_size;
1225 
1226 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1227 
1228 	for (i = 0; i < num; i++) {
1229 		struct virtio_net_hdr_mrg_rxbuf *header;
1230 
1231 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1232 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1233 
1234 		rxm = rcv_pkts[i];
1235 
1236 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1237 			PMD_RX_LOG(ERR, "Packet drop");
1238 			nb_enqueued++;
1239 			virtio_discard_rxbuf_inorder(vq, rxm);
1240 			rxvq->stats.errors++;
1241 			continue;
1242 		}
1243 
1244 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1245 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1246 			 - hdr_size);
1247 
1248 		if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1249 			seg_num = header->num_buffers;
1250 			if (seg_num == 0)
1251 				seg_num = 1;
1252 		} else {
1253 			seg_num = 1;
1254 		}
1255 
1256 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1257 		rxm->nb_segs = seg_num;
1258 		rxm->ol_flags = 0;
1259 		rxm->vlan_tci = 0;
1260 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1261 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1262 
1263 		rxm->port = rxvq->port_id;
1264 
1265 		rx_pkts[nb_rx] = rxm;
1266 		prev = rxm;
1267 
1268 		if (vq->hw->has_rx_offload &&
1269 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1270 			virtio_discard_rxbuf_inorder(vq, rxm);
1271 			rxvq->stats.errors++;
1272 			continue;
1273 		}
1274 
1275 		if (hw->vlan_strip)
1276 			rte_vlan_strip(rx_pkts[nb_rx]);
1277 
1278 		seg_res = seg_num - 1;
1279 
1280 		/* Merge remaining segments */
1281 		while (seg_res != 0 && i < (num - 1)) {
1282 			i++;
1283 
1284 			rxm = rcv_pkts[i];
1285 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1286 			rxm->pkt_len = (uint32_t)(len[i]);
1287 			rxm->data_len = (uint16_t)(len[i]);
1288 
1289 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1290 
1291 			prev->next = rxm;
1292 			prev = rxm;
1293 			seg_res -= 1;
1294 		}
1295 
1296 		if (!seg_res) {
1297 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1298 			nb_rx++;
1299 		}
1300 	}
1301 
1302 	/* Last packet still needs its remaining segments merged */
1303 	while (seg_res != 0) {
1304 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1305 					VIRTIO_MBUF_BURST_SZ);
1306 
1307 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1308 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1309 							   rcv_cnt);
1310 			uint16_t extra_idx = 0;
1311 
1312 			rcv_cnt = num;
1313 			while (extra_idx < rcv_cnt) {
1314 				rxm = rcv_pkts[extra_idx];
1315 				rxm->data_off =
1316 					RTE_PKTMBUF_HEADROOM - hdr_size;
1317 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1318 				rxm->data_len = (uint16_t)(len[extra_idx]);
1319 				prev->next = rxm;
1320 				prev = rxm;
1321 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1322 				extra_idx += 1;
1323 			}
1324 			seg_res -= rcv_cnt;
1325 
1326 			if (!seg_res) {
1327 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1328 				nb_rx++;
1329 			}
1330 		} else {
1331 			PMD_RX_LOG(ERR,
1332 					"Not enough segments for packet.");
1333 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1334 			rxvq->stats.errors++;
1335 			break;
1336 		}
1337 	}
1338 
1339 	rxvq->stats.packets += nb_rx;
1340 
1341 	/* Allocate new mbuf for the used descriptor */
1342 
1343 	if (likely(!virtqueue_full(vq))) {
1344 		/* free_cnt may include mrg descs */
1345 		uint16_t free_cnt = vq->vq_free_cnt;
1346 		struct rte_mbuf *new_pkts[free_cnt];
1347 
1348 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1349 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1350 					free_cnt);
1351 			if (unlikely(error)) {
1352 				for (i = 0; i < free_cnt; i++)
1353 					rte_pktmbuf_free(new_pkts[i]);
1354 			}
1355 			nb_enqueued += free_cnt;
1356 		} else {
1357 			struct rte_eth_dev *dev =
1358 				&rte_eth_devices[rxvq->port_id];
1359 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1360 		}
1361 	}
1362 
1363 	if (likely(nb_enqueued)) {
1364 		vq_update_avail_idx(vq);
1365 
1366 		if (unlikely(virtqueue_kick_prepare(vq))) {
1367 			virtqueue_notify(vq);
1368 			PMD_RX_LOG(DEBUG, "Notified");
1369 		}
1370 	}
1371 
1372 	return nb_rx;
1373 }
1374 
1375 uint16_t
1376 virtio_recv_mergeable_pkts(void *rx_queue,
1377 			struct rte_mbuf **rx_pkts,
1378 			uint16_t nb_pkts)
1379 {
1380 	struct virtnet_rx *rxvq = rx_queue;
1381 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1382 	struct virtio_hw *hw = vq->hw;
1383 	struct rte_mbuf *rxm;
1384 	struct rte_mbuf *prev = NULL;
1385 	uint16_t nb_used, num, nb_rx = 0;
1386 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1387 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1388 	int error;
1389 	uint32_t nb_enqueued = 0;
1390 	uint32_t seg_num = 0;
1391 	uint32_t seg_res = 0;
1392 	uint32_t hdr_size = hw->vtnet_hdr_size;
1393 	int32_t i;
1394 
1395 	if (unlikely(hw->started == 0))
1396 		return nb_rx;
1397 
1398 	nb_used = virtqueue_nused(vq);
1399 
1400 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1401 
1402 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1403 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1404 		num = VIRTIO_MBUF_BURST_SZ;
1405 	if (likely(num > DESC_PER_CACHELINE))
1406 		num = num - ((vq->vq_used_cons_idx + num) %
1407 				DESC_PER_CACHELINE);
1408 
1409 
1410 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1411 
1412 	for (i = 0; i < num; i++) {
1413 		struct virtio_net_hdr_mrg_rxbuf *header;
1414 
1415 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1416 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1417 
1418 		rxm = rcv_pkts[i];
1419 
1420 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1421 			PMD_RX_LOG(ERR, "Packet drop");
1422 			nb_enqueued++;
1423 			virtio_discard_rxbuf(vq, rxm);
1424 			rxvq->stats.errors++;
1425 			continue;
1426 		}
1427 
1428 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1429 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1430 			 - hdr_size);
1431 		seg_num = header->num_buffers;
1432 		if (seg_num == 0)
1433 			seg_num = 1;
1434 
1435 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1436 		rxm->nb_segs = seg_num;
1437 		rxm->ol_flags = 0;
1438 		rxm->vlan_tci = 0;
1439 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1440 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1441 
1442 		rxm->port = rxvq->port_id;
1443 
1444 		rx_pkts[nb_rx] = rxm;
1445 		prev = rxm;
1446 
1447 		if (hw->has_rx_offload &&
1448 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1449 			virtio_discard_rxbuf(vq, rxm);
1450 			rxvq->stats.errors++;
1451 			continue;
1452 		}
1453 
1454 		if (hw->vlan_strip)
1455 			rte_vlan_strip(rx_pkts[nb_rx]);
1456 
1457 		seg_res = seg_num - 1;
1458 
1459 		/* Merge remaining segments */
1460 		while (seg_res != 0 && i < (num - 1)) {
1461 			i++;
1462 
1463 			rxm = rcv_pkts[i];
1464 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1465 			rxm->pkt_len = (uint32_t)(len[i]);
1466 			rxm->data_len = (uint16_t)(len[i]);
1467 
1468 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1469 
1470 			prev->next = rxm;
1471 			prev = rxm;
1472 			seg_res -= 1;
1473 		}
1474 
1475 		if (!seg_res) {
1476 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1477 			nb_rx++;
1478 		}
1479 	}
1480 
1481 	/* Last packet still needs its remaining segments merged */
1482 	while (seg_res != 0) {
1483 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1484 					VIRTIO_MBUF_BURST_SZ);
1485 
1486 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1487 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1488 							   rcv_cnt);
1489 			uint16_t extra_idx = 0;
1490 
1491 			rcv_cnt = num;
1492 			while (extra_idx < rcv_cnt) {
1493 				rxm = rcv_pkts[extra_idx];
1494 				rxm->data_off =
1495 					RTE_PKTMBUF_HEADROOM - hdr_size;
1496 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1497 				rxm->data_len = (uint16_t)(len[extra_idx]);
1498 				prev->next = rxm;
1499 				prev = rxm;
1500 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1501 				extra_idx += 1;
1502 			}
1503 			seg_res -= rcv_cnt;
1504 
1505 			if (!seg_res) {
1506 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1507 				nb_rx++;
1508 			}
1509 		} else {
1510 			PMD_RX_LOG(ERR,
1511 					"Not enough segments for packet.");
1512 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1513 			rxvq->stats.errors++;
1514 			break;
1515 		}
1516 	}
1517 
1518 	rxvq->stats.packets += nb_rx;
1519 
1520 	/* Allocate new mbuf for the used descriptor */
1521 	if (likely(!virtqueue_full(vq))) {
1522 		/* free_cnt may include mrg descs */
1523 		uint16_t free_cnt = vq->vq_free_cnt;
1524 		struct rte_mbuf *new_pkts[free_cnt];
1525 
1526 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1527 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1528 					free_cnt);
1529 			if (unlikely(error)) {
1530 				for (i = 0; i < free_cnt; i++)
1531 					rte_pktmbuf_free(new_pkts[i]);
1532 			}
1533 			nb_enqueued += free_cnt;
1534 		} else {
1535 			struct rte_eth_dev *dev =
1536 				&rte_eth_devices[rxvq->port_id];
1537 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1538 		}
1539 	}
1540 
1541 	if (likely(nb_enqueued)) {
1542 		vq_update_avail_idx(vq);
1543 
1544 		if (unlikely(virtqueue_kick_prepare(vq))) {
1545 			virtqueue_notify(vq);
1546 			PMD_RX_LOG(DEBUG, "Notified");
1547 		}
1548 	}
1549 
1550 	return nb_rx;
1551 }
1552 
1553 uint16_t
1554 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1555 			struct rte_mbuf **rx_pkts,
1556 			uint16_t nb_pkts)
1557 {
1558 	struct virtnet_rx *rxvq = rx_queue;
1559 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1560 	struct virtio_hw *hw = vq->hw;
1561 	struct rte_mbuf *rxm;
1562 	struct rte_mbuf *prev = NULL;
1563 	uint16_t num, nb_rx = 0;
1564 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1565 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1566 	uint32_t nb_enqueued = 0;
1567 	uint32_t seg_num = 0;
1568 	uint32_t seg_res = 0;
1569 	uint32_t hdr_size = hw->vtnet_hdr_size;
1570 	int32_t i;
1571 	int error;
1572 
1573 	if (unlikely(hw->started == 0))
1574 		return nb_rx;
1575 
1576 
1577 	num = nb_pkts;
1578 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1579 		num = VIRTIO_MBUF_BURST_SZ;
1580 	if (likely(num > DESC_PER_CACHELINE))
1581 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1582 
1583 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1584 
1585 	for (i = 0; i < num; i++) {
1586 		struct virtio_net_hdr_mrg_rxbuf *header;
1587 
1588 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1589 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1590 
1591 		rxm = rcv_pkts[i];
1592 
1593 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1594 			PMD_RX_LOG(ERR, "Packet drop");
1595 			nb_enqueued++;
1596 			virtio_discard_rxbuf(vq, rxm);
1597 			rxvq->stats.errors++;
1598 			continue;
1599 		}
1600 
1601 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1602 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1603 		seg_num = header->num_buffers;
1604 
1605 		if (seg_num == 0)
1606 			seg_num = 1;
1607 
1608 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1609 		rxm->nb_segs = seg_num;
1610 		rxm->ol_flags = 0;
1611 		rxm->vlan_tci = 0;
1612 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1613 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1614 
1615 		rxm->port = rxvq->port_id;
1616 		rx_pkts[nb_rx] = rxm;
1617 		prev = rxm;
1618 
1619 		if (hw->has_rx_offload &&
1620 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1621 			virtio_discard_rxbuf(vq, rxm);
1622 			rxvq->stats.errors++;
1623 			continue;
1624 		}
1625 
1626 		if (hw->vlan_strip)
1627 			rte_vlan_strip(rx_pkts[nb_rx]);
1628 
1629 		seg_res = seg_num - 1;
1630 
1631 		/* Merge remaining segments */
1632 		while (seg_res != 0 && i < (num - 1)) {
1633 			i++;
1634 
1635 			rxm = rcv_pkts[i];
1636 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1637 			rxm->pkt_len = (uint32_t)(len[i]);
1638 			rxm->data_len = (uint16_t)(len[i]);
1639 
1640 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1641 
1642 			prev->next = rxm;
1643 			prev = rxm;
1644 			seg_res -= 1;
1645 		}
1646 
1647 		if (!seg_res) {
1648 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1649 			nb_rx++;
1650 		}
1651 	}
1652 
1653 	/* Last packet still needs its remaining segments merged */
1654 	while (seg_res != 0) {
1655 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1656 					VIRTIO_MBUF_BURST_SZ);
1657 		uint16_t extra_idx = 0;
1658 
1659 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1660 				len, rcv_cnt);
1661 		if (unlikely(rcv_cnt == 0)) {
1662 			PMD_RX_LOG(ERR, "Not enough segments for packet.");
1663 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1664 			rxvq->stats.errors++;
1665 			break;
1666 		}
1667 
1668 		while (extra_idx < rcv_cnt) {
1669 			rxm = rcv_pkts[extra_idx];
1670 
1671 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1672 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1673 			rxm->data_len = (uint16_t)(len[extra_idx]);
1674 
1675 			prev->next = rxm;
1676 			prev = rxm;
1677 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1678 			extra_idx += 1;
1679 		}
1680 		seg_res -= rcv_cnt;
1681 		if (!seg_res) {
1682 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1683 			nb_rx++;
1684 		}
1685 	}
1686 
1687 	rxvq->stats.packets += nb_rx;
1688 
1689 	/* Allocate new mbuf for the used descriptor */
1690 	if (likely(!virtqueue_full(vq))) {
1691 		/* free_cnt may include mrg descs */
1692 		uint16_t free_cnt = vq->vq_free_cnt;
1693 		struct rte_mbuf *new_pkts[free_cnt];
1694 
1695 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1696 			error = virtqueue_enqueue_recv_refill_packed(vq,
1697 					new_pkts, free_cnt);
1698 			if (unlikely(error)) {
1699 				for (i = 0; i < free_cnt; i++)
1700 					rte_pktmbuf_free(new_pkts[i]);
1701 			}
1702 			nb_enqueued += free_cnt;
1703 		} else {
1704 			struct rte_eth_dev *dev =
1705 				&rte_eth_devices[rxvq->port_id];
1706 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1707 		}
1708 	}
1709 
1710 	if (likely(nb_enqueued)) {
1711 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1712 			virtqueue_notify(vq);
1713 			PMD_RX_LOG(DEBUG, "Notified");
1714 		}
1715 	}
1716 
1717 	return nb_rx;
1718 }
1719 
1720 uint16_t
1721 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1722 			uint16_t nb_pkts)
1723 {
1724 	uint16_t nb_tx;
1725 	int error;
1726 
1727 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1728 		struct rte_mbuf *m = tx_pkts[nb_tx];
1729 
1730 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1731 		error = rte_validate_tx_offload(m);
1732 		if (unlikely(error)) {
1733 			rte_errno = -error;
1734 			break;
1735 		}
1736 #endif
1737 
1738 		/* Do VLAN tag insertion */
1739 		if (unlikely(m->ol_flags & RTE_MBUF_F_TX_VLAN)) {
1740 			error = rte_vlan_insert(&m);
1741 			/* rte_vlan_insert() may change the mbuf pointer
1742 			 * even in the case of failure
1743 			 */
1744 			tx_pkts[nb_tx] = m;
1745 
1746 			if (unlikely(error)) {
1747 				rte_errno = -error;
1748 				break;
1749 			}
1750 		}
1751 
1752 		error = rte_net_intel_cksum_prepare(m);
1753 		if (unlikely(error)) {
1754 			rte_errno = -error;
1755 			break;
1756 		}
1757 
1758 		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
1759 			virtio_tso_fix_cksum(m);
1760 	}
1761 
1762 	return nb_tx;
1763 }
1764 
1765 uint16_t
1766 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1767 			uint16_t nb_pkts)
1768 {
1769 	struct virtnet_tx *txvq = tx_queue;
1770 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1771 	struct virtio_hw *hw = vq->hw;
1772 	uint16_t hdr_size = hw->vtnet_hdr_size;
1773 	uint16_t nb_tx = 0;
1774 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1775 
1776 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1777 		return nb_tx;
1778 
1779 	if (unlikely(nb_pkts < 1))
1780 		return nb_pkts;
1781 
1782 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1783 
1784 	if (nb_pkts > vq->vq_free_cnt)
1785 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1786 					   in_order);
1787 
1788 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1789 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1790 		int can_push = 0, use_indirect = 0, slots, need;
1791 
1792 		/* optimize ring usage */
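		/* can_push: the virtio-net header fits in the mbuf headroom, so the
		 * packet can be sent in a single descriptor; otherwise fall back to
		 * an indirect table when the feature is negotiated.
		 */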
1793 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1794 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1795 		    rte_mbuf_refcnt_read(txm) == 1 &&
1796 		    RTE_MBUF_DIRECT(txm) &&
1797 		    txm->nb_segs == 1 &&
1798 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1799 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1800 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1801 			can_push = 1;
1802 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1803 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1804 			use_indirect = 1;
1805 		/* How many main ring entries are needed for this Tx?
1806 		 * indirect   => 1
1807 		 * any_layout => number of segments
1808 		 * default    => number of segments + 1
1809 		 */
1810 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1811 		need = slots - vq->vq_free_cnt;
1812 
1813 		/* A positive value means free vring descriptors are still needed */
1814 		if (unlikely(need > 0)) {
1815 			virtio_xmit_cleanup_packed(vq, need, in_order);
1816 			need = slots - vq->vq_free_cnt;
1817 			if (unlikely(need > 0)) {
1818 				PMD_TX_LOG(ERR,
1819 					   "No free tx descriptors to transmit");
1820 				break;
1821 			}
1822 		}
1823 
1824 		/* Enqueue Packet buffers */
1825 		if (can_push)
1826 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1827 		else
1828 			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1829 						      use_indirect, 0,
1830 						      in_order);
1831 
1832 		virtio_update_packet_stats(&txvq->stats, txm);
1833 	}
1834 
1835 	txvq->stats.packets += nb_tx;
1836 
1837 	if (likely(nb_tx)) {
1838 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1839 			virtqueue_notify(vq);
1840 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1841 		}
1842 	}
1843 
1844 	return nb_tx;
1845 }
1846 
1847 uint16_t
1848 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1849 {
1850 	struct virtnet_tx *txvq = tx_queue;
1851 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1852 	struct virtio_hw *hw = vq->hw;
1853 	uint16_t hdr_size = hw->vtnet_hdr_size;
1854 	uint16_t nb_used, nb_tx = 0;
1855 
1856 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1857 		return nb_tx;
1858 
1859 	if (unlikely(nb_pkts < 1))
1860 		return nb_pkts;
1861 
1862 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1863 
1864 	nb_used = virtqueue_nused(vq);
1865 
1866 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1867 		virtio_xmit_cleanup(vq, nb_used);
1868 
1869 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1870 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1871 		int can_push = 0, use_indirect = 0, slots, need;
1872 
1873 		/* optimize ring usage */
1874 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1875 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1876 		    rte_mbuf_refcnt_read(txm) == 1 &&
1877 		    RTE_MBUF_DIRECT(txm) &&
1878 		    txm->nb_segs == 1 &&
1879 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1880 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1881 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1882 			can_push = 1;
1883 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1884 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1885 			use_indirect = 1;
1886 
1887 		/* How many main ring entries are needed for this Tx?
1888 		 * any_layout => number of segments
1889 		 * indirect   => 1
1890 		 * default    => number of segments + 1
1891 		 */
1892 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1893 		need = slots - vq->vq_free_cnt;
1894 
1895 		/* A positive value means free vring descriptors are still needed */
1896 		if (unlikely(need > 0)) {
1897 			nb_used = virtqueue_nused(vq);
1898 
1899 			need = RTE_MIN(need, (int)nb_used);
1900 
1901 			virtio_xmit_cleanup(vq, need);
1902 			need = slots - vq->vq_free_cnt;
1903 			if (unlikely(need > 0)) {
1904 				PMD_TX_LOG(ERR,
1905 					   "No free tx descriptors to transmit");
1906 				break;
1907 			}
1908 		}
1909 
1910 		/* Enqueue Packet buffers */
1911 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1912 			can_push, 0);
1913 
1914 		virtio_update_packet_stats(&txvq->stats, txm);
1915 	}
1916 
1917 	txvq->stats.packets += nb_tx;
1918 
1919 	if (likely(nb_tx)) {
1920 		vq_update_avail_idx(vq);
1921 
1922 		if (unlikely(virtqueue_kick_prepare(vq))) {
1923 			virtqueue_notify(vq);
1924 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1925 		}
1926 	}
1927 
1928 	return nb_tx;
1929 }
1930 
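/*
 * Clean up used descriptors until at least 'need' slots are free. Returns the
 * number of slots still missing; a value <= 0 means the caller may proceed.
 */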
1931 static __rte_always_inline int
1932 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1933 {
1934 	uint16_t nb_used, nb_clean, nb_descs;
1935 
1936 	nb_descs = vq->vq_free_cnt + need;
1937 	nb_used = virtqueue_nused(vq);
1938 	nb_clean = RTE_MIN(need, (int)nb_used);
1939 
1940 	virtio_xmit_cleanup_inorder(vq, nb_clean);
1941 
1942 	return nb_descs - vq->vq_free_cnt;
1943 }
1944 
1945 uint16_t
1946 virtio_xmit_pkts_inorder(void *tx_queue,
1947 			struct rte_mbuf **tx_pkts,
1948 			uint16_t nb_pkts)
1949 {
1950 	struct virtnet_tx *txvq = tx_queue;
1951 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1952 	struct virtio_hw *hw = vq->hw;
1953 	uint16_t hdr_size = hw->vtnet_hdr_size;
1954 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1955 	struct rte_mbuf *inorder_pkts[nb_pkts];
1956 	int need;
1957 
1958 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1959 		return nb_tx;
1960 
1961 	if (unlikely(nb_pkts < 1))
1962 		return nb_pkts;
1963 
1964 	VIRTQUEUE_DUMP(vq);
1965 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1966 	nb_used = virtqueue_nused(vq);
1967 
1968 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1969 		virtio_xmit_cleanup_inorder(vq, nb_used);
1970 
1971 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1972 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1973 		int slots;
1974 
1975 		/* optimize ring usage */
1976 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1977 		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1978 		     rte_mbuf_refcnt_read(txm) == 1 &&
1979 		     RTE_MBUF_DIRECT(txm) &&
1980 		     txm->nb_segs == 1 &&
1981 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
1982 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1983 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1984 			inorder_pkts[nb_inorder_pkts] = txm;
1985 			nb_inorder_pkts++;
1986 
1987 			continue;
1988 		}
1989 
1990 		if (nb_inorder_pkts) {
1991 			need = nb_inorder_pkts - vq->vq_free_cnt;
1992 			if (unlikely(need > 0)) {
1993 				need = virtio_xmit_try_cleanup_inorder(vq,
1994 								       need);
1995 				if (unlikely(need > 0)) {
1996 					PMD_TX_LOG(ERR,
1997 						"No free tx descriptors to "
1998 						"transmit");
1999 					break;
2000 				}
2001 			}
2002 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2003 							nb_inorder_pkts);
2004 			nb_inorder_pkts = 0;
2005 		}
2006 
2007 		slots = txm->nb_segs + 1;
2008 		need = slots - vq->vq_free_cnt;
2009 		if (unlikely(need > 0)) {
2010 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
2011 
2012 			if (unlikely(need > 0)) {
2013 				PMD_TX_LOG(ERR,
2014 					"No free tx descriptors to transmit");
2015 				break;
2016 			}
2017 		}
2018 		/* Enqueue Packet buffers */
2019 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2020 
2021 		virtio_update_packet_stats(&txvq->stats, txm);
2022 	}
2023 
2024 	/* Transmit all inorder packets */
2025 	if (nb_inorder_pkts) {
2026 		need = nb_inorder_pkts - vq->vq_free_cnt;
2027 		if (unlikely(need > 0)) {
2028 			need = virtio_xmit_try_cleanup_inorder(vq,
2029 								  need);
2030 			if (unlikely(need > 0)) {
2031 				PMD_TX_LOG(ERR,
2032 					"No free tx descriptors to transmit");
2033 				nb_inorder_pkts = vq->vq_free_cnt;
2034 				nb_tx -= need;
2035 			}
2036 		}
2037 
2038 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2039 						nb_inorder_pkts);
2040 	}
2041 
2042 	txvq->stats.packets += nb_tx;
2043 
2044 	if (likely(nb_tx)) {
2045 		vq_update_avail_idx(vq);
2046 
2047 		if (unlikely(virtqueue_kick_prepare(vq))) {
2048 			virtqueue_notify(vq);
2049 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2050 		}
2051 	}
2052 
2053 	VIRTQUEUE_DUMP(vq);
2054 
2055 	return nb_tx;
2056 }
2057 
2058 __rte_weak uint16_t
2059 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2060 			    struct rte_mbuf **rx_pkts __rte_unused,
2061 			    uint16_t nb_pkts __rte_unused)
2062 {
2063 	return 0;
2064 }
2065 
2066 __rte_weak uint16_t
2067 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2068 			    struct rte_mbuf **tx_pkts __rte_unused,
2069 			    uint16_t nb_pkts __rte_unused)
2070 {
2071 	return 0;
2072 }
2073