/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#include "virtio_ring.h"

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif

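/*
 * In-order free: with VIRTIO_F_IN_ORDER the device uses descriptors in
 * ring order, so freeing reduces to bumping the free counter and
 * advancing the tail index; no per-descriptor chain walk is needed.
 */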
void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
	vq->vq_free_cnt += num;
	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
}

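/*
 * Walk the descriptor chain starting at desc_idx, reclaim all of its
 * slots, and splice the chain back onto the tail of the virtqueue's
 * free list.
 */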
void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp  = &vq->vq_split.ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_split.ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
	uint32_t s = mbuf->pkt_len;
	struct rte_ether_addr *ea;

	stats->bytes += s;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count leading zeros and offset into the correct bin */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}

static inline void
virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
{
	VIRTIO_DUMP_PACKET(m, m->data_len);

	virtio_update_packet_stats(&rxvq->stats, m);
}

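/*
 * Packed-ring receive: poll the descriptor at the consumer index until
 * its AVAIL/USED flags (checked against the used wrap counter) show
 * that the device has filled it, then hand the attached mbuf back to
 * the caller. Crossing the ring boundary flips the wrap counter.
 */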
static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
				  struct rte_mbuf **rx_pkts,
				  uint32_t *len,
				  uint16_t num)
{
	struct rte_mbuf *cookie;
	uint16_t used_idx;
	uint16_t id;
	struct vring_packed_desc *desc;
	uint16_t i;

	desc = vq->vq_packed.ring.desc;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx;
		/* desc_is_used() has a load-acquire or rte_io_rmb inside
		 * to wait for the used descriptor in the virtqueue.
		 */
		if (!desc_is_used(&desc[used_idx], vq))
			return i;
		len[i] = desc[used_idx].len;
		id = desc[used_idx].id;
		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}
		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;

		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
	}

	return i;
}

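/*
 * Split-ring receive: consume entries from the used ring; each entry
 * carries the head descriptor id and the number of bytes the device
 * wrote, and the freed descriptor chain is returned to the free list.
 */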
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

static uint16_t
virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
			struct rte_mbuf **rx_pkts,
			uint32_t *len,
			uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx = 0;
	uint16_t i;

	if (unlikely(num == 0))
		return 0;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		/* Desc idx same as used idx */
		uep = &vq->vq_split.ring.used->ring[used_idx];
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq->vq_descx[used_idx].cookie = NULL;
	}

	vq_ring_free_inorder(vq, used_idx, i);
	return i;
}

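/*
 * In-order refill: descriptors are populated sequentially from the head
 * index, one mbuf per slot. The buffer address is rewound by the virtio
 * net header size so the device can write the header into the mbuf
 * headroom, directly in front of the packet data.
 */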
static inline int
virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t head_idx, idx, i = 0;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
	start_dp = vq->vq_split.ring.desc;

	while (i < num) {
		idx = head_idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;

		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookies[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;

		vq_update_avail_ring(vq, idx);
		head_idx++;
		i++;
	}

	vq->vq_desc_head_idx += num;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
				uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp = vq->vq_split.ring.desc;
	uint16_t idx, i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
		return -EFAULT;

	for (i = 0; i < num; i++) {
		idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
			hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;
		vq->vq_desc_head_idx = start_dp[idx].next;
		vq_update_avail_ring(vq, idx);
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
			break;
		}
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);

	return 0;
}

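/*
 * Write one receive descriptor in the packed ring. The flags store
 * comes last and virtqueue_store_flags_packed() uses a store-release
 * (when weak barriers are enabled) so the device cannot observe the
 * descriptor before addr/len are set. Crossing the end of the ring
 * flips the cached AVAIL/USED flags to match the new wrap state.
 */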
static inline void
virtqueue_refill_single_packed(struct virtqueue *vq,
			       struct vring_packed_desc *dp,
			       struct rte_mbuf *cookie)
{
	uint16_t flags = vq->vq_packed.cached_flags;
	struct virtio_hw *hw = vq->hw;

	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
	dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;

	virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);

	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^=
			VRING_PACKED_DESC_F_AVAIL_USED;
	}
}

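/*
 * Initial packed-ring refill, used when the queue is first populated:
 * at that point descriptor ids still equal their ring position, so
 * vq_descx is indexed by the avail index directly. The regular refill
 * variant below instead reuses the id already present in the used
 * descriptor being recycled.
 */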
static inline int
virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
				     struct rte_mbuf **cookie, uint16_t num)
{
	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
	struct vq_desc_extra *dxp;
	uint16_t idx;
	int i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	for (i = 0; i < num; i++) {
		idx = vq->vq_avail_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
	}
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

static inline int
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
				     struct rte_mbuf **cookie, uint16_t num)
{
	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
	struct vq_desc_extra *dxp;
	uint16_t idx, did;
	int i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	for (i = 0; i < num; i++) {
		idx = vq->vq_avail_idx;
		did = start_dp[idx].id;
		dxp = &vq->vq_descx[did];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
	}
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
 * expected.
 */
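/*
 * Example of the one's-complement fix-up below: with an existing
 * pseudo-header checksum of 0xfffe and an IP payload length of 0x0003,
 * tmp = 0x10001; folding the carry back in via
 * (tmp & 0xffff) + (tmp >> 16) yields the new checksum 0x0002.
 */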
static void
virtio_tso_fix_cksum(struct rte_mbuf *m)
{
	/* common case: header is not fragmented */
	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
			m->l4_len)) {
		struct rte_ipv4_hdr *iph;
		struct rte_ipv6_hdr *ip6h;
		struct rte_tcp_hdr *th;
		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
		uint32_t tmp;

		iph = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv4_hdr *, m->l2_len);
		th = RTE_PTR_ADD(iph, m->l3_len);
		if ((iph->version_ihl >> 4) == 4) {
			iph->hdr_checksum = 0;
			iph->hdr_checksum = rte_ipv4_cksum(iph);
			ip_len = iph->total_length;
			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
				m->l3_len);
		} else {
			ip6h = (struct rte_ipv6_hdr *)iph;
			ip_paylen = ip6h->payload_len;
		}

		/* calculate the new phdr checksum not including ip_paylen */
		prev_cksum = th->cksum;
		tmp = prev_cksum;
		tmp += ip_paylen;
		tmp = (tmp & 0xffff) + (tmp >> 16);
		new_cksum = tmp;

		/* replace it in the packet */
		th->cksum = new_cksum;
	}
}

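/*
 * In-order transmit fast path: each packet occupies a single
 * descriptor, and the virtio net header is pushed into the mbuf
 * headroom immediately in front of the packet data, so header and data
 * are covered by one contiguous buffer.
 */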
static inline void
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct vring_desc *start_dp;
	struct virtio_net_hdr *hdr;
	uint16_t idx;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t i = 0;

	idx = vq->vq_desc_head_idx;
	start_dp = vq->vq_split.ring.desc;

	while (i < num) {
		idx = idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;
		virtio_update_packet_stats(&txvq->stats, cookies[i]);

		hdr = rte_pktmbuf_mtod_offset(cookies[i],
				struct virtio_net_hdr *, -head_size);

		/* if offload disabled, hdr is not zeroed yet, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
		else
			virtqueue_xmit_offload(hdr, cookies[i]);

		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
		start_dp[idx].len = cookies[i]->data_len + head_size;
		start_dp[idx].flags = 0;

		vq_update_avail_ring(vq, idx);

		idx++;
		i++;
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}

static inline void
virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
				   struct rte_mbuf *cookie,
				   int in_order)
{
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct vring_packed_desc *dp;
	struct vq_desc_extra *dxp;
	uint16_t idx, id, flags;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;

	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
	idx = vq->vq_avail_idx;
	dp = &vq->vq_packed.ring.desc[idx];

	dxp = &vq->vq_descx[id];
	dxp->ndescs = 1;
	dxp->cookie = cookie;

	flags = vq->vq_packed.cached_flags;

	/* prepend cannot fail, checked by caller */
	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
				      -head_size);

	/* if offload disabled, hdr is not zeroed yet, do it now */
	if (!vq->hw->has_tx_offload)
		virtqueue_clear_net_hdr(hdr);
	else
		virtqueue_xmit_offload(hdr, cookie);

	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
	dp->len = cookie->data_len + head_size;
	dp->id = id;

	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}

	vq->vq_free_cnt--;

	if (!in_order) {
		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}

	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
}

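/*
 * General split-ring transmit. Three layouts are possible:
 *   - can_push:     the header is prepended in the mbuf headroom, so
 *                   the packet needs nb_segs descriptors;
 *   - use_indirect: one main-ring slot points at an indirect table
 *                   holding header + nb_segs entries;
 *   - default:      one slot for the header kept in the reserved
 *                   region plus nb_segs slots for the data.
 */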
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			uint16_t needed, int use_indirect, int can_push,
			int in_order)
{
	struct virtio_tx_region *txr = txvq->hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	bool prepend_header = false;
	struct virtio_net_hdr *hdr;

	head_idx = vq->vq_desc_head_idx;
	idx = head_idx;
	if (in_order)
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
	else
		dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_split.ring.desc;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
					      -head_size);
		prepend_header = true;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;
	}

	if (vq->hw->has_tx_offload)
		virtqueue_xmit_offload(hdr, cookie);

	do {
		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len = cookie->data_len;
		if (prepend_header) {
			start_dp[idx].addr -= head_size;
			start_dp[idx].len += head_size;
			prepend_header = false;
		}
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

	if (use_indirect)
		idx = vq->vq_split.ring.desc[head_idx].next;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

	vq->vq_desc_head_idx = idx;
	vq_update_avail_ring(vq, head_idx);

	if (!in_order) {
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = idx;
	}
}

void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (hw->cvq) {
		rte_spinlock_init(&hw->cvq->lock);
		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
	}
}

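/*
 * Per the virtio spec, receiveq N sits at virtqueue index 2*N and
 * transmitq N at 2*N + 1, which is what the vq_idx arithmetic in the
 * queue setup functions below encodes.
 */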
int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];
	struct virtnet_rx *rxvq;
	uint16_t rx_free_thresh;
	uint16_t buf_size;
	const char *error;

	PMD_INIT_FUNC_TRACE();

	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
		return -EINVAL;
	}

	buf_size = virtio_rx_mem_pool_buf_size(mp);
	if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
				     hw->rx_ol_scatter, &error)) {
		PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
			     queue_idx, error);
		return -EINVAL;
	}

	rx_free_thresh = rx_conf->rx_free_thresh;
	if (rx_free_thresh == 0)
		rx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);

	if (rx_free_thresh & 0x3) {
		PMD_INIT_LOG(ERR, "rx_free_thresh must be a multiple of four."
			" (rx_free_thresh=%u port=%u queue=%u)",
			rx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	if (rx_free_thresh >= vq->vq_nentries) {
		PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
			"number of RX entries (%u)."
			" (rx_free_thresh=%u port=%u queue=%u)",
			vq->vq_nentries,
			rx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}
	vq->vq_free_thresh = rx_free_thresh;

	/*
	 * For the split ring vectorized path, the number of descriptors
	 * must be equal to the ring size.
	 */
	if (nb_desc > vq->vq_nentries ||
	    (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
		nb_desc = vq->vq_nentries;
	}
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	rxvq = &vq->rxq;
	rxvq->mpool = mp;
	dev->data->rx_queues[queue_idx] = rxvq;

	return 0;
}

int
virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];
	struct virtnet_rx *rxvq = &vq->rxq;
	struct rte_mbuf *m;
	uint16_t desc_idx;
	int error, nbufs, i;
	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);

	PMD_INIT_FUNC_TRACE();

	/* Allocate blank mbufs for each rx descriptor */
	nbufs = 0;

	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
		for (desc_idx = 0; desc_idx < vq->vq_nentries;
		     desc_idx++) {
			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
			vq->vq_split.ring.desc[desc_idx].flags =
				VRING_DESC_F_WRITE;
		}

		virtio_rxq_vec_setup(rxvq);
	}

	if (hw->use_vec_rx) {
		memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
			vq->rxq.sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
	}

	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxvq);
			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		}
	} else if (!virtio_with_packed_queue(vq->hw) && in_order) {
		if (!virtqueue_full(vq)) {
			uint16_t free_cnt = vq->vq_free_cnt;
			struct rte_mbuf *pkts[free_cnt];

			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
				free_cnt)) {
				error = virtqueue_enqueue_refill_inorder(vq,
						pkts,
						free_cnt);
				if (unlikely(error)) {
					for (i = 0; i < free_cnt; i++)
						rte_pktmbuf_free(pkts[i]);
				} else {
					nbufs += free_cnt;
				}
			}

			vq_update_avail_idx(vq);
		}
	} else {
		while (!virtqueue_full(vq)) {
			m = rte_mbuf_raw_alloc(rxvq->mpool);
			if (m == NULL)
				break;

			/* Enqueue allocated buffers */
			if (virtio_with_packed_queue(vq->hw))
				error = virtqueue_enqueue_recv_refill_packed_init(vq,
						&m, 1);
			else
				error = virtqueue_enqueue_recv_refill(vq,
						&m, 1);
			if (error) {
				rte_pktmbuf_free(m);
				break;
			}
			nbufs++;
		}

		if (!virtio_with_packed_queue(vq->hw))
			vq_update_avail_idx(vq);
	}

	PMD_INIT_LOG(DEBUG, "Allocated %d bufs (port=%u queue=%u)", nbufs,
		     dev->data->port_id, queue_idx);

	VIRTQUEUE_DUMP(vq);

	return 0;
}

/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];
	struct virtnet_tx *txvq;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
		return -EINVAL;
	}

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	txvq = &vq->txq;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
			"number of TX entries minus 3 (%u)."
			" (tx_free_thresh=%u port=%u queue=%u)",
			vq->vq_nentries - 3,
			tx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	vq->vq_free_thresh = tx_free_thresh;

	dev->data->tx_queues[queue_idx] = txvq;
	return 0;
}

int
virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
				uint16_t queue_idx)
{
	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];

	PMD_INIT_FUNC_TRACE();

	if (!virtio_with_packed_queue(hw)) {
		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
	}

	VIRTQUEUE_DUMP(vq);

	return 0;
}

static inline void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;
	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	if (virtio_with_packed_queue(vq->hw))
		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
	else
		error = virtqueue_enqueue_recv_refill(vq, &m, 1);

	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

static inline void
virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

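/*
 * When VIRTIO_NET_HDR_F_NEEDS_CSUM is set, the device delivered the
 * packet with only a partial checksum: csum_start/csum_offset say where
 * the final 16-bit checksum must be written. For L4 protocols the
 * stack understands, this is reported as CKSUM_NONE; otherwise the
 * checksum is completed in software below.
 */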
/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	/* nothing to do */
	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return 0;

	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
				&csum) < 0)
				return -EINVAL;
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
					off) = csum;
		}
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	}

	/* GSO request, save required information in mbuf */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Check unsupported modes */
		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
		    (hdr->gso_size == 0)) {
			return -EINVAL;
		}

		/* Update mss lengths in mbuf */
		m->tso_segsz = hdr->gso_size;
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			m->ol_flags |= RTE_MBUF_F_RX_LRO |
				RTE_MBUF_F_RX_L4_CKSUM_NONE;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
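/*
 * The receive paths below trim each burst so vq_used_cons_idx ends on a
 * descriptor cache-line boundary; the next burst then starts on a fresh
 * cache line, which avoids re-reading a partially consumed line.
 */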
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = hw->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "dequeue:%d", num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = hw->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

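/*
 * Mergeable Rx buffers (VIRTIO_NET_F_MRG_RXBUF): a packet larger than
 * one receive buffer is spread over num_buffers descriptors, with the
 * virtio net header present only in the first one. The receive paths
 * below chain the extra buffers onto the head mbuf.
 */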
uint16_t
virtio_recv_pkts_inorder(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued;
	uint32_t seg_num;
	uint32_t seg_res;
	uint32_t hdr_size;
	int32_t i;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);
	nb_used = RTE_MIN(nb_used, nb_pkts);
	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	nb_enqueued = 0;
	seg_num = 1;
	seg_res = 0;
	hdr_size = hw->vtnet_hdr_size;

	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);

		if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			seg_num = header->num_buffers;
			if (seg_num == 0)
				seg_num = 1;
		} else {
			seg_num = 1;
		}

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = hw->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (vq->hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
							   rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR,
					"Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */

	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t nb_used, num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	int32_t i;

	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) %
				DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);
		seg_num = header->num_buffers;
		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = hw->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
							   rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR,
					"Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts_packed(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	int32_t i;
	int error;

	if (unlikely(hw->started == 0))
		return nb_rx;

	num = nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
		seg_num = header->num_buffers;

		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = hw->port_id;
		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);
		uint16_t extra_idx = 0;

		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
				len, rcv_cnt);
		if (unlikely(rcv_cnt == 0)) {
			PMD_RX_LOG(ERR, "Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}

		while (extra_idx < rcv_cnt) {
			rxm = rcv_pkts[extra_idx];

			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[extra_idx]);
			rxm->data_len = (uint16_t)(len[extra_idx]);

			prev->next = rxm;
			prev = rxm;
			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
			extra_idx += 1;
		}
		seg_res -= rcv_cnt;
		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

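/*
 * Tx prepare stage (the driver's tx_pkt_prepare callback): validates
 * offload requests, performs software VLAN insertion, and converts
 * checksum fields to the pseudo-header form virtio expects before the
 * burst is handed to the transmit function.
 */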
uint16_t
virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t nb_tx;
	int error;

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		error = rte_validate_tx_offload(m);
		if (unlikely(error)) {
			rte_errno = -error;
			break;
		}
#endif

		/* Do VLAN tag insertion */
		if (unlikely(m->ol_flags & RTE_MBUF_F_TX_VLAN)) {
			error = rte_vlan_insert(&m);
			/* rte_vlan_insert() may change pointer
			 * even in the case of failure
			 */
			tx_pkts[nb_tx] = m;

			if (unlikely(error)) {
				rte_errno = -error;
				break;
			}
		}

		error = rte_net_intel_cksum_prepare(m);
		if (unlikely(error)) {
			rte_errno = -error;
			break;
		}

		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			virtio_tso_fix_cksum(m);
	}

	return nb_tx;
}

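/*
 * A packet qualifies for the can_push fast path only if the header can
 * be written directly in front of the data: single direct segment,
 * sole mbuf owner, enough headroom for the header, and suitable
 * alignment of the data pointer.
 */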
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_tx = 0;
	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	if (nb_pkts > vq->vq_free_cnt)
		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
					   in_order);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* optimize ring usage */
		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;
		/* How many main ring entries are needed for this Tx?
		 * indirect   => 1
		 * any_layout => number of segments
		 * default    => number of segments + 1
		 */
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* A positive value indicates we need free vring descriptors */
		if (unlikely(need > 0)) {
			virtio_xmit_cleanup_packed(vq, need, in_order);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		if (can_push)
			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
		else
			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
						      use_indirect, 0,
						      in_order);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* optimize ring usage */
		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;

		/* How many main ring entries are needed for this Tx?
		 * any_layout => number of segments
		 * indirect   => 1
		 * default    => number of segments + 1
		 */
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* A positive value indicates we need free vring descriptors */
		if (unlikely(need > 0)) {
			nb_used = virtqueue_nused(vq);

			need = RTE_MIN(need, (int)nb_used);

			virtio_xmit_cleanup(vq, need);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
			can_push, 0);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

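/*
 * Try to reclaim up to 'need' descriptors from the used ring; returns
 * how many are still missing (<= 0 means enough descriptors are free).
 */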
static __rte_always_inline int
virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
{
	uint16_t nb_used, nb_clean, nb_descs;

	nb_descs = vq->vq_free_cnt + need;
	nb_used = virtqueue_nused(vq);
	nb_clean = RTE_MIN(need, (int)nb_used);

	virtio_xmit_cleanup_inorder(vq, nb_clean);

	return nb_descs - vq->vq_free_cnt;
}

uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
	struct rte_mbuf *inorder_pkts[nb_pkts];
	int need;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	VIRTQUEUE_DUMP(vq);
	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup_inorder(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int slots;

		/* optimize ring usage */
		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		     rte_mbuf_refcnt_read(txm) == 1 &&
		     RTE_MBUF_DIRECT(txm) &&
		     txm->nb_segs == 1 &&
		     rte_pktmbuf_headroom(txm) >= hdr_size &&
		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
			inorder_pkts[nb_inorder_pkts] = txm;
			nb_inorder_pkts++;

			continue;
		}

		if (nb_inorder_pkts) {
			need = nb_inorder_pkts - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				need = virtio_xmit_try_cleanup_inorder(vq,
								       need);
				if (unlikely(need > 0)) {
					PMD_TX_LOG(ERR,
						"No free tx descriptors to "
						"transmit");
					break;
				}
			}
			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
							nb_inorder_pkts);
			nb_inorder_pkts = 0;
		}

		slots = txm->nb_segs + 1;
		need = slots - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq, slots);

			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					"No free tx descriptors to transmit");
				break;
			}
		}
		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	/* Transmit all inorder packets */
	if (nb_inorder_pkts) {
		need = nb_inorder_pkts - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq,
								  need);
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					"No free tx descriptors to transmit");
				nb_inorder_pkts = vq->vq_free_cnt;
				nb_tx -= need;
			}
		}

		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
						nb_inorder_pkts);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	VIRTQUEUE_DUMP(vq);

	return nb_tx;
}

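/*
 * Weak fallbacks: overridden at link time by the architecture-specific
 * vectorized implementations when those are built.
 */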
__rte_weak uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
			    struct rte_mbuf **rx_pkts __rte_unused,
			    uint16_t nb_pkts __rte_unused)
{
	return 0;
}

__rte_weak uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
			    struct rte_mbuf **tx_pkts __rte_unused,
			    uint16_t nb_pkts __rte_unused)
{
	return 0;
}