xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision 5b088007afdf5c5b9115df20a724a8c9ceb15c42)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
42 void
43 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
44 {
45 	vq->vq_free_cnt += num;
46 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
47 }
48 
49 void
50 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
51 {
52 	struct vring_desc *dp, *dp_tail;
53 	struct vq_desc_extra *dxp;
54 	uint16_t desc_idx_last = desc_idx;
55 
56 	dp  = &vq->vq_split.ring.desc[desc_idx];
57 	dxp = &vq->vq_descx[desc_idx];
58 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
59 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
60 		while (dp->flags & VRING_DESC_F_NEXT) {
61 			desc_idx_last = dp->next;
62 			dp = &vq->vq_split.ring.desc[dp->next];
63 		}
64 	}
65 	dxp->ndescs = 0;
66 
67 	/*
68 	 * We must append the existing free chain, if any, to the end of
69 	 * newly freed chain. If the virtqueue was completely used, then
70 	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
71 	 */
72 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
73 		vq->vq_desc_head_idx = desc_idx;
74 	} else {
75 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
76 		dp_tail->next = desc_idx;
77 	}
78 
79 	vq->vq_desc_tail_idx = desc_idx_last;
80 	dp->next = VQ_RING_DESC_CHAIN_END;
81 }
82 
83 void
84 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
85 {
86 	uint32_t s = mbuf->pkt_len;
87 	struct rte_ether_addr *ea;
88 
89 	stats->bytes += s;
90 
91 	if (s == 64) {
92 		stats->size_bins[1]++;
93 	} else if (s > 64 && s < 1024) {
94 		uint32_t bin;
95 
96 		/* count zeros, and offset into correct bin */
97 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
98 		stats->size_bins[bin]++;
99 	} else {
100 		if (s < 64)
101 			stats->size_bins[0]++;
102 		else if (s < 1519)
103 			stats->size_bins[6]++;
104 		else
105 			stats->size_bins[7]++;
106 	}
107 
108 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
109 	if (rte_is_multicast_ether_addr(ea)) {
110 		if (rte_is_broadcast_ether_addr(ea))
111 			stats->broadcast++;
112 		else
113 			stats->multicast++;
114 	}
115 }
116 
117 static inline void
118 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
119 {
120 	VIRTIO_DUMP_PACKET(m, m->data_len);
121 
122 	virtio_update_packet_stats(&rxvq->stats, m);
123 }
124 
125 static uint16_t
126 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
127 				  struct rte_mbuf **rx_pkts,
128 				  uint32_t *len,
129 				  uint16_t num)
130 {
131 	struct rte_mbuf *cookie;
132 	uint16_t used_idx;
133 	uint16_t id;
134 	struct vring_packed_desc *desc;
135 	uint16_t i;
136 
137 	desc = vq->vq_packed.ring.desc;
138 
139 	for (i = 0; i < num; i++) {
140 		used_idx = vq->vq_used_cons_idx;
141 		/* desc_is_used has a load-acquire or rte_io_rmb inside
142 		 * and wait for used desc in virtqueue.
143 		 */
144 		if (!desc_is_used(&desc[used_idx], vq))
145 			return i;
146 		len[i] = desc[used_idx].len;
147 		id = desc[used_idx].id;
148 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
149 		if (unlikely(cookie == NULL)) {
150 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
151 				vq->vq_used_cons_idx);
152 			break;
153 		}
154 		rte_prefetch0(cookie);
155 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
156 		rx_pkts[i] = cookie;
157 
158 		vq->vq_free_cnt++;
159 		vq->vq_used_cons_idx++;
160 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
161 			vq->vq_used_cons_idx -= vq->vq_nentries;
162 			vq->vq_packed.used_wrap_counter ^= 1;
163 		}
164 	}
165 
166 	return i;
167 }
168 
169 static uint16_t
170 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
171 			   uint32_t *len, uint16_t num)
172 {
173 	struct vring_used_elem *uep;
174 	struct rte_mbuf *cookie;
175 	uint16_t used_idx, desc_idx;
176 	uint16_t i;
177 
178 	/*  Caller does the check */
179 	for (i = 0; i < num ; i++) {
180 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
181 		uep = &vq->vq_split.ring.used->ring[used_idx];
182 		desc_idx = (uint16_t) uep->id;
183 		len[i] = uep->len;
184 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
185 
186 		if (unlikely(cookie == NULL)) {
187 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
188 				vq->vq_used_cons_idx);
189 			break;
190 		}
191 
192 		rte_prefetch0(cookie);
193 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
194 		rx_pkts[i]  = cookie;
195 		vq->vq_used_cons_idx++;
196 		vq_ring_free_chain(vq, desc_idx);
197 		vq->vq_descx[desc_idx].cookie = NULL;
198 	}
199 
200 	return i;
201 }
202 
203 static uint16_t
204 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
205 			struct rte_mbuf **rx_pkts,
206 			uint32_t *len,
207 			uint16_t num)
208 {
209 	struct vring_used_elem *uep;
210 	struct rte_mbuf *cookie;
211 	uint16_t used_idx = 0;
212 	uint16_t i;
213 
214 	if (unlikely(num == 0))
215 		return 0;
216 
217 	for (i = 0; i < num; i++) {
218 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
219 		/* Desc idx same as used idx */
220 		uep = &vq->vq_split.ring.used->ring[used_idx];
221 		len[i] = uep->len;
222 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
223 
224 		if (unlikely(cookie == NULL)) {
225 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
226 				vq->vq_used_cons_idx);
227 			break;
228 		}
229 
230 		rte_prefetch0(cookie);
231 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
232 		rx_pkts[i]  = cookie;
233 		vq->vq_used_cons_idx++;
234 		vq->vq_descx[used_idx].cookie = NULL;
235 	}
236 
237 	vq_ring_free_inorder(vq, used_idx, i);
238 	return i;
239 }
240 
241 static inline int
242 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
243 			struct rte_mbuf **cookies,
244 			uint16_t num)
245 {
246 	struct vq_desc_extra *dxp;
247 	struct virtio_hw *hw = vq->hw;
248 	struct vring_desc *start_dp;
249 	uint16_t head_idx, idx, i = 0;
250 
251 	if (unlikely(vq->vq_free_cnt == 0))
252 		return -ENOSPC;
253 	if (unlikely(vq->vq_free_cnt < num))
254 		return -EMSGSIZE;
255 
256 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
257 	start_dp = vq->vq_split.ring.desc;
258 
259 	while (i < num) {
260 		idx = head_idx & (vq->vq_nentries - 1);
261 		dxp = &vq->vq_descx[idx];
262 		dxp->cookie = (void *)cookies[i];
263 		dxp->ndescs = 1;
264 
265 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
266 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
267 		start_dp[idx].len = cookies[i]->buf_len -
268 			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
269 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
270 
271 		vq_update_avail_ring(vq, idx);
272 		head_idx++;
273 		i++;
274 	}
275 
276 	vq->vq_desc_head_idx += num;
277 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
278 	return 0;
279 }
280 
281 static inline int
282 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
283 				uint16_t num)
284 {
285 	struct vq_desc_extra *dxp;
286 	struct virtio_hw *hw = vq->hw;
287 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
288 	uint16_t idx, i;
289 
290 	if (unlikely(vq->vq_free_cnt == 0))
291 		return -ENOSPC;
292 	if (unlikely(vq->vq_free_cnt < num))
293 		return -EMSGSIZE;
294 
295 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
296 		return -EFAULT;
297 
298 	for (i = 0; i < num; i++) {
299 		idx = vq->vq_desc_head_idx;
300 		dxp = &vq->vq_descx[idx];
301 		dxp->cookie = (void *)cookie[i];
302 		dxp->ndescs = 1;
303 
304 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
305 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
306 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
307 			hw->vtnet_hdr_size;
308 		start_dp[idx].flags = VRING_DESC_F_WRITE;
309 		vq->vq_desc_head_idx = start_dp[idx].next;
310 		vq_update_avail_ring(vq, idx);
311 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
312 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
313 			break;
314 		}
315 	}
316 
317 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
318 
319 	return 0;
320 }
321 
322 static inline void
323 virtqueue_refill_single_packed(struct virtqueue *vq,
324 			       struct vring_packed_desc *dp,
325 			       struct rte_mbuf *cookie)
326 {
327 	uint16_t flags = vq->vq_packed.cached_flags;
328 	struct virtio_hw *hw = vq->hw;
329 
330 	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
331 	dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
332 
333 	virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
334 
335 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
336 		vq->vq_avail_idx -= vq->vq_nentries;
337 		vq->vq_packed.cached_flags ^=
338 			VRING_PACKED_DESC_F_AVAIL_USED;
339 		flags = vq->vq_packed.cached_flags;
340 	}
341 }
342 
343 static inline int
344 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
345 				     struct rte_mbuf **cookie, uint16_t num)
346 {
347 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
348 	struct vq_desc_extra *dxp;
349 	uint16_t idx;
350 	int i;
351 
352 	if (unlikely(vq->vq_free_cnt == 0))
353 		return -ENOSPC;
354 	if (unlikely(vq->vq_free_cnt < num))
355 		return -EMSGSIZE;
356 
357 	for (i = 0; i < num; i++) {
358 		idx = vq->vq_avail_idx;
359 		dxp = &vq->vq_descx[idx];
360 		dxp->cookie = (void *)cookie[i];
361 		dxp->ndescs = 1;
362 
363 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
364 	}
365 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
366 	return 0;
367 }
368 
369 static inline int
370 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
371 				     struct rte_mbuf **cookie, uint16_t num)
372 {
373 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
374 	struct vq_desc_extra *dxp;
375 	uint16_t idx, did;
376 	int i;
377 
378 	if (unlikely(vq->vq_free_cnt == 0))
379 		return -ENOSPC;
380 	if (unlikely(vq->vq_free_cnt < num))
381 		return -EMSGSIZE;
382 
383 	for (i = 0; i < num; i++) {
384 		idx = vq->vq_avail_idx;
385 		did = start_dp[idx].id;
386 		dxp = &vq->vq_descx[did];
387 		dxp->cookie = (void *)cookie[i];
388 		dxp->ndescs = 1;
389 
390 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
391 	}
392 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
393 	return 0;
394 }
395 
396 /* When doing TSO, the IP length is not included in the pseudo header
397  * checksum of the packet given to the PMD, but for virtio it is
398  * expected.
399  */
400 static void
401 virtio_tso_fix_cksum(struct rte_mbuf *m)
402 {
403 	/* common case: header is not fragmented */
404 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
405 			m->l4_len)) {
406 		struct rte_ipv4_hdr *iph;
407 		struct rte_ipv6_hdr *ip6h;
408 		struct rte_tcp_hdr *th;
409 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
410 		uint32_t tmp;
411 
412 		iph = rte_pktmbuf_mtod_offset(m,
413 					struct rte_ipv4_hdr *, m->l2_len);
414 		th = RTE_PTR_ADD(iph, m->l3_len);
415 		if ((iph->version_ihl >> 4) == 4) {
416 			iph->hdr_checksum = 0;
417 			iph->hdr_checksum = rte_ipv4_cksum(iph);
418 			ip_len = iph->total_length;
419 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
420 				m->l3_len);
421 		} else {
422 			ip6h = (struct rte_ipv6_hdr *)iph;
423 			ip_paylen = ip6h->payload_len;
424 		}
425 
426 		/* calculate the new phdr checksum not including ip_paylen */
427 		prev_cksum = th->cksum;
428 		tmp = prev_cksum;
429 		tmp += ip_paylen;
430 		tmp = (tmp & 0xffff) + (tmp >> 16);
431 		new_cksum = tmp;
432 
433 		/* replace it in the packet */
434 		th->cksum = new_cksum;
435 	}
436 }
437 
438 
439 
440 
441 static inline void
442 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
443 			struct rte_mbuf **cookies,
444 			uint16_t num)
445 {
446 	struct vq_desc_extra *dxp;
447 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
448 	struct vring_desc *start_dp;
449 	struct virtio_net_hdr *hdr;
450 	uint16_t idx;
451 	int16_t head_size = vq->hw->vtnet_hdr_size;
452 	uint16_t i = 0;
453 
454 	idx = vq->vq_desc_head_idx;
455 	start_dp = vq->vq_split.ring.desc;
456 
457 	while (i < num) {
458 		idx = idx & (vq->vq_nentries - 1);
459 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
460 		dxp->cookie = (void *)cookies[i];
461 		dxp->ndescs = 1;
462 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
463 
464 		hdr = rte_pktmbuf_mtod_offset(cookies[i],
465 				struct virtio_net_hdr *, -head_size);
466 
467 		/* if offload disabled, hdr is not zeroed yet, do it now */
468 		if (!vq->hw->has_tx_offload)
469 			virtqueue_clear_net_hdr(hdr);
470 		else
471 			virtqueue_xmit_offload(hdr, cookies[i]);
472 
473 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
474 		start_dp[idx].len = cookies[i]->data_len + head_size;
475 		start_dp[idx].flags = 0;
476 
477 
478 		vq_update_avail_ring(vq, idx);
479 
480 		idx++;
481 		i++;
482 	};
483 
484 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
485 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
486 }
487 
488 static inline void
489 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
490 				   struct rte_mbuf *cookie,
491 				   int in_order)
492 {
493 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
494 	struct vring_packed_desc *dp;
495 	struct vq_desc_extra *dxp;
496 	uint16_t idx, id, flags;
497 	int16_t head_size = vq->hw->vtnet_hdr_size;
498 	struct virtio_net_hdr *hdr;
499 
500 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
501 	idx = vq->vq_avail_idx;
502 	dp = &vq->vq_packed.ring.desc[idx];
503 
504 	dxp = &vq->vq_descx[id];
505 	dxp->ndescs = 1;
506 	dxp->cookie = cookie;
507 
508 	flags = vq->vq_packed.cached_flags;
509 
510 	/* prepend cannot fail, checked by caller */
511 	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
512 				      -head_size);
513 
514 	/* if offload disabled, hdr is not zeroed yet, do it now */
515 	if (!vq->hw->has_tx_offload)
516 		virtqueue_clear_net_hdr(hdr);
517 	else
518 		virtqueue_xmit_offload(hdr, cookie);
519 
520 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
521 	dp->len = cookie->data_len + head_size;
522 	dp->id = id;
523 
524 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
525 		vq->vq_avail_idx -= vq->vq_nentries;
526 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
527 	}
528 
529 	vq->vq_free_cnt--;
530 
531 	if (!in_order) {
532 		vq->vq_desc_head_idx = dxp->next;
533 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
534 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
535 	}
536 
537 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
538 }
539 
540 static inline void
541 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
542 			uint16_t needed, int use_indirect, int can_push,
543 			int in_order)
544 {
545 	struct virtio_tx_region *txr = txvq->hdr_mz->addr;
546 	struct vq_desc_extra *dxp;
547 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
548 	struct vring_desc *start_dp;
549 	uint16_t seg_num = cookie->nb_segs;
550 	uint16_t head_idx, idx;
551 	int16_t head_size = vq->hw->vtnet_hdr_size;
552 	bool prepend_header = false;
553 	struct virtio_net_hdr *hdr;
554 
555 	head_idx = vq->vq_desc_head_idx;
556 	idx = head_idx;
557 	if (in_order)
558 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
559 	else
560 		dxp = &vq->vq_descx[idx];
561 	dxp->cookie = (void *)cookie;
562 	dxp->ndescs = needed;
563 
564 	start_dp = vq->vq_split.ring.desc;
565 
566 	if (can_push) {
567 		/* prepend cannot fail, checked by caller */
568 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
569 					      -head_size);
570 		prepend_header = true;
571 
572 		/* if offload disabled, it is not zeroed below, do it now */
573 		if (!vq->hw->has_tx_offload)
574 			virtqueue_clear_net_hdr(hdr);
575 	} else if (use_indirect) {
576 		/* setup tx ring slot to point to indirect
577 		 * descriptor list stored in reserved region.
578 		 *
579 		 * the first slot in indirect ring is already preset
580 		 * to point to the header in reserved region
581 		 */
582 		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
583 		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
584 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
585 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
586 
587 		/* loop below will fill in rest of the indirect elements */
588 		start_dp = txr[idx].tx_indir;
589 		idx = 1;
590 	} else {
591 		/* setup first tx ring slot to point to header
592 		 * stored in reserved region.
593 		 */
594 		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
595 		start_dp[idx].len = vq->hw->vtnet_hdr_size;
596 		start_dp[idx].flags = VRING_DESC_F_NEXT;
597 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
598 
599 		idx = start_dp[idx].next;
600 	}
601 
602 	if (vq->hw->has_tx_offload)
603 		virtqueue_xmit_offload(hdr, cookie);
604 
605 	do {
606 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
607 		start_dp[idx].len = cookie->data_len;
608 		if (prepend_header) {
609 			start_dp[idx].addr -= head_size;
610 			start_dp[idx].len += head_size;
611 			prepend_header = false;
612 		}
613 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
614 		idx = start_dp[idx].next;
615 	} while ((cookie = cookie->next) != NULL);
616 
617 	if (use_indirect)
618 		idx = vq->vq_split.ring.desc[head_idx].next;
619 
620 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
621 
622 	vq->vq_desc_head_idx = idx;
623 	vq_update_avail_ring(vq, head_idx);
624 
625 	if (!in_order) {
626 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
627 			vq->vq_desc_tail_idx = idx;
628 	}
629 }
630 
631 void
632 virtio_dev_cq_start(struct rte_eth_dev *dev)
633 {
634 	struct virtio_hw *hw = dev->data->dev_private;
635 
636 	if (hw->cvq) {
637 		rte_spinlock_init(&hw->cvq->lock);
638 		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
639 	}
640 }
641 
642 int
643 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
644 			uint16_t queue_idx,
645 			uint16_t nb_desc,
646 			unsigned int socket_id __rte_unused,
647 			const struct rte_eth_rxconf *rx_conf,
648 			struct rte_mempool *mp)
649 {
650 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
651 	struct virtio_hw *hw = dev->data->dev_private;
652 	struct virtqueue *vq = hw->vqs[vq_idx];
653 	struct virtnet_rx *rxvq;
654 	uint16_t rx_free_thresh;
655 	uint16_t buf_size;
656 	const char *error;
657 
658 	PMD_INIT_FUNC_TRACE();
659 
660 	if (rx_conf->rx_deferred_start) {
661 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
662 		return -EINVAL;
663 	}
664 
665 	buf_size = virtio_rx_mem_pool_buf_size(mp);
666 	if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
667 				     hw->rx_ol_scatter, &error)) {
668 		PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
669 			     queue_idx, error);
670 		return -EINVAL;
671 	}
672 
673 	rx_free_thresh = rx_conf->rx_free_thresh;
674 	if (rx_free_thresh == 0)
675 		rx_free_thresh =
676 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
677 
678 	if (rx_free_thresh & 0x3) {
679 		PMD_INIT_LOG(ERR, "rx_free_thresh must be multiples of four."
680 			" (rx_free_thresh=%u port=%u queue=%u)",
681 			rx_free_thresh, dev->data->port_id, queue_idx);
682 		return -EINVAL;
683 	}
684 
685 	if (rx_free_thresh >= vq->vq_nentries) {
686 		PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
687 			"number of RX entries (%u)."
688 			" (rx_free_thresh=%u port=%u queue=%u)",
689 			vq->vq_nentries,
690 			rx_free_thresh, dev->data->port_id, queue_idx);
691 		return -EINVAL;
692 	}
693 	vq->vq_free_thresh = rx_free_thresh;
694 
695 	/*
696 	 * For split ring vectorized path descriptors number must be
697 	 * equal to the ring size.
698 	 */
699 	if (nb_desc > vq->vq_nentries ||
700 	    (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
701 		nb_desc = vq->vq_nentries;
702 	}
703 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
704 
705 	rxvq = &vq->rxq;
706 	rxvq->mpool = mp;
707 	dev->data->rx_queues[queue_idx] = rxvq;
708 
709 	return 0;
710 }
711 
712 int
713 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
714 {
715 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
716 	struct virtio_hw *hw = dev->data->dev_private;
717 	struct virtqueue *vq = hw->vqs[vq_idx];
718 	struct virtnet_rx *rxvq = &vq->rxq;
719 	struct rte_mbuf *m;
720 	uint16_t desc_idx;
721 	int error, nbufs, i;
722 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
723 
724 	PMD_INIT_FUNC_TRACE();
725 
726 	/* Allocate blank mbufs for the each rx descriptor */
727 	nbufs = 0;
728 
729 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
730 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
731 		     desc_idx++) {
732 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
733 			vq->vq_split.ring.desc[desc_idx].flags =
734 				VRING_DESC_F_WRITE;
735 		}
736 
737 		virtio_rxq_vec_setup(rxvq);
738 	}
739 
740 	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
741 	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
742 		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
743 
744 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
745 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
746 			virtio_rxq_rearm_vec(rxvq);
747 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
748 		}
749 	} else if (!virtio_with_packed_queue(vq->hw) && in_order) {
750 		if ((!virtqueue_full(vq))) {
751 			uint16_t free_cnt = vq->vq_free_cnt;
752 			struct rte_mbuf *pkts[free_cnt];
753 
754 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
755 				free_cnt)) {
756 				error = virtqueue_enqueue_refill_inorder(vq,
757 						pkts,
758 						free_cnt);
759 				if (unlikely(error)) {
760 					for (i = 0; i < free_cnt; i++)
761 						rte_pktmbuf_free(pkts[i]);
762 				} else {
763 					nbufs += free_cnt;
764 				}
765 			}
766 
767 			vq_update_avail_idx(vq);
768 		}
769 	} else {
770 		while (!virtqueue_full(vq)) {
771 			m = rte_mbuf_raw_alloc(rxvq->mpool);
772 			if (m == NULL)
773 				break;
774 
775 			/* Enqueue allocated buffers */
776 			if (virtio_with_packed_queue(vq->hw))
777 				error = virtqueue_enqueue_recv_refill_packed_init(vq,
778 						&m, 1);
779 			else
780 				error = virtqueue_enqueue_recv_refill(vq,
781 						&m, 1);
782 			if (error) {
783 				rte_pktmbuf_free(m);
784 				break;
785 			}
786 			nbufs++;
787 		}
788 
789 		if (!virtio_with_packed_queue(vq->hw))
790 			vq_update_avail_idx(vq);
791 	}
792 
793 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs (port=%u queue=%u)", nbufs,
794 		     dev->data->port_id, queue_idx);
795 
796 	VIRTQUEUE_DUMP(vq);
797 
798 	return 0;
799 }
800 
801 /*
802  * struct rte_eth_dev *dev: Used to update dev
803  * uint16_t nb_desc: Defaults to values read from config space
804  * unsigned int socket_id: Used to allocate memzone
805  * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
806  * uint16_t queue_idx: Just used as an index in dev txq list
807  */
808 int
809 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
810 			uint16_t queue_idx,
811 			uint16_t nb_desc,
812 			unsigned int socket_id __rte_unused,
813 			const struct rte_eth_txconf *tx_conf)
814 {
815 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
816 	struct virtio_hw *hw = dev->data->dev_private;
817 	struct virtqueue *vq = hw->vqs[vq_idx];
818 	struct virtnet_tx *txvq;
819 	uint16_t tx_free_thresh;
820 
821 	PMD_INIT_FUNC_TRACE();
822 
823 	if (tx_conf->tx_deferred_start) {
824 		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
825 		return -EINVAL;
826 	}
827 
828 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
829 		nb_desc = vq->vq_nentries;
830 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
831 
832 	txvq = &vq->txq;
833 
834 	tx_free_thresh = tx_conf->tx_free_thresh;
835 	if (tx_free_thresh == 0)
836 		tx_free_thresh =
837 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
838 
839 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
840 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
841 			"number of TX entries minus 3 (%u)."
842 			" (tx_free_thresh=%u port=%u queue=%u)",
843 			vq->vq_nentries - 3,
844 			tx_free_thresh, dev->data->port_id, queue_idx);
845 		return -EINVAL;
846 	}
847 
848 	vq->vq_free_thresh = tx_free_thresh;
849 
850 	dev->data->tx_queues[queue_idx] = txvq;
851 	return 0;
852 }
853 
854 int
855 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
856 				uint16_t queue_idx)
857 {
858 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
859 	struct virtio_hw *hw = dev->data->dev_private;
860 	struct virtqueue *vq = hw->vqs[vq_idx];
861 
862 	PMD_INIT_FUNC_TRACE();
863 
864 	if (!virtio_with_packed_queue(hw)) {
865 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
866 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
867 	}
868 
869 	VIRTQUEUE_DUMP(vq);
870 
871 	return 0;
872 }
873 
874 static inline void
875 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
876 {
877 	int error;
878 	/*
879 	 * Requeue the discarded mbuf. This should always be
880 	 * successful since it was just dequeued.
881 	 */
882 	if (virtio_with_packed_queue(vq->hw))
883 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
884 	else
885 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
886 
887 	if (unlikely(error)) {
888 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
889 		rte_pktmbuf_free(m);
890 	}
891 }
892 
893 static inline void
894 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
895 {
896 	int error;
897 
898 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
899 	if (unlikely(error)) {
900 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
901 		rte_pktmbuf_free(m);
902 	}
903 }
904 
905 /* Optionally fill offload information in structure */
906 static inline int
907 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
908 {
909 	struct rte_net_hdr_lens hdr_lens;
910 	uint32_t hdrlen, ptype;
911 	int l4_supported = 0;
912 
913 	/* nothing to do */
914 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
915 		return 0;
916 
917 	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
918 
919 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
920 	m->packet_type = ptype;
921 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
922 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
923 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
924 		l4_supported = 1;
925 
926 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
927 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
928 		if (hdr->csum_start <= hdrlen && l4_supported) {
929 			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
930 		} else {
931 			/* Unknown proto or tunnel, do sw cksum. We can assume
932 			 * the cksum field is in the first segment since the
933 			 * buffers we provided to the host are large enough.
934 			 * In case of SCTP, this will be wrong since it's a CRC
935 			 * but there's nothing we can do.
936 			 */
937 			uint16_t csum = 0, off;
938 
939 			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
940 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
941 				&csum) < 0)
942 				return -EINVAL;
943 			if (likely(csum != 0xffff))
944 				csum = ~csum;
945 			off = hdr->csum_offset + hdr->csum_start;
946 			if (rte_pktmbuf_data_len(m) >= off + 1)
947 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
948 					off) = csum;
949 		}
950 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
951 		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
952 	}
953 
954 	/* GSO request, save required information in mbuf */
955 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
956 		/* Check unsupported modes */
957 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
958 		    (hdr->gso_size == 0)) {
959 			return -EINVAL;
960 		}
961 
962 		/* Update mss lengths in mbuf */
963 		m->tso_segsz = hdr->gso_size;
964 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
965 			case VIRTIO_NET_HDR_GSO_TCPV4:
966 			case VIRTIO_NET_HDR_GSO_TCPV6:
967 				m->ol_flags |= RTE_MBUF_F_RX_LRO |
968 					RTE_MBUF_F_RX_L4_CKSUM_NONE;
969 				break;
970 			default:
971 				return -EINVAL;
972 		}
973 	}
974 
975 	return 0;
976 }
977 
978 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
979 uint16_t
980 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
981 {
982 	struct virtnet_rx *rxvq = rx_queue;
983 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
984 	struct virtio_hw *hw = vq->hw;
985 	struct rte_mbuf *rxm;
986 	uint16_t nb_used, num, nb_rx;
987 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
988 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
989 	int error;
990 	uint32_t i, nb_enqueued;
991 	uint32_t hdr_size;
992 	struct virtio_net_hdr *hdr;
993 
994 	nb_rx = 0;
995 	if (unlikely(hw->started == 0))
996 		return nb_rx;
997 
998 	nb_used = virtqueue_nused(vq);
999 
1000 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1001 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1002 		num = VIRTIO_MBUF_BURST_SZ;
1003 	if (likely(num > DESC_PER_CACHELINE))
1004 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1005 
1006 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1007 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1008 
1009 	nb_enqueued = 0;
1010 	hdr_size = hw->vtnet_hdr_size;
1011 
1012 	for (i = 0; i < num ; i++) {
1013 		rxm = rcv_pkts[i];
1014 
1015 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1016 
1017 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1018 			PMD_RX_LOG(ERR, "Packet drop");
1019 			nb_enqueued++;
1020 			virtio_discard_rxbuf(vq, rxm);
1021 			rxvq->stats.errors++;
1022 			continue;
1023 		}
1024 
1025 		rxm->port = hw->port_id;
1026 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1027 		rxm->ol_flags = 0;
1028 		rxm->vlan_tci = 0;
1029 
1030 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1031 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1032 
1033 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1034 			RTE_PKTMBUF_HEADROOM - hdr_size);
1035 
1036 		if (hw->vlan_strip)
1037 			rte_vlan_strip(rxm);
1038 
1039 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1040 			virtio_discard_rxbuf(vq, rxm);
1041 			rxvq->stats.errors++;
1042 			continue;
1043 		}
1044 
1045 		virtio_rx_stats_updated(rxvq, rxm);
1046 
1047 		rx_pkts[nb_rx++] = rxm;
1048 	}
1049 
1050 	rxvq->stats.packets += nb_rx;
1051 
1052 	/* Allocate new mbuf for the used descriptor */
1053 	if (likely(!virtqueue_full(vq))) {
1054 		uint16_t free_cnt = vq->vq_free_cnt;
1055 		struct rte_mbuf *new_pkts[free_cnt];
1056 
1057 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1058 						free_cnt) == 0)) {
1059 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1060 					free_cnt);
1061 			if (unlikely(error)) {
1062 				for (i = 0; i < free_cnt; i++)
1063 					rte_pktmbuf_free(new_pkts[i]);
1064 			}
1065 			nb_enqueued += free_cnt;
1066 		} else {
1067 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1068 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1069 		}
1070 	}
1071 
1072 	if (likely(nb_enqueued)) {
1073 		vq_update_avail_idx(vq);
1074 
1075 		if (unlikely(virtqueue_kick_prepare(vq))) {
1076 			virtqueue_notify(vq);
1077 			PMD_RX_LOG(DEBUG, "Notified");
1078 		}
1079 	}
1080 
1081 	return nb_rx;
1082 }
1083 
1084 uint16_t
1085 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1086 			uint16_t nb_pkts)
1087 {
1088 	struct virtnet_rx *rxvq = rx_queue;
1089 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1090 	struct virtio_hw *hw = vq->hw;
1091 	struct rte_mbuf *rxm;
1092 	uint16_t num, nb_rx;
1093 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1094 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1095 	int error;
1096 	uint32_t i, nb_enqueued;
1097 	uint32_t hdr_size;
1098 	struct virtio_net_hdr *hdr;
1099 
1100 	nb_rx = 0;
1101 	if (unlikely(hw->started == 0))
1102 		return nb_rx;
1103 
1104 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1105 	if (likely(num > DESC_PER_CACHELINE))
1106 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1107 
1108 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1109 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1110 
1111 	nb_enqueued = 0;
1112 	hdr_size = hw->vtnet_hdr_size;
1113 
1114 	for (i = 0; i < num; i++) {
1115 		rxm = rcv_pkts[i];
1116 
1117 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1118 
1119 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1120 			PMD_RX_LOG(ERR, "Packet drop");
1121 			nb_enqueued++;
1122 			virtio_discard_rxbuf(vq, rxm);
1123 			rxvq->stats.errors++;
1124 			continue;
1125 		}
1126 
1127 		rxm->port = hw->port_id;
1128 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1129 		rxm->ol_flags = 0;
1130 		rxm->vlan_tci = 0;
1131 
1132 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1133 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1134 
1135 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1136 			RTE_PKTMBUF_HEADROOM - hdr_size);
1137 
1138 		if (hw->vlan_strip)
1139 			rte_vlan_strip(rxm);
1140 
1141 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1142 			virtio_discard_rxbuf(vq, rxm);
1143 			rxvq->stats.errors++;
1144 			continue;
1145 		}
1146 
1147 		virtio_rx_stats_updated(rxvq, rxm);
1148 
1149 		rx_pkts[nb_rx++] = rxm;
1150 	}
1151 
1152 	rxvq->stats.packets += nb_rx;
1153 
1154 	/* Allocate new mbuf for the used descriptor */
1155 	if (likely(!virtqueue_full(vq))) {
1156 		uint16_t free_cnt = vq->vq_free_cnt;
1157 		struct rte_mbuf *new_pkts[free_cnt];
1158 
1159 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1160 						free_cnt) == 0)) {
1161 			error = virtqueue_enqueue_recv_refill_packed(vq,
1162 					new_pkts, free_cnt);
1163 			if (unlikely(error)) {
1164 				for (i = 0; i < free_cnt; i++)
1165 					rte_pktmbuf_free(new_pkts[i]);
1166 			}
1167 			nb_enqueued += free_cnt;
1168 		} else {
1169 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1170 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1171 		}
1172 	}
1173 
1174 	if (likely(nb_enqueued)) {
1175 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1176 			virtqueue_notify(vq);
1177 			PMD_RX_LOG(DEBUG, "Notified");
1178 		}
1179 	}
1180 
1181 	return nb_rx;
1182 }
1183 
1184 
1185 uint16_t
1186 virtio_recv_pkts_inorder(void *rx_queue,
1187 			struct rte_mbuf **rx_pkts,
1188 			uint16_t nb_pkts)
1189 {
1190 	struct virtnet_rx *rxvq = rx_queue;
1191 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1192 	struct virtio_hw *hw = vq->hw;
1193 	struct rte_mbuf *rxm;
1194 	struct rte_mbuf *prev = NULL;
1195 	uint16_t nb_used, num, nb_rx;
1196 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1197 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1198 	int error;
1199 	uint32_t nb_enqueued;
1200 	uint32_t seg_num;
1201 	uint32_t seg_res;
1202 	uint32_t hdr_size;
1203 	int32_t i;
1204 
1205 	nb_rx = 0;
1206 	if (unlikely(hw->started == 0))
1207 		return nb_rx;
1208 
1209 	nb_used = virtqueue_nused(vq);
1210 	nb_used = RTE_MIN(nb_used, nb_pkts);
1211 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1212 
1213 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1214 
1215 	nb_enqueued = 0;
1216 	seg_num = 1;
1217 	seg_res = 0;
1218 	hdr_size = hw->vtnet_hdr_size;
1219 
1220 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1221 
1222 	for (i = 0; i < num; i++) {
1223 		struct virtio_net_hdr_mrg_rxbuf *header;
1224 
1225 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1226 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1227 
1228 		rxm = rcv_pkts[i];
1229 
1230 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1231 			PMD_RX_LOG(ERR, "Packet drop");
1232 			nb_enqueued++;
1233 			virtio_discard_rxbuf_inorder(vq, rxm);
1234 			rxvq->stats.errors++;
1235 			continue;
1236 		}
1237 
1238 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1239 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1240 			 - hdr_size);
1241 
1242 		if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1243 			seg_num = header->num_buffers;
1244 			if (seg_num == 0)
1245 				seg_num = 1;
1246 		} else {
1247 			seg_num = 1;
1248 		}
1249 
1250 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1251 		rxm->nb_segs = seg_num;
1252 		rxm->ol_flags = 0;
1253 		rxm->vlan_tci = 0;
1254 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1255 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1256 
1257 		rxm->port = hw->port_id;
1258 
1259 		rx_pkts[nb_rx] = rxm;
1260 		prev = rxm;
1261 
1262 		if (vq->hw->has_rx_offload &&
1263 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1264 			virtio_discard_rxbuf_inorder(vq, rxm);
1265 			rxvq->stats.errors++;
1266 			continue;
1267 		}
1268 
1269 		if (hw->vlan_strip)
1270 			rte_vlan_strip(rx_pkts[nb_rx]);
1271 
1272 		seg_res = seg_num - 1;
1273 
1274 		/* Merge remaining segments */
1275 		while (seg_res != 0 && i < (num - 1)) {
1276 			i++;
1277 
1278 			rxm = rcv_pkts[i];
1279 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1280 			rxm->pkt_len = (uint32_t)(len[i]);
1281 			rxm->data_len = (uint16_t)(len[i]);
1282 
1283 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1284 
1285 			prev->next = rxm;
1286 			prev = rxm;
1287 			seg_res -= 1;
1288 		}
1289 
1290 		if (!seg_res) {
1291 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1292 			nb_rx++;
1293 		}
1294 	}
1295 
1296 	/* Last packet still need merge segments */
1297 	while (seg_res != 0) {
1298 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1299 					VIRTIO_MBUF_BURST_SZ);
1300 
1301 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1302 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1303 							   rcv_cnt);
1304 			uint16_t extra_idx = 0;
1305 
1306 			rcv_cnt = num;
1307 			while (extra_idx < rcv_cnt) {
1308 				rxm = rcv_pkts[extra_idx];
1309 				rxm->data_off =
1310 					RTE_PKTMBUF_HEADROOM - hdr_size;
1311 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1312 				rxm->data_len = (uint16_t)(len[extra_idx]);
1313 				prev->next = rxm;
1314 				prev = rxm;
1315 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1316 				extra_idx += 1;
1317 			};
1318 			seg_res -= rcv_cnt;
1319 
1320 			if (!seg_res) {
1321 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1322 				nb_rx++;
1323 			}
1324 		} else {
1325 			PMD_RX_LOG(ERR,
1326 					"No enough segments for packet.");
1327 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1328 			rxvq->stats.errors++;
1329 			break;
1330 		}
1331 	}
1332 
1333 	rxvq->stats.packets += nb_rx;
1334 
1335 	/* Allocate new mbuf for the used descriptor */
1336 
1337 	if (likely(!virtqueue_full(vq))) {
1338 		/* free_cnt may include mrg descs */
1339 		uint16_t free_cnt = vq->vq_free_cnt;
1340 		struct rte_mbuf *new_pkts[free_cnt];
1341 
1342 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1343 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1344 					free_cnt);
1345 			if (unlikely(error)) {
1346 				for (i = 0; i < free_cnt; i++)
1347 					rte_pktmbuf_free(new_pkts[i]);
1348 			}
1349 			nb_enqueued += free_cnt;
1350 		} else {
1351 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1352 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1353 		}
1354 	}
1355 
1356 	if (likely(nb_enqueued)) {
1357 		vq_update_avail_idx(vq);
1358 
1359 		if (unlikely(virtqueue_kick_prepare(vq))) {
1360 			virtqueue_notify(vq);
1361 			PMD_RX_LOG(DEBUG, "Notified");
1362 		}
1363 	}
1364 
1365 	return nb_rx;
1366 }
1367 
1368 uint16_t
1369 virtio_recv_mergeable_pkts(void *rx_queue,
1370 			struct rte_mbuf **rx_pkts,
1371 			uint16_t nb_pkts)
1372 {
1373 	struct virtnet_rx *rxvq = rx_queue;
1374 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1375 	struct virtio_hw *hw = vq->hw;
1376 	struct rte_mbuf *rxm;
1377 	struct rte_mbuf *prev = NULL;
1378 	uint16_t nb_used, num, nb_rx = 0;
1379 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1380 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1381 	int error;
1382 	uint32_t nb_enqueued = 0;
1383 	uint32_t seg_num = 0;
1384 	uint32_t seg_res = 0;
1385 	uint32_t hdr_size = hw->vtnet_hdr_size;
1386 	int32_t i;
1387 
1388 	if (unlikely(hw->started == 0))
1389 		return nb_rx;
1390 
1391 	nb_used = virtqueue_nused(vq);
1392 
1393 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1394 
1395 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1396 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1397 		num = VIRTIO_MBUF_BURST_SZ;
1398 	if (likely(num > DESC_PER_CACHELINE))
1399 		num = num - ((vq->vq_used_cons_idx + num) %
1400 				DESC_PER_CACHELINE);
1401 
1402 
1403 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1404 
1405 	for (i = 0; i < num; i++) {
1406 		struct virtio_net_hdr_mrg_rxbuf *header;
1407 
1408 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1409 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1410 
1411 		rxm = rcv_pkts[i];
1412 
1413 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1414 			PMD_RX_LOG(ERR, "Packet drop");
1415 			nb_enqueued++;
1416 			virtio_discard_rxbuf(vq, rxm);
1417 			rxvq->stats.errors++;
1418 			continue;
1419 		}
1420 
1421 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1422 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1423 			 - hdr_size);
1424 		seg_num = header->num_buffers;
1425 		if (seg_num == 0)
1426 			seg_num = 1;
1427 
1428 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1429 		rxm->nb_segs = seg_num;
1430 		rxm->ol_flags = 0;
1431 		rxm->vlan_tci = 0;
1432 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1433 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1434 
1435 		rxm->port = hw->port_id;
1436 
1437 		rx_pkts[nb_rx] = rxm;
1438 		prev = rxm;
1439 
1440 		if (hw->has_rx_offload &&
1441 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1442 			virtio_discard_rxbuf(vq, rxm);
1443 			rxvq->stats.errors++;
1444 			continue;
1445 		}
1446 
1447 		if (hw->vlan_strip)
1448 			rte_vlan_strip(rx_pkts[nb_rx]);
1449 
1450 		seg_res = seg_num - 1;
1451 
1452 		/* Merge remaining segments */
1453 		while (seg_res != 0 && i < (num - 1)) {
1454 			i++;
1455 
1456 			rxm = rcv_pkts[i];
1457 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1458 			rxm->pkt_len = (uint32_t)(len[i]);
1459 			rxm->data_len = (uint16_t)(len[i]);
1460 
1461 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1462 
1463 			prev->next = rxm;
1464 			prev = rxm;
1465 			seg_res -= 1;
1466 		}
1467 
1468 		if (!seg_res) {
1469 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1470 			nb_rx++;
1471 		}
1472 	}
1473 
1474 	/* Last packet still need merge segments */
1475 	while (seg_res != 0) {
1476 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1477 					VIRTIO_MBUF_BURST_SZ);
1478 
1479 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1480 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1481 							   rcv_cnt);
1482 			uint16_t extra_idx = 0;
1483 
1484 			rcv_cnt = num;
1485 			while (extra_idx < rcv_cnt) {
1486 				rxm = rcv_pkts[extra_idx];
1487 				rxm->data_off =
1488 					RTE_PKTMBUF_HEADROOM - hdr_size;
1489 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1490 				rxm->data_len = (uint16_t)(len[extra_idx]);
1491 				prev->next = rxm;
1492 				prev = rxm;
1493 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1494 				extra_idx += 1;
1495 			};
1496 			seg_res -= rcv_cnt;
1497 
1498 			if (!seg_res) {
1499 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1500 				nb_rx++;
1501 			}
1502 		} else {
1503 			PMD_RX_LOG(ERR,
1504 					"No enough segments for packet.");
1505 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1506 			rxvq->stats.errors++;
1507 			break;
1508 		}
1509 	}
1510 
1511 	rxvq->stats.packets += nb_rx;
1512 
1513 	/* Allocate new mbuf for the used descriptor */
1514 	if (likely(!virtqueue_full(vq))) {
1515 		/* free_cnt may include mrg descs */
1516 		uint16_t free_cnt = vq->vq_free_cnt;
1517 		struct rte_mbuf *new_pkts[free_cnt];
1518 
1519 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1520 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1521 					free_cnt);
1522 			if (unlikely(error)) {
1523 				for (i = 0; i < free_cnt; i++)
1524 					rte_pktmbuf_free(new_pkts[i]);
1525 			}
1526 			nb_enqueued += free_cnt;
1527 		} else {
1528 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1529 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1530 		}
1531 	}
1532 
1533 	if (likely(nb_enqueued)) {
1534 		vq_update_avail_idx(vq);
1535 
1536 		if (unlikely(virtqueue_kick_prepare(vq))) {
1537 			virtqueue_notify(vq);
1538 			PMD_RX_LOG(DEBUG, "Notified");
1539 		}
1540 	}
1541 
1542 	return nb_rx;
1543 }
1544 
1545 uint16_t
1546 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1547 			struct rte_mbuf **rx_pkts,
1548 			uint16_t nb_pkts)
1549 {
1550 	struct virtnet_rx *rxvq = rx_queue;
1551 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1552 	struct virtio_hw *hw = vq->hw;
1553 	struct rte_mbuf *rxm;
1554 	struct rte_mbuf *prev = NULL;
1555 	uint16_t num, nb_rx = 0;
1556 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1557 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1558 	uint32_t nb_enqueued = 0;
1559 	uint32_t seg_num = 0;
1560 	uint32_t seg_res = 0;
1561 	uint32_t hdr_size = hw->vtnet_hdr_size;
1562 	int32_t i;
1563 	int error;
1564 
1565 	if (unlikely(hw->started == 0))
1566 		return nb_rx;
1567 
1568 
1569 	num = nb_pkts;
1570 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1571 		num = VIRTIO_MBUF_BURST_SZ;
1572 	if (likely(num > DESC_PER_CACHELINE))
1573 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1574 
1575 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1576 
1577 	for (i = 0; i < num; i++) {
1578 		struct virtio_net_hdr_mrg_rxbuf *header;
1579 
1580 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1581 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1582 
1583 		rxm = rcv_pkts[i];
1584 
1585 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1586 			PMD_RX_LOG(ERR, "Packet drop");
1587 			nb_enqueued++;
1588 			virtio_discard_rxbuf(vq, rxm);
1589 			rxvq->stats.errors++;
1590 			continue;
1591 		}
1592 
1593 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1594 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1595 		seg_num = header->num_buffers;
1596 
1597 		if (seg_num == 0)
1598 			seg_num = 1;
1599 
1600 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1601 		rxm->nb_segs = seg_num;
1602 		rxm->ol_flags = 0;
1603 		rxm->vlan_tci = 0;
1604 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1605 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1606 
1607 		rxm->port = hw->port_id;
1608 		rx_pkts[nb_rx] = rxm;
1609 		prev = rxm;
1610 
1611 		if (hw->has_rx_offload &&
1612 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1613 			virtio_discard_rxbuf(vq, rxm);
1614 			rxvq->stats.errors++;
1615 			continue;
1616 		}
1617 
1618 		if (hw->vlan_strip)
1619 			rte_vlan_strip(rx_pkts[nb_rx]);
1620 
1621 		seg_res = seg_num - 1;
1622 
1623 		/* Merge remaining segments */
1624 		while (seg_res != 0 && i < (num - 1)) {
1625 			i++;
1626 
1627 			rxm = rcv_pkts[i];
1628 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1629 			rxm->pkt_len = (uint32_t)(len[i]);
1630 			rxm->data_len = (uint16_t)(len[i]);
1631 
1632 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1633 
1634 			prev->next = rxm;
1635 			prev = rxm;
1636 			seg_res -= 1;
1637 		}
1638 
1639 		if (!seg_res) {
1640 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1641 			nb_rx++;
1642 		}
1643 	}
1644 
1645 	/* Last packet still need merge segments */
1646 	while (seg_res != 0) {
1647 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1648 					VIRTIO_MBUF_BURST_SZ);
1649 		uint16_t extra_idx = 0;
1650 
1651 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1652 				len, rcv_cnt);
1653 		if (unlikely(rcv_cnt == 0)) {
1654 			PMD_RX_LOG(ERR, "No enough segments for packet.");
1655 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1656 			rxvq->stats.errors++;
1657 			break;
1658 		}
1659 
1660 		while (extra_idx < rcv_cnt) {
1661 			rxm = rcv_pkts[extra_idx];
1662 
1663 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1664 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1665 			rxm->data_len = (uint16_t)(len[extra_idx]);
1666 
1667 			prev->next = rxm;
1668 			prev = rxm;
1669 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1670 			extra_idx += 1;
1671 		}
1672 		seg_res -= rcv_cnt;
1673 		if (!seg_res) {
1674 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1675 			nb_rx++;
1676 		}
1677 	}
1678 
1679 	rxvq->stats.packets += nb_rx;
1680 
1681 	/* Allocate new mbuf for the used descriptor */
1682 	if (likely(!virtqueue_full(vq))) {
1683 		/* free_cnt may include mrg descs */
1684 		uint16_t free_cnt = vq->vq_free_cnt;
1685 		struct rte_mbuf *new_pkts[free_cnt];
1686 
1687 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1688 			error = virtqueue_enqueue_recv_refill_packed(vq,
1689 					new_pkts, free_cnt);
1690 			if (unlikely(error)) {
1691 				for (i = 0; i < free_cnt; i++)
1692 					rte_pktmbuf_free(new_pkts[i]);
1693 			}
1694 			nb_enqueued += free_cnt;
1695 		} else {
1696 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1697 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1698 		}
1699 	}
1700 
1701 	if (likely(nb_enqueued)) {
1702 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1703 			virtqueue_notify(vq);
1704 			PMD_RX_LOG(DEBUG, "Notified");
1705 		}
1706 	}
1707 
1708 	return nb_rx;
1709 }
1710 
1711 uint16_t
1712 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1713 			uint16_t nb_pkts)
1714 {
1715 	uint16_t nb_tx;
1716 	int error;
1717 
1718 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1719 		struct rte_mbuf *m = tx_pkts[nb_tx];
1720 
1721 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1722 		error = rte_validate_tx_offload(m);
1723 		if (unlikely(error)) {
1724 			rte_errno = -error;
1725 			break;
1726 		}
1727 #endif
1728 
1729 		/* Do VLAN tag insertion */
1730 		if (unlikely(m->ol_flags & RTE_MBUF_F_TX_VLAN)) {
1731 			error = rte_vlan_insert(&m);
1732 			/* rte_vlan_insert() may change pointer
1733 			 * even in the case of failure
1734 			 */
1735 			tx_pkts[nb_tx] = m;
1736 
1737 			if (unlikely(error)) {
1738 				rte_errno = -error;
1739 				break;
1740 			}
1741 		}
1742 
1743 		error = rte_net_intel_cksum_prepare(m);
1744 		if (unlikely(error)) {
1745 			rte_errno = -error;
1746 			break;
1747 		}
1748 
1749 		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
1750 			virtio_tso_fix_cksum(m);
1751 	}
1752 
1753 	return nb_tx;
1754 }
1755 
1756 uint16_t
1757 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1758 			uint16_t nb_pkts)
1759 {
1760 	struct virtnet_tx *txvq = tx_queue;
1761 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1762 	struct virtio_hw *hw = vq->hw;
1763 	uint16_t hdr_size = hw->vtnet_hdr_size;
1764 	uint16_t nb_tx = 0;
1765 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1766 
1767 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1768 		return nb_tx;
1769 
1770 	if (unlikely(nb_pkts < 1))
1771 		return nb_pkts;
1772 
1773 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1774 
1775 	if (nb_pkts > vq->vq_free_cnt)
1776 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1777 					   in_order);
1778 
1779 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1780 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1781 		int can_push = 0, use_indirect = 0, slots, need;
1782 
1783 		/* optimize ring usage */
1784 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1785 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1786 		    rte_mbuf_refcnt_read(txm) == 1 &&
1787 		    RTE_MBUF_DIRECT(txm) &&
1788 		    txm->nb_segs == 1 &&
1789 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1790 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1791 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1792 			can_push = 1;
1793 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1794 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1795 			use_indirect = 1;
1796 		/* How many main ring entries are needed to this Tx?
1797 		 * indirect   => 1
1798 		 * any_layout => number of segments
1799 		 * default    => number of segments + 1
1800 		 */
1801 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1802 		need = slots - vq->vq_free_cnt;
1803 
1804 		/* Positive value indicates it need free vring descriptors */
1805 		if (unlikely(need > 0)) {
1806 			virtio_xmit_cleanup_packed(vq, need, in_order);
1807 			need = slots - vq->vq_free_cnt;
1808 			if (unlikely(need > 0)) {
1809 				PMD_TX_LOG(ERR,
1810 					   "No free tx descriptors to transmit");
1811 				break;
1812 			}
1813 		}
1814 
1815 		/* Enqueue Packet buffers */
1816 		if (can_push)
1817 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1818 		else
1819 			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1820 						      use_indirect, 0,
1821 						      in_order);
1822 
1823 		virtio_update_packet_stats(&txvq->stats, txm);
1824 	}
1825 
1826 	txvq->stats.packets += nb_tx;
1827 
1828 	if (likely(nb_tx)) {
1829 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1830 			virtqueue_notify(vq);
1831 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1832 		}
1833 	}
1834 
1835 	return nb_tx;
1836 }
1837 
1838 uint16_t
1839 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1840 {
1841 	struct virtnet_tx *txvq = tx_queue;
1842 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1843 	struct virtio_hw *hw = vq->hw;
1844 	uint16_t hdr_size = hw->vtnet_hdr_size;
1845 	uint16_t nb_used, nb_tx = 0;
1846 
1847 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1848 		return nb_tx;
1849 
1850 	if (unlikely(nb_pkts < 1))
1851 		return nb_pkts;
1852 
1853 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1854 
1855 	nb_used = virtqueue_nused(vq);
1856 
1857 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1858 		virtio_xmit_cleanup(vq, nb_used);
1859 
1860 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1861 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1862 		int can_push = 0, use_indirect = 0, slots, need;
1863 
1864 		/* optimize ring usage */
1865 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1866 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1867 		    rte_mbuf_refcnt_read(txm) == 1 &&
1868 		    RTE_MBUF_DIRECT(txm) &&
1869 		    txm->nb_segs == 1 &&
1870 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1871 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1872 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1873 			can_push = 1;
1874 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1875 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1876 			use_indirect = 1;
1877 
1878 		/* How many main ring entries are needed to this Tx?
1879 		 * any_layout => number of segments
1880 		 * indirect   => 1
1881 		 * default    => number of segments + 1
1882 		 */
1883 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1884 		need = slots - vq->vq_free_cnt;
1885 
1886 		/* Positive value indicates it need free vring descriptors */
1887 		if (unlikely(need > 0)) {
1888 			nb_used = virtqueue_nused(vq);
1889 
1890 			need = RTE_MIN(need, (int)nb_used);
1891 
1892 			virtio_xmit_cleanup(vq, need);
1893 			need = slots - vq->vq_free_cnt;
1894 			if (unlikely(need > 0)) {
1895 				PMD_TX_LOG(ERR,
1896 					   "No free tx descriptors to transmit");
1897 				break;
1898 			}
1899 		}
1900 
1901 		/* Enqueue Packet buffers */
1902 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1903 			can_push, 0);
1904 
1905 		virtio_update_packet_stats(&txvq->stats, txm);
1906 	}
1907 
1908 	txvq->stats.packets += nb_tx;
1909 
1910 	if (likely(nb_tx)) {
1911 		vq_update_avail_idx(vq);
1912 
1913 		if (unlikely(virtqueue_kick_prepare(vq))) {
1914 			virtqueue_notify(vq);
1915 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1916 		}
1917 	}
1918 
1919 	return nb_tx;
1920 }
1921 
1922 static __rte_always_inline int
1923 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1924 {
1925 	uint16_t nb_used, nb_clean, nb_descs;
1926 
1927 	nb_descs = vq->vq_free_cnt + need;
1928 	nb_used = virtqueue_nused(vq);
1929 	nb_clean = RTE_MIN(need, (int)nb_used);
1930 
1931 	virtio_xmit_cleanup_inorder(vq, nb_clean);
1932 
1933 	return nb_descs - vq->vq_free_cnt;
1934 }
1935 
1936 uint16_t
1937 virtio_xmit_pkts_inorder(void *tx_queue,
1938 			struct rte_mbuf **tx_pkts,
1939 			uint16_t nb_pkts)
1940 {
1941 	struct virtnet_tx *txvq = tx_queue;
1942 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1943 	struct virtio_hw *hw = vq->hw;
1944 	uint16_t hdr_size = hw->vtnet_hdr_size;
1945 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1946 	struct rte_mbuf *inorder_pkts[nb_pkts];
1947 	int need;
1948 
1949 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1950 		return nb_tx;
1951 
1952 	if (unlikely(nb_pkts < 1))
1953 		return nb_pkts;
1954 
1955 	VIRTQUEUE_DUMP(vq);
1956 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1957 	nb_used = virtqueue_nused(vq);
1958 
1959 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1960 		virtio_xmit_cleanup_inorder(vq, nb_used);
1961 
1962 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1963 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1964 		int slots;
1965 
1966 		/* optimize ring usage */
1967 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1968 		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1969 		     rte_mbuf_refcnt_read(txm) == 1 &&
1970 		     RTE_MBUF_DIRECT(txm) &&
1971 		     txm->nb_segs == 1 &&
1972 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
1973 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1974 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1975 			inorder_pkts[nb_inorder_pkts] = txm;
1976 			nb_inorder_pkts++;
1977 
1978 			continue;
1979 		}
1980 
1981 		if (nb_inorder_pkts) {
1982 			need = nb_inorder_pkts - vq->vq_free_cnt;
1983 			if (unlikely(need > 0)) {
1984 				need = virtio_xmit_try_cleanup_inorder(vq,
1985 								       need);
1986 				if (unlikely(need > 0)) {
1987 					PMD_TX_LOG(ERR,
1988 						"No free tx descriptors to "
1989 						"transmit");
1990 					break;
1991 				}
1992 			}
1993 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1994 							nb_inorder_pkts);
1995 			nb_inorder_pkts = 0;
1996 		}
1997 
1998 		slots = txm->nb_segs + 1;
1999 		need = slots - vq->vq_free_cnt;
2000 		if (unlikely(need > 0)) {
2001 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
2002 
2003 			if (unlikely(need > 0)) {
2004 				PMD_TX_LOG(ERR,
2005 					"No free tx descriptors to transmit");
2006 				break;
2007 			}
2008 		}
2009 		/* Enqueue Packet buffers */
2010 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2011 
2012 		virtio_update_packet_stats(&txvq->stats, txm);
2013 	}
2014 
2015 	/* Transmit all inorder packets */
2016 	if (nb_inorder_pkts) {
2017 		need = nb_inorder_pkts - vq->vq_free_cnt;
2018 		if (unlikely(need > 0)) {
2019 			need = virtio_xmit_try_cleanup_inorder(vq,
2020 								  need);
2021 			if (unlikely(need > 0)) {
2022 				PMD_TX_LOG(ERR,
2023 					"No free tx descriptors to transmit");
2024 				nb_inorder_pkts = vq->vq_free_cnt;
2025 				nb_tx -= need;
2026 			}
2027 		}
2028 
2029 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2030 						nb_inorder_pkts);
2031 	}
2032 
2033 	txvq->stats.packets += nb_tx;
2034 
2035 	if (likely(nb_tx)) {
2036 		vq_update_avail_idx(vq);
2037 
2038 		if (unlikely(virtqueue_kick_prepare(vq))) {
2039 			virtqueue_notify(vq);
2040 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2041 		}
2042 	}
2043 
2044 	VIRTQUEUE_DUMP(vq);
2045 
2046 	return nb_tx;
2047 }
2048 
2049 __rte_weak uint16_t
2050 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2051 			    struct rte_mbuf **rx_pkts __rte_unused,
2052 			    uint16_t nb_pkts __rte_unused)
2053 {
2054 	return 0;
2055 }
2056 
2057 __rte_weak uint16_t
2058 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2059 			    struct rte_mbuf **tx_pkts __rte_unused,
2060 			    uint16_t nb_pkts __rte_unused)
2061 {
2062 	return 0;
2063 }
2064