xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision 955d7c1f03d049b2018db3d2c4a78920368d7aa3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45 	struct virtnet_rx *rxvq = rxq;
46 	struct virtqueue *vq = rxvq->vq;
47 
48 	return VIRTQUEUE_NUSED(vq) >= offset;
49 }
50 
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54 	vq->vq_free_cnt += num;
55 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57 
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61 	struct vring_desc *dp, *dp_tail;
62 	struct vq_desc_extra *dxp;
63 	uint16_t desc_idx_last = desc_idx;
64 
65 	dp  = &vq->vq_split.ring.desc[desc_idx];
66 	dxp = &vq->vq_descx[desc_idx];
67 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 		while (dp->flags & VRING_DESC_F_NEXT) {
70 			desc_idx_last = dp->next;
71 			dp = &vq->vq_split.ring.desc[dp->next];
72 		}
73 	}
74 	dxp->ndescs = 0;
75 
76 	/*
77 	 * We must append the existing free chain, if any, to the end of
78 	 * newly freed chain. If the virtqueue was completely used, then
79 	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
80 	 */
81 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 		vq->vq_desc_head_idx = desc_idx;
83 	} else {
84 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85 		dp_tail->next = desc_idx;
86 	}
87 
88 	vq->vq_desc_tail_idx = desc_idx_last;
89 	dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91 
92 static void
93 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
94 {
95 	struct vq_desc_extra *dxp;
96 
97 	dxp = &vq->vq_descx[id];
98 	vq->vq_free_cnt += dxp->ndescs;
99 
100 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
101 		vq->vq_desc_head_idx = id;
102 	else
103 		vq->vq_descx[vq->vq_desc_tail_idx].next = id;
104 
105 	vq->vq_desc_tail_idx = id;
106 	dxp->next = VQ_RING_DESC_CHAIN_END;
107 }
108 
109 void
110 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
111 {
112 	uint32_t s = mbuf->pkt_len;
113 	struct rte_ether_addr *ea;
114 
115 	stats->bytes += s;
116 
117 	if (s == 64) {
118 		stats->size_bins[1]++;
119 	} else if (s > 64 && s < 1024) {
120 		uint32_t bin;
121 
122 		/* count zeros, and offset into correct bin */
123 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
124 		stats->size_bins[bin]++;
125 	} else {
126 		if (s < 64)
127 			stats->size_bins[0]++;
128 		else if (s < 1519)
129 			stats->size_bins[6]++;
130 		else
131 			stats->size_bins[7]++;
132 	}
133 
134 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
135 	if (rte_is_multicast_ether_addr(ea)) {
136 		if (rte_is_broadcast_ether_addr(ea))
137 			stats->broadcast++;
138 		else
139 			stats->multicast++;
140 	}
141 }
142 
143 static inline void
144 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
145 {
146 	VIRTIO_DUMP_PACKET(m, m->data_len);
147 
148 	virtio_update_packet_stats(&rxvq->stats, m);
149 }
150 
151 static uint16_t
152 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
153 				  struct rte_mbuf **rx_pkts,
154 				  uint32_t *len,
155 				  uint16_t num)
156 {
157 	struct rte_mbuf *cookie;
158 	uint16_t used_idx;
159 	uint16_t id;
160 	struct vring_packed_desc *desc;
161 	uint16_t i;
162 
163 	desc = vq->vq_packed.ring.desc;
164 
165 	for (i = 0; i < num; i++) {
166 		used_idx = vq->vq_used_cons_idx;
167 		/* desc_is_used has a load-acquire or rte_cio_rmb inside
168 		 * and wait for used desc in virtqueue.
169 		 */
170 		if (!desc_is_used(&desc[used_idx], vq))
171 			return i;
172 		len[i] = desc[used_idx].len;
173 		id = desc[used_idx].id;
174 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
175 		if (unlikely(cookie == NULL)) {
176 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
177 				vq->vq_used_cons_idx);
178 			break;
179 		}
180 		rte_prefetch0(cookie);
181 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
182 		rx_pkts[i] = cookie;
183 
184 		vq->vq_free_cnt++;
185 		vq->vq_used_cons_idx++;
186 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
187 			vq->vq_used_cons_idx -= vq->vq_nentries;
188 			vq->vq_packed.used_wrap_counter ^= 1;
189 		}
190 	}
191 
192 	return i;
193 }
194 
195 static uint16_t
196 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
197 			   uint32_t *len, uint16_t num)
198 {
199 	struct vring_used_elem *uep;
200 	struct rte_mbuf *cookie;
201 	uint16_t used_idx, desc_idx;
202 	uint16_t i;
203 
204 	/*  Caller does the check */
205 	for (i = 0; i < num ; i++) {
206 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
207 		uep = &vq->vq_split.ring.used->ring[used_idx];
208 		desc_idx = (uint16_t) uep->id;
209 		len[i] = uep->len;
210 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
211 
212 		if (unlikely(cookie == NULL)) {
213 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
214 				vq->vq_used_cons_idx);
215 			break;
216 		}
217 
218 		rte_prefetch0(cookie);
219 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
220 		rx_pkts[i]  = cookie;
221 		vq->vq_used_cons_idx++;
222 		vq_ring_free_chain(vq, desc_idx);
223 		vq->vq_descx[desc_idx].cookie = NULL;
224 	}
225 
226 	return i;
227 }
228 
229 static uint16_t
230 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
231 			struct rte_mbuf **rx_pkts,
232 			uint32_t *len,
233 			uint16_t num)
234 {
235 	struct vring_used_elem *uep;
236 	struct rte_mbuf *cookie;
237 	uint16_t used_idx = 0;
238 	uint16_t i;
239 
240 	if (unlikely(num == 0))
241 		return 0;
242 
243 	for (i = 0; i < num; i++) {
244 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
245 		/* Desc idx same as used idx */
246 		uep = &vq->vq_split.ring.used->ring[used_idx];
247 		len[i] = uep->len;
248 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
249 
250 		if (unlikely(cookie == NULL)) {
251 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
252 				vq->vq_used_cons_idx);
253 			break;
254 		}
255 
256 		rte_prefetch0(cookie);
257 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
258 		rx_pkts[i]  = cookie;
259 		vq->vq_used_cons_idx++;
260 		vq->vq_descx[used_idx].cookie = NULL;
261 	}
262 
263 	vq_ring_free_inorder(vq, used_idx, i);
264 	return i;
265 }
266 
267 #ifndef DEFAULT_TX_FREE_THRESH
268 #define DEFAULT_TX_FREE_THRESH 32
269 #endif
270 
271 static void
272 virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
273 {
274 	uint16_t used_idx, id, curr_id, free_cnt = 0;
275 	uint16_t size = vq->vq_nentries;
276 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
277 	struct vq_desc_extra *dxp;
278 
279 	used_idx = vq->vq_used_cons_idx;
280 	/* desc_is_used has a load-acquire or rte_cio_rmb inside
281 	 * and wait for used desc in virtqueue.
282 	 */
283 	while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
284 		id = desc[used_idx].id;
285 		do {
286 			curr_id = used_idx;
287 			dxp = &vq->vq_descx[used_idx];
288 			used_idx += dxp->ndescs;
289 			free_cnt += dxp->ndescs;
290 			num -= dxp->ndescs;
291 			if (used_idx >= size) {
292 				used_idx -= size;
293 				vq->vq_packed.used_wrap_counter ^= 1;
294 			}
295 			if (dxp->cookie != NULL) {
296 				rte_pktmbuf_free(dxp->cookie);
297 				dxp->cookie = NULL;
298 			}
299 		} while (curr_id != id);
300 	}
301 	vq->vq_used_cons_idx = used_idx;
302 	vq->vq_free_cnt += free_cnt;
303 }
304 
305 static void
306 virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
307 {
308 	uint16_t used_idx, id;
309 	uint16_t size = vq->vq_nentries;
310 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
311 	struct vq_desc_extra *dxp;
312 
313 	used_idx = vq->vq_used_cons_idx;
314 	/* desc_is_used has a load-acquire or rte_cio_rmb inside
315 	 * and wait for used desc in virtqueue.
316 	 */
317 	while (num-- && desc_is_used(&desc[used_idx], vq)) {
318 		id = desc[used_idx].id;
319 		dxp = &vq->vq_descx[id];
320 		vq->vq_used_cons_idx += dxp->ndescs;
321 		if (vq->vq_used_cons_idx >= size) {
322 			vq->vq_used_cons_idx -= size;
323 			vq->vq_packed.used_wrap_counter ^= 1;
324 		}
325 		vq_ring_free_id_packed(vq, id);
326 		if (dxp->cookie != NULL) {
327 			rte_pktmbuf_free(dxp->cookie);
328 			dxp->cookie = NULL;
329 		}
330 		used_idx = vq->vq_used_cons_idx;
331 	}
332 }
333 
334 /* Cleanup from completed transmits. */
335 static inline void
336 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
337 {
338 	if (in_order)
339 		virtio_xmit_cleanup_inorder_packed(vq, num);
340 	else
341 		virtio_xmit_cleanup_normal_packed(vq, num);
342 }
343 
344 static void
345 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
346 {
347 	uint16_t i, used_idx, desc_idx;
348 	for (i = 0; i < num; i++) {
349 		struct vring_used_elem *uep;
350 		struct vq_desc_extra *dxp;
351 
352 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
353 		uep = &vq->vq_split.ring.used->ring[used_idx];
354 
355 		desc_idx = (uint16_t) uep->id;
356 		dxp = &vq->vq_descx[desc_idx];
357 		vq->vq_used_cons_idx++;
358 		vq_ring_free_chain(vq, desc_idx);
359 
360 		if (dxp->cookie != NULL) {
361 			rte_pktmbuf_free(dxp->cookie);
362 			dxp->cookie = NULL;
363 		}
364 	}
365 }
366 
367 /* Cleanup from completed inorder transmits. */
368 static __rte_always_inline void
369 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
370 {
371 	uint16_t i, idx = vq->vq_used_cons_idx;
372 	int16_t free_cnt = 0;
373 	struct vq_desc_extra *dxp = NULL;
374 
375 	if (unlikely(num == 0))
376 		return;
377 
378 	for (i = 0; i < num; i++) {
379 		dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
380 		free_cnt += dxp->ndescs;
381 		if (dxp->cookie != NULL) {
382 			rte_pktmbuf_free(dxp->cookie);
383 			dxp->cookie = NULL;
384 		}
385 	}
386 
387 	vq->vq_free_cnt += free_cnt;
388 	vq->vq_used_cons_idx = idx;
389 }
390 
391 static inline int
392 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
393 			struct rte_mbuf **cookies,
394 			uint16_t num)
395 {
396 	struct vq_desc_extra *dxp;
397 	struct virtio_hw *hw = vq->hw;
398 	struct vring_desc *start_dp;
399 	uint16_t head_idx, idx, i = 0;
400 
401 	if (unlikely(vq->vq_free_cnt == 0))
402 		return -ENOSPC;
403 	if (unlikely(vq->vq_free_cnt < num))
404 		return -EMSGSIZE;
405 
406 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
407 	start_dp = vq->vq_split.ring.desc;
408 
409 	while (i < num) {
410 		idx = head_idx & (vq->vq_nentries - 1);
411 		dxp = &vq->vq_descx[idx];
412 		dxp->cookie = (void *)cookies[i];
413 		dxp->ndescs = 1;
414 
415 		start_dp[idx].addr =
416 				VIRTIO_MBUF_ADDR(cookies[i], vq) +
417 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
418 		start_dp[idx].len =
419 				cookies[i]->buf_len -
420 				RTE_PKTMBUF_HEADROOM +
421 				hw->vtnet_hdr_size;
422 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
423 
424 		vq_update_avail_ring(vq, idx);
425 		head_idx++;
426 		i++;
427 	}
428 
429 	vq->vq_desc_head_idx += num;
430 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
431 	return 0;
432 }
433 
434 static inline int
435 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
436 				uint16_t num)
437 {
438 	struct vq_desc_extra *dxp;
439 	struct virtio_hw *hw = vq->hw;
440 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
441 	uint16_t idx, i;
442 
443 	if (unlikely(vq->vq_free_cnt == 0))
444 		return -ENOSPC;
445 	if (unlikely(vq->vq_free_cnt < num))
446 		return -EMSGSIZE;
447 
448 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
449 		return -EFAULT;
450 
451 	for (i = 0; i < num; i++) {
452 		idx = vq->vq_desc_head_idx;
453 		dxp = &vq->vq_descx[idx];
454 		dxp->cookie = (void *)cookie[i];
455 		dxp->ndescs = 1;
456 
457 		start_dp[idx].addr =
458 			VIRTIO_MBUF_ADDR(cookie[i], vq) +
459 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
460 		start_dp[idx].len =
461 			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
462 			hw->vtnet_hdr_size;
463 		start_dp[idx].flags = VRING_DESC_F_WRITE;
464 		vq->vq_desc_head_idx = start_dp[idx].next;
465 		vq_update_avail_ring(vq, idx);
466 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
467 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
468 			break;
469 		}
470 	}
471 
472 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
473 
474 	return 0;
475 }
476 
477 static inline int
478 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
479 				     struct rte_mbuf **cookie, uint16_t num)
480 {
481 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
482 	uint16_t flags = vq->vq_packed.cached_flags;
483 	struct virtio_hw *hw = vq->hw;
484 	struct vq_desc_extra *dxp;
485 	uint16_t idx;
486 	int i;
487 
488 	if (unlikely(vq->vq_free_cnt == 0))
489 		return -ENOSPC;
490 	if (unlikely(vq->vq_free_cnt < num))
491 		return -EMSGSIZE;
492 
493 	for (i = 0; i < num; i++) {
494 		idx = vq->vq_avail_idx;
495 		dxp = &vq->vq_descx[idx];
496 		dxp->cookie = (void *)cookie[i];
497 		dxp->ndescs = 1;
498 
499 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
500 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
501 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
502 					+ hw->vtnet_hdr_size;
503 
504 		vq->vq_desc_head_idx = dxp->next;
505 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
506 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
507 
508 		virtqueue_store_flags_packed(&start_dp[idx], flags,
509 					     hw->weak_barriers);
510 
511 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
512 			vq->vq_avail_idx -= vq->vq_nentries;
513 			vq->vq_packed.cached_flags ^=
514 				VRING_PACKED_DESC_F_AVAIL_USED;
515 			flags = vq->vq_packed.cached_flags;
516 		}
517 	}
518 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
519 	return 0;
520 }
521 
522 /* When doing TSO, the IP length is not included in the pseudo header
523  * checksum of the packet given to the PMD, but for virtio it is
524  * expected.
525  */
526 static void
527 virtio_tso_fix_cksum(struct rte_mbuf *m)
528 {
529 	/* common case: header is not fragmented */
530 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
531 			m->l4_len)) {
532 		struct rte_ipv4_hdr *iph;
533 		struct rte_ipv6_hdr *ip6h;
534 		struct rte_tcp_hdr *th;
535 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
536 		uint32_t tmp;
537 
538 		iph = rte_pktmbuf_mtod_offset(m,
539 					struct rte_ipv4_hdr *, m->l2_len);
540 		th = RTE_PTR_ADD(iph, m->l3_len);
541 		if ((iph->version_ihl >> 4) == 4) {
542 			iph->hdr_checksum = 0;
543 			iph->hdr_checksum = rte_ipv4_cksum(iph);
544 			ip_len = iph->total_length;
545 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
546 				m->l3_len);
547 		} else {
548 			ip6h = (struct rte_ipv6_hdr *)iph;
549 			ip_paylen = ip6h->payload_len;
550 		}
551 
552 		/* calculate the new phdr checksum not including ip_paylen */
553 		prev_cksum = th->cksum;
554 		tmp = prev_cksum;
555 		tmp += ip_paylen;
556 		tmp = (tmp & 0xffff) + (tmp >> 16);
557 		new_cksum = tmp;
558 
559 		/* replace it in the packet */
560 		th->cksum = new_cksum;
561 	}
562 }
563 
564 
565 /* avoid write operation when necessary, to lessen cache issues */
566 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
567 	if ((var) != (val))			\
568 		(var) = (val);			\
569 } while (0)
570 
571 #define virtqueue_clear_net_hdr(_hdr) do {		\
572 	ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0);	\
573 	ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0);	\
574 	ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0);		\
575 	ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0);	\
576 	ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0);	\
577 	ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0);	\
578 } while (0)
579 
580 static inline void
581 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
582 			struct rte_mbuf *cookie,
583 			bool offload)
584 {
585 	if (offload) {
586 		if (cookie->ol_flags & PKT_TX_TCP_SEG)
587 			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
588 
589 		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
590 		case PKT_TX_UDP_CKSUM:
591 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
592 			hdr->csum_offset = offsetof(struct rte_udp_hdr,
593 				dgram_cksum);
594 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
595 			break;
596 
597 		case PKT_TX_TCP_CKSUM:
598 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
599 			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
600 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
601 			break;
602 
603 		default:
604 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
605 			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
606 			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
607 			break;
608 		}
609 
610 		/* TCP Segmentation Offload */
611 		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
612 			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
613 				VIRTIO_NET_HDR_GSO_TCPV6 :
614 				VIRTIO_NET_HDR_GSO_TCPV4;
615 			hdr->gso_size = cookie->tso_segsz;
616 			hdr->hdr_len =
617 				cookie->l2_len +
618 				cookie->l3_len +
619 				cookie->l4_len;
620 		} else {
621 			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
622 			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
623 			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
624 		}
625 	}
626 }
627 
628 static inline void
629 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
630 			struct rte_mbuf **cookies,
631 			uint16_t num)
632 {
633 	struct vq_desc_extra *dxp;
634 	struct virtqueue *vq = txvq->vq;
635 	struct vring_desc *start_dp;
636 	struct virtio_net_hdr *hdr;
637 	uint16_t idx;
638 	uint16_t head_size = vq->hw->vtnet_hdr_size;
639 	uint16_t i = 0;
640 
641 	idx = vq->vq_desc_head_idx;
642 	start_dp = vq->vq_split.ring.desc;
643 
644 	while (i < num) {
645 		idx = idx & (vq->vq_nentries - 1);
646 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
647 		dxp->cookie = (void *)cookies[i];
648 		dxp->ndescs = 1;
649 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
650 
651 		hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr +
652 			cookies[i]->data_off - head_size;
653 
654 		/* if offload disabled, hdr is not zeroed yet, do it now */
655 		if (!vq->hw->has_tx_offload)
656 			virtqueue_clear_net_hdr(hdr);
657 		else
658 			virtqueue_xmit_offload(hdr, cookies[i], true);
659 
660 		start_dp[idx].addr  =
661 			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
662 		start_dp[idx].len   = cookies[i]->data_len + head_size;
663 		start_dp[idx].flags = 0;
664 
665 
666 		vq_update_avail_ring(vq, idx);
667 
668 		idx++;
669 		i++;
670 	};
671 
672 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
673 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
674 }
675 
676 static inline void
677 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
678 				   struct rte_mbuf *cookie,
679 				   int in_order)
680 {
681 	struct virtqueue *vq = txvq->vq;
682 	struct vring_packed_desc *dp;
683 	struct vq_desc_extra *dxp;
684 	uint16_t idx, id, flags;
685 	uint16_t head_size = vq->hw->vtnet_hdr_size;
686 	struct virtio_net_hdr *hdr;
687 
688 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
689 	idx = vq->vq_avail_idx;
690 	dp = &vq->vq_packed.ring.desc[idx];
691 
692 	dxp = &vq->vq_descx[id];
693 	dxp->ndescs = 1;
694 	dxp->cookie = cookie;
695 
696 	flags = vq->vq_packed.cached_flags;
697 
698 	/* prepend cannot fail, checked by caller */
699 	hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
700 		cookie->data_off - head_size;
701 
702 	/* if offload disabled, hdr is not zeroed yet, do it now */
703 	if (!vq->hw->has_tx_offload)
704 		virtqueue_clear_net_hdr(hdr);
705 	else
706 		virtqueue_xmit_offload(hdr, cookie, true);
707 
708 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
709 	dp->len  = cookie->data_len + head_size;
710 	dp->id   = id;
711 
712 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
713 		vq->vq_avail_idx -= vq->vq_nentries;
714 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
715 	}
716 
717 	vq->vq_free_cnt--;
718 
719 	if (!in_order) {
720 		vq->vq_desc_head_idx = dxp->next;
721 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
722 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
723 	}
724 
725 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
726 }
727 
728 static inline void
729 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
730 			      uint16_t needed, int can_push, int in_order)
731 {
732 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
733 	struct vq_desc_extra *dxp;
734 	struct virtqueue *vq = txvq->vq;
735 	struct vring_packed_desc *start_dp, *head_dp;
736 	uint16_t idx, id, head_idx, head_flags;
737 	uint16_t head_size = vq->hw->vtnet_hdr_size;
738 	struct virtio_net_hdr *hdr;
739 	uint16_t prev;
740 	bool prepend_header = false;
741 
742 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
743 
744 	dxp = &vq->vq_descx[id];
745 	dxp->ndescs = needed;
746 	dxp->cookie = cookie;
747 
748 	head_idx = vq->vq_avail_idx;
749 	idx = head_idx;
750 	prev = head_idx;
751 	start_dp = vq->vq_packed.ring.desc;
752 
753 	head_dp = &vq->vq_packed.ring.desc[idx];
754 	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
755 	head_flags |= vq->vq_packed.cached_flags;
756 
757 	if (can_push) {
758 		/* prepend cannot fail, checked by caller */
759 		hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
760 			cookie->data_off - head_size;
761 		prepend_header = true;
762 
763 		/* if offload disabled, it is not zeroed below, do it now */
764 		if (!vq->hw->has_tx_offload)
765 			virtqueue_clear_net_hdr(hdr);
766 	} else {
767 		/* setup first tx ring slot to point to header
768 		 * stored in reserved region.
769 		 */
770 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
771 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
772 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
773 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
774 		idx++;
775 		if (idx >= vq->vq_nentries) {
776 			idx -= vq->vq_nentries;
777 			vq->vq_packed.cached_flags ^=
778 				VRING_PACKED_DESC_F_AVAIL_USED;
779 		}
780 	}
781 
782 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
783 
784 	do {
785 		uint16_t flags;
786 
787 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
788 		start_dp[idx].len  = cookie->data_len;
789 		if (prepend_header) {
790 			start_dp[idx].addr -= head_size;
791 			start_dp[idx].len += head_size;
792 			prepend_header = false;
793 		}
794 
795 		if (likely(idx != head_idx)) {
796 			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
797 			flags |= vq->vq_packed.cached_flags;
798 			start_dp[idx].flags = flags;
799 		}
800 		prev = idx;
801 		idx++;
802 		if (idx >= vq->vq_nentries) {
803 			idx -= vq->vq_nentries;
804 			vq->vq_packed.cached_flags ^=
805 				VRING_PACKED_DESC_F_AVAIL_USED;
806 		}
807 	} while ((cookie = cookie->next) != NULL);
808 
809 	start_dp[prev].id = id;
810 
811 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
812 	vq->vq_avail_idx = idx;
813 
814 	if (!in_order) {
815 		vq->vq_desc_head_idx = dxp->next;
816 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
817 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
818 	}
819 
820 	virtqueue_store_flags_packed(head_dp, head_flags,
821 				     vq->hw->weak_barriers);
822 }
823 
824 static inline void
825 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
826 			uint16_t needed, int use_indirect, int can_push,
827 			int in_order)
828 {
829 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
830 	struct vq_desc_extra *dxp;
831 	struct virtqueue *vq = txvq->vq;
832 	struct vring_desc *start_dp;
833 	uint16_t seg_num = cookie->nb_segs;
834 	uint16_t head_idx, idx;
835 	uint16_t head_size = vq->hw->vtnet_hdr_size;
836 	bool prepend_header = false;
837 	struct virtio_net_hdr *hdr;
838 
839 	head_idx = vq->vq_desc_head_idx;
840 	idx = head_idx;
841 	if (in_order)
842 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
843 	else
844 		dxp = &vq->vq_descx[idx];
845 	dxp->cookie = (void *)cookie;
846 	dxp->ndescs = needed;
847 
848 	start_dp = vq->vq_split.ring.desc;
849 
850 	if (can_push) {
851 		/* prepend cannot fail, checked by caller */
852 		hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
853 			cookie->data_off - head_size;
854 		prepend_header = true;
855 
856 		/* if offload disabled, it is not zeroed below, do it now */
857 		if (!vq->hw->has_tx_offload)
858 			virtqueue_clear_net_hdr(hdr);
859 	} else if (use_indirect) {
860 		/* setup tx ring slot to point to indirect
861 		 * descriptor list stored in reserved region.
862 		 *
863 		 * the first slot in indirect ring is already preset
864 		 * to point to the header in reserved region
865 		 */
866 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
867 			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
868 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
869 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
870 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
871 
872 		/* loop below will fill in rest of the indirect elements */
873 		start_dp = txr[idx].tx_indir;
874 		idx = 1;
875 	} else {
876 		/* setup first tx ring slot to point to header
877 		 * stored in reserved region.
878 		 */
879 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
880 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
881 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
882 		start_dp[idx].flags = VRING_DESC_F_NEXT;
883 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
884 
885 		idx = start_dp[idx].next;
886 	}
887 
888 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
889 
890 	do {
891 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
892 		start_dp[idx].len   = cookie->data_len;
893 		if (prepend_header) {
894 			start_dp[idx].addr -= head_size;
895 			start_dp[idx].len += head_size;
896 			prepend_header = false;
897 		}
898 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
899 		idx = start_dp[idx].next;
900 	} while ((cookie = cookie->next) != NULL);
901 
902 	if (use_indirect)
903 		idx = vq->vq_split.ring.desc[head_idx].next;
904 
905 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
906 
907 	vq->vq_desc_head_idx = idx;
908 	vq_update_avail_ring(vq, head_idx);
909 
910 	if (!in_order) {
911 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
912 			vq->vq_desc_tail_idx = idx;
913 	}
914 }
915 
916 void
917 virtio_dev_cq_start(struct rte_eth_dev *dev)
918 {
919 	struct virtio_hw *hw = dev->data->dev_private;
920 
921 	if (hw->cvq && hw->cvq->vq) {
922 		rte_spinlock_init(&hw->cvq->lock);
923 		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
924 	}
925 }
926 
927 int
928 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
929 			uint16_t queue_idx,
930 			uint16_t nb_desc,
931 			unsigned int socket_id __rte_unused,
932 			const struct rte_eth_rxconf *rx_conf,
933 			struct rte_mempool *mp)
934 {
935 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
936 	struct virtio_hw *hw = dev->data->dev_private;
937 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
938 	struct virtnet_rx *rxvq;
939 
940 	PMD_INIT_FUNC_TRACE();
941 
942 	if (rx_conf->rx_deferred_start) {
943 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
944 		return -EINVAL;
945 	}
946 
947 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
948 		nb_desc = vq->vq_nentries;
949 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
950 
951 	rxvq = &vq->rxq;
952 	rxvq->queue_id = queue_idx;
953 	rxvq->mpool = mp;
954 	dev->data->rx_queues[queue_idx] = rxvq;
955 
956 	return 0;
957 }
958 
959 int
960 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
961 {
962 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
963 	struct virtio_hw *hw = dev->data->dev_private;
964 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
965 	struct virtnet_rx *rxvq = &vq->rxq;
966 	struct rte_mbuf *m;
967 	uint16_t desc_idx;
968 	int error, nbufs, i;
969 
970 	PMD_INIT_FUNC_TRACE();
971 
972 	/* Allocate blank mbufs for the each rx descriptor */
973 	nbufs = 0;
974 
975 	if (hw->use_simple_rx) {
976 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
977 		     desc_idx++) {
978 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
979 			vq->vq_split.ring.desc[desc_idx].flags =
980 				VRING_DESC_F_WRITE;
981 		}
982 
983 		virtio_rxq_vec_setup(rxvq);
984 	}
985 
986 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
987 	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
988 	     desc_idx++) {
989 		vq->sw_ring[vq->vq_nentries + desc_idx] =
990 			&rxvq->fake_mbuf;
991 	}
992 
993 	if (hw->use_simple_rx) {
994 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
995 			virtio_rxq_rearm_vec(rxvq);
996 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
997 		}
998 	} else if (hw->use_inorder_rx) {
999 		if ((!virtqueue_full(vq))) {
1000 			uint16_t free_cnt = vq->vq_free_cnt;
1001 			struct rte_mbuf *pkts[free_cnt];
1002 
1003 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
1004 				free_cnt)) {
1005 				error = virtqueue_enqueue_refill_inorder(vq,
1006 						pkts,
1007 						free_cnt);
1008 				if (unlikely(error)) {
1009 					for (i = 0; i < free_cnt; i++)
1010 						rte_pktmbuf_free(pkts[i]);
1011 				}
1012 			}
1013 
1014 			nbufs += free_cnt;
1015 			vq_update_avail_idx(vq);
1016 		}
1017 	} else {
1018 		while (!virtqueue_full(vq)) {
1019 			m = rte_mbuf_raw_alloc(rxvq->mpool);
1020 			if (m == NULL)
1021 				break;
1022 
1023 			/* Enqueue allocated buffers */
1024 			if (vtpci_packed_queue(vq->hw))
1025 				error = virtqueue_enqueue_recv_refill_packed(vq,
1026 						&m, 1);
1027 			else
1028 				error = virtqueue_enqueue_recv_refill(vq,
1029 						&m, 1);
1030 			if (error) {
1031 				rte_pktmbuf_free(m);
1032 				break;
1033 			}
1034 			nbufs++;
1035 		}
1036 
1037 		if (!vtpci_packed_queue(vq->hw))
1038 			vq_update_avail_idx(vq);
1039 	}
1040 
1041 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
1042 
1043 	VIRTQUEUE_DUMP(vq);
1044 
1045 	return 0;
1046 }
1047 
1048 /*
1049  * struct rte_eth_dev *dev: Used to update dev
1050  * uint16_t nb_desc: Defaults to values read from config space
1051  * unsigned int socket_id: Used to allocate memzone
1052  * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
1053  * uint16_t queue_idx: Just used as an index in dev txq list
1054  */
1055 int
1056 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
1057 			uint16_t queue_idx,
1058 			uint16_t nb_desc,
1059 			unsigned int socket_id __rte_unused,
1060 			const struct rte_eth_txconf *tx_conf)
1061 {
1062 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
1063 	struct virtio_hw *hw = dev->data->dev_private;
1064 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
1065 	struct virtnet_tx *txvq;
1066 	uint16_t tx_free_thresh;
1067 
1068 	PMD_INIT_FUNC_TRACE();
1069 
1070 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
1071 		nb_desc = vq->vq_nentries;
1072 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
1073 
1074 	txvq = &vq->txq;
1075 	txvq->queue_id = queue_idx;
1076 
1077 	tx_free_thresh = tx_conf->tx_free_thresh;
1078 	if (tx_free_thresh == 0)
1079 		tx_free_thresh =
1080 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
1081 
1082 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
1083 		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
1084 			"number of TX entries minus 3 (%u)."
1085 			" (tx_free_thresh=%u port=%u queue=%u)\n",
1086 			vq->vq_nentries - 3,
1087 			tx_free_thresh, dev->data->port_id, queue_idx);
1088 		return -EINVAL;
1089 	}
1090 
1091 	vq->vq_free_thresh = tx_free_thresh;
1092 
1093 	dev->data->tx_queues[queue_idx] = txvq;
1094 	return 0;
1095 }
1096 
1097 int
1098 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
1099 				uint16_t queue_idx)
1100 {
1101 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
1102 	struct virtio_hw *hw = dev->data->dev_private;
1103 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
1104 
1105 	PMD_INIT_FUNC_TRACE();
1106 
1107 	if (!vtpci_packed_queue(hw)) {
1108 		if (hw->use_inorder_tx)
1109 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
1110 	}
1111 
1112 	VIRTQUEUE_DUMP(vq);
1113 
1114 	return 0;
1115 }
1116 
1117 static inline void
1118 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
1119 {
1120 	int error;
1121 	/*
1122 	 * Requeue the discarded mbuf. This should always be
1123 	 * successful since it was just dequeued.
1124 	 */
1125 	if (vtpci_packed_queue(vq->hw))
1126 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
1127 	else
1128 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
1129 
1130 	if (unlikely(error)) {
1131 		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1132 		rte_pktmbuf_free(m);
1133 	}
1134 }
1135 
1136 static inline void
1137 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
1138 {
1139 	int error;
1140 
1141 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
1142 	if (unlikely(error)) {
1143 		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1144 		rte_pktmbuf_free(m);
1145 	}
1146 }
1147 
1148 /* Optionally fill offload information in structure */
1149 static inline int
1150 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
1151 {
1152 	struct rte_net_hdr_lens hdr_lens;
1153 	uint32_t hdrlen, ptype;
1154 	int l4_supported = 0;
1155 
1156 	/* nothing to do */
1157 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1158 		return 0;
1159 
1160 	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
1161 
1162 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1163 	m->packet_type = ptype;
1164 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
1165 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
1166 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
1167 		l4_supported = 1;
1168 
1169 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1170 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
1171 		if (hdr->csum_start <= hdrlen && l4_supported) {
1172 			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
1173 		} else {
1174 			/* Unknown proto or tunnel, do sw cksum. We can assume
1175 			 * the cksum field is in the first segment since the
1176 			 * buffers we provided to the host are large enough.
1177 			 * In case of SCTP, this will be wrong since it's a CRC
1178 			 * but there's nothing we can do.
1179 			 */
1180 			uint16_t csum = 0, off;
1181 
1182 			rte_raw_cksum_mbuf(m, hdr->csum_start,
1183 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
1184 				&csum);
1185 			if (likely(csum != 0xffff))
1186 				csum = ~csum;
1187 			off = hdr->csum_offset + hdr->csum_start;
1188 			if (rte_pktmbuf_data_len(m) >= off + 1)
1189 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
1190 					off) = csum;
1191 		}
1192 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
1193 		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1194 	}
1195 
1196 	/* GSO request, save required information in mbuf */
1197 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1198 		/* Check unsupported modes */
1199 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
1200 		    (hdr->gso_size == 0)) {
1201 			return -EINVAL;
1202 		}
1203 
1204 		/* Update mss lengthes in mbuf */
1205 		m->tso_segsz = hdr->gso_size;
1206 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1207 			case VIRTIO_NET_HDR_GSO_TCPV4:
1208 			case VIRTIO_NET_HDR_GSO_TCPV6:
1209 				m->ol_flags |= PKT_RX_LRO | \
1210 					PKT_RX_L4_CKSUM_NONE;
1211 				break;
1212 			default:
1213 				return -EINVAL;
1214 		}
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 #define VIRTIO_MBUF_BURST_SZ 64
1221 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
1222 uint16_t
1223 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1224 {
1225 	struct virtnet_rx *rxvq = rx_queue;
1226 	struct virtqueue *vq = rxvq->vq;
1227 	struct virtio_hw *hw = vq->hw;
1228 	struct rte_mbuf *rxm;
1229 	uint16_t nb_used, num, nb_rx;
1230 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1231 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1232 	int error;
1233 	uint32_t i, nb_enqueued;
1234 	uint32_t hdr_size;
1235 	struct virtio_net_hdr *hdr;
1236 
1237 	nb_rx = 0;
1238 	if (unlikely(hw->started == 0))
1239 		return nb_rx;
1240 
1241 	nb_used = VIRTQUEUE_NUSED(vq);
1242 
1243 	virtio_rmb(hw->weak_barriers);
1244 
1245 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1246 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1247 		num = VIRTIO_MBUF_BURST_SZ;
1248 	if (likely(num > DESC_PER_CACHELINE))
1249 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1250 
1251 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1252 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1253 
1254 	nb_enqueued = 0;
1255 	hdr_size = hw->vtnet_hdr_size;
1256 
1257 	for (i = 0; i < num ; i++) {
1258 		rxm = rcv_pkts[i];
1259 
1260 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1261 
1262 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1263 			PMD_RX_LOG(ERR, "Packet drop");
1264 			nb_enqueued++;
1265 			virtio_discard_rxbuf(vq, rxm);
1266 			rxvq->stats.errors++;
1267 			continue;
1268 		}
1269 
1270 		rxm->port = rxvq->port_id;
1271 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1272 		rxm->ol_flags = 0;
1273 		rxm->vlan_tci = 0;
1274 
1275 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1276 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1277 
1278 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1279 			RTE_PKTMBUF_HEADROOM - hdr_size);
1280 
1281 		if (hw->vlan_strip)
1282 			rte_vlan_strip(rxm);
1283 
1284 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1285 			virtio_discard_rxbuf(vq, rxm);
1286 			rxvq->stats.errors++;
1287 			continue;
1288 		}
1289 
1290 		virtio_rx_stats_updated(rxvq, rxm);
1291 
1292 		rx_pkts[nb_rx++] = rxm;
1293 	}
1294 
1295 	rxvq->stats.packets += nb_rx;
1296 
1297 	/* Allocate new mbuf for the used descriptor */
1298 	if (likely(!virtqueue_full(vq))) {
1299 		uint16_t free_cnt = vq->vq_free_cnt;
1300 		struct rte_mbuf *new_pkts[free_cnt];
1301 
1302 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1303 						free_cnt) == 0)) {
1304 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1305 					free_cnt);
1306 			if (unlikely(error)) {
1307 				for (i = 0; i < free_cnt; i++)
1308 					rte_pktmbuf_free(new_pkts[i]);
1309 			}
1310 			nb_enqueued += free_cnt;
1311 		} else {
1312 			struct rte_eth_dev *dev =
1313 				&rte_eth_devices[rxvq->port_id];
1314 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1315 		}
1316 	}
1317 
1318 	if (likely(nb_enqueued)) {
1319 		vq_update_avail_idx(vq);
1320 
1321 		if (unlikely(virtqueue_kick_prepare(vq))) {
1322 			virtqueue_notify(vq);
1323 			PMD_RX_LOG(DEBUG, "Notified");
1324 		}
1325 	}
1326 
1327 	return nb_rx;
1328 }
1329 
1330 uint16_t
1331 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1332 			uint16_t nb_pkts)
1333 {
1334 	struct virtnet_rx *rxvq = rx_queue;
1335 	struct virtqueue *vq = rxvq->vq;
1336 	struct virtio_hw *hw = vq->hw;
1337 	struct rte_mbuf *rxm;
1338 	uint16_t num, nb_rx;
1339 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1340 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1341 	int error;
1342 	uint32_t i, nb_enqueued;
1343 	uint32_t hdr_size;
1344 	struct virtio_net_hdr *hdr;
1345 
1346 	nb_rx = 0;
1347 	if (unlikely(hw->started == 0))
1348 		return nb_rx;
1349 
1350 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1351 	if (likely(num > DESC_PER_CACHELINE))
1352 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1353 
1354 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1355 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1356 
1357 	nb_enqueued = 0;
1358 	hdr_size = hw->vtnet_hdr_size;
1359 
1360 	for (i = 0; i < num; i++) {
1361 		rxm = rcv_pkts[i];
1362 
1363 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1364 
1365 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1366 			PMD_RX_LOG(ERR, "Packet drop");
1367 			nb_enqueued++;
1368 			virtio_discard_rxbuf(vq, rxm);
1369 			rxvq->stats.errors++;
1370 			continue;
1371 		}
1372 
1373 		rxm->port = rxvq->port_id;
1374 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1375 		rxm->ol_flags = 0;
1376 		rxm->vlan_tci = 0;
1377 
1378 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1379 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1380 
1381 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1382 			RTE_PKTMBUF_HEADROOM - hdr_size);
1383 
1384 		if (hw->vlan_strip)
1385 			rte_vlan_strip(rxm);
1386 
1387 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1388 			virtio_discard_rxbuf(vq, rxm);
1389 			rxvq->stats.errors++;
1390 			continue;
1391 		}
1392 
1393 		virtio_rx_stats_updated(rxvq, rxm);
1394 
1395 		rx_pkts[nb_rx++] = rxm;
1396 	}
1397 
1398 	rxvq->stats.packets += nb_rx;
1399 
1400 	/* Allocate new mbuf for the used descriptor */
1401 	if (likely(!virtqueue_full(vq))) {
1402 		uint16_t free_cnt = vq->vq_free_cnt;
1403 		struct rte_mbuf *new_pkts[free_cnt];
1404 
1405 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1406 						free_cnt) == 0)) {
1407 			error = virtqueue_enqueue_recv_refill_packed(vq,
1408 					new_pkts, free_cnt);
1409 			if (unlikely(error)) {
1410 				for (i = 0; i < free_cnt; i++)
1411 					rte_pktmbuf_free(new_pkts[i]);
1412 			}
1413 			nb_enqueued += free_cnt;
1414 		} else {
1415 			struct rte_eth_dev *dev =
1416 				&rte_eth_devices[rxvq->port_id];
1417 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1418 		}
1419 	}
1420 
1421 	if (likely(nb_enqueued)) {
1422 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1423 			virtqueue_notify(vq);
1424 			PMD_RX_LOG(DEBUG, "Notified");
1425 		}
1426 	}
1427 
1428 	return nb_rx;
1429 }
1430 
1431 
1432 uint16_t
1433 virtio_recv_pkts_inorder(void *rx_queue,
1434 			struct rte_mbuf **rx_pkts,
1435 			uint16_t nb_pkts)
1436 {
1437 	struct virtnet_rx *rxvq = rx_queue;
1438 	struct virtqueue *vq = rxvq->vq;
1439 	struct virtio_hw *hw = vq->hw;
1440 	struct rte_mbuf *rxm;
1441 	struct rte_mbuf *prev = NULL;
1442 	uint16_t nb_used, num, nb_rx;
1443 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1444 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1445 	int error;
1446 	uint32_t nb_enqueued;
1447 	uint32_t seg_num;
1448 	uint32_t seg_res;
1449 	uint32_t hdr_size;
1450 	int32_t i;
1451 
1452 	nb_rx = 0;
1453 	if (unlikely(hw->started == 0))
1454 		return nb_rx;
1455 
1456 	nb_used = VIRTQUEUE_NUSED(vq);
1457 	nb_used = RTE_MIN(nb_used, nb_pkts);
1458 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1459 
1460 	virtio_rmb(hw->weak_barriers);
1461 
1462 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1463 
1464 	nb_enqueued = 0;
1465 	seg_num = 1;
1466 	seg_res = 0;
1467 	hdr_size = hw->vtnet_hdr_size;
1468 
1469 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1470 
1471 	for (i = 0; i < num; i++) {
1472 		struct virtio_net_hdr_mrg_rxbuf *header;
1473 
1474 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1475 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1476 
1477 		rxm = rcv_pkts[i];
1478 
1479 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1480 			PMD_RX_LOG(ERR, "Packet drop");
1481 			nb_enqueued++;
1482 			virtio_discard_rxbuf_inorder(vq, rxm);
1483 			rxvq->stats.errors++;
1484 			continue;
1485 		}
1486 
1487 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1488 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1489 			 - hdr_size);
1490 
1491 		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1492 			seg_num = header->num_buffers;
1493 			if (seg_num == 0)
1494 				seg_num = 1;
1495 		} else {
1496 			seg_num = 1;
1497 		}
1498 
1499 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1500 		rxm->nb_segs = seg_num;
1501 		rxm->ol_flags = 0;
1502 		rxm->vlan_tci = 0;
1503 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1504 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1505 
1506 		rxm->port = rxvq->port_id;
1507 
1508 		rx_pkts[nb_rx] = rxm;
1509 		prev = rxm;
1510 
1511 		if (vq->hw->has_rx_offload &&
1512 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1513 			virtio_discard_rxbuf_inorder(vq, rxm);
1514 			rxvq->stats.errors++;
1515 			continue;
1516 		}
1517 
1518 		if (hw->vlan_strip)
1519 			rte_vlan_strip(rx_pkts[nb_rx]);
1520 
1521 		seg_res = seg_num - 1;
1522 
1523 		/* Merge remaining segments */
1524 		while (seg_res != 0 && i < (num - 1)) {
1525 			i++;
1526 
1527 			rxm = rcv_pkts[i];
1528 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1529 			rxm->pkt_len = (uint32_t)(len[i]);
1530 			rxm->data_len = (uint16_t)(len[i]);
1531 
1532 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1533 
1534 			prev->next = rxm;
1535 			prev = rxm;
1536 			seg_res -= 1;
1537 		}
1538 
1539 		if (!seg_res) {
1540 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1541 			nb_rx++;
1542 		}
1543 	}
1544 
1545 	/* Last packet still need merge segments */
1546 	while (seg_res != 0) {
1547 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1548 					VIRTIO_MBUF_BURST_SZ);
1549 
1550 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1551 			virtio_rmb(hw->weak_barriers);
1552 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1553 							   rcv_cnt);
1554 			uint16_t extra_idx = 0;
1555 
1556 			rcv_cnt = num;
1557 			while (extra_idx < rcv_cnt) {
1558 				rxm = rcv_pkts[extra_idx];
1559 				rxm->data_off =
1560 					RTE_PKTMBUF_HEADROOM - hdr_size;
1561 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1562 				rxm->data_len = (uint16_t)(len[extra_idx]);
1563 				prev->next = rxm;
1564 				prev = rxm;
1565 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1566 				extra_idx += 1;
1567 			};
1568 			seg_res -= rcv_cnt;
1569 
1570 			if (!seg_res) {
1571 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1572 				nb_rx++;
1573 			}
1574 		} else {
1575 			PMD_RX_LOG(ERR,
1576 					"No enough segments for packet.");
1577 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1578 			rxvq->stats.errors++;
1579 			break;
1580 		}
1581 	}
1582 
1583 	rxvq->stats.packets += nb_rx;
1584 
1585 	/* Allocate new mbuf for the used descriptor */
1586 
1587 	if (likely(!virtqueue_full(vq))) {
1588 		/* free_cnt may include mrg descs */
1589 		uint16_t free_cnt = vq->vq_free_cnt;
1590 		struct rte_mbuf *new_pkts[free_cnt];
1591 
1592 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1593 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1594 					free_cnt);
1595 			if (unlikely(error)) {
1596 				for (i = 0; i < free_cnt; i++)
1597 					rte_pktmbuf_free(new_pkts[i]);
1598 			}
1599 			nb_enqueued += free_cnt;
1600 		} else {
1601 			struct rte_eth_dev *dev =
1602 				&rte_eth_devices[rxvq->port_id];
1603 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1604 		}
1605 	}
1606 
1607 	if (likely(nb_enqueued)) {
1608 		vq_update_avail_idx(vq);
1609 
1610 		if (unlikely(virtqueue_kick_prepare(vq))) {
1611 			virtqueue_notify(vq);
1612 			PMD_RX_LOG(DEBUG, "Notified");
1613 		}
1614 	}
1615 
1616 	return nb_rx;
1617 }
1618 
1619 uint16_t
1620 virtio_recv_mergeable_pkts(void *rx_queue,
1621 			struct rte_mbuf **rx_pkts,
1622 			uint16_t nb_pkts)
1623 {
1624 	struct virtnet_rx *rxvq = rx_queue;
1625 	struct virtqueue *vq = rxvq->vq;
1626 	struct virtio_hw *hw = vq->hw;
1627 	struct rte_mbuf *rxm;
1628 	struct rte_mbuf *prev = NULL;
1629 	uint16_t nb_used, num, nb_rx = 0;
1630 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1631 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1632 	int error;
1633 	uint32_t nb_enqueued = 0;
1634 	uint32_t seg_num = 0;
1635 	uint32_t seg_res = 0;
1636 	uint32_t hdr_size = hw->vtnet_hdr_size;
1637 	int32_t i;
1638 
1639 	if (unlikely(hw->started == 0))
1640 		return nb_rx;
1641 
1642 	nb_used = VIRTQUEUE_NUSED(vq);
1643 
1644 	virtio_rmb(hw->weak_barriers);
1645 
1646 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1647 
1648 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1649 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1650 		num = VIRTIO_MBUF_BURST_SZ;
1651 	if (likely(num > DESC_PER_CACHELINE))
1652 		num = num - ((vq->vq_used_cons_idx + num) %
1653 				DESC_PER_CACHELINE);
1654 
1655 
1656 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1657 
1658 	for (i = 0; i < num; i++) {
1659 		struct virtio_net_hdr_mrg_rxbuf *header;
1660 
1661 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1662 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1663 
1664 		rxm = rcv_pkts[i];
1665 
1666 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1667 			PMD_RX_LOG(ERR, "Packet drop");
1668 			nb_enqueued++;
1669 			virtio_discard_rxbuf(vq, rxm);
1670 			rxvq->stats.errors++;
1671 			continue;
1672 		}
1673 
1674 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1675 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1676 			 - hdr_size);
1677 		seg_num = header->num_buffers;
1678 		if (seg_num == 0)
1679 			seg_num = 1;
1680 
1681 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1682 		rxm->nb_segs = seg_num;
1683 		rxm->ol_flags = 0;
1684 		rxm->vlan_tci = 0;
1685 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1686 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1687 
1688 		rxm->port = rxvq->port_id;
1689 
1690 		rx_pkts[nb_rx] = rxm;
1691 		prev = rxm;
1692 
1693 		if (hw->has_rx_offload &&
1694 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1695 			virtio_discard_rxbuf(vq, rxm);
1696 			rxvq->stats.errors++;
1697 			continue;
1698 		}
1699 
1700 		if (hw->vlan_strip)
1701 			rte_vlan_strip(rx_pkts[nb_rx]);
1702 
1703 		seg_res = seg_num - 1;
1704 
1705 		/* Merge remaining segments */
1706 		while (seg_res != 0 && i < (num - 1)) {
1707 			i++;
1708 
1709 			rxm = rcv_pkts[i];
1710 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1711 			rxm->pkt_len = (uint32_t)(len[i]);
1712 			rxm->data_len = (uint16_t)(len[i]);
1713 
1714 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1715 
1716 			prev->next = rxm;
1717 			prev = rxm;
1718 			seg_res -= 1;
1719 		}
1720 
1721 		if (!seg_res) {
1722 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1723 			nb_rx++;
1724 		}
1725 	}
1726 
1727 	/* Last packet still need merge segments */
1728 	while (seg_res != 0) {
1729 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1730 					VIRTIO_MBUF_BURST_SZ);
1731 
1732 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1733 			virtio_rmb(hw->weak_barriers);
1734 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1735 							   rcv_cnt);
1736 			uint16_t extra_idx = 0;
1737 
1738 			rcv_cnt = num;
1739 			while (extra_idx < rcv_cnt) {
1740 				rxm = rcv_pkts[extra_idx];
1741 				rxm->data_off =
1742 					RTE_PKTMBUF_HEADROOM - hdr_size;
1743 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1744 				rxm->data_len = (uint16_t)(len[extra_idx]);
1745 				prev->next = rxm;
1746 				prev = rxm;
1747 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1748 				extra_idx += 1;
1749 			};
1750 			seg_res -= rcv_cnt;
1751 
1752 			if (!seg_res) {
1753 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1754 				nb_rx++;
1755 			}
1756 		} else {
1757 			PMD_RX_LOG(ERR,
1758 					"No enough segments for packet.");
1759 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1760 			rxvq->stats.errors++;
1761 			break;
1762 		}
1763 	}
1764 
1765 	rxvq->stats.packets += nb_rx;
1766 
1767 	/* Allocate new mbuf for the used descriptor */
1768 	if (likely(!virtqueue_full(vq))) {
1769 		/* free_cnt may include mrg descs */
1770 		uint16_t free_cnt = vq->vq_free_cnt;
1771 		struct rte_mbuf *new_pkts[free_cnt];
1772 
1773 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1774 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1775 					free_cnt);
1776 			if (unlikely(error)) {
1777 				for (i = 0; i < free_cnt; i++)
1778 					rte_pktmbuf_free(new_pkts[i]);
1779 			}
1780 			nb_enqueued += free_cnt;
1781 		} else {
1782 			struct rte_eth_dev *dev =
1783 				&rte_eth_devices[rxvq->port_id];
1784 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1785 		}
1786 	}
1787 
1788 	if (likely(nb_enqueued)) {
1789 		vq_update_avail_idx(vq);
1790 
1791 		if (unlikely(virtqueue_kick_prepare(vq))) {
1792 			virtqueue_notify(vq);
1793 			PMD_RX_LOG(DEBUG, "Notified");
1794 		}
1795 	}
1796 
1797 	return nb_rx;
1798 }
1799 
1800 uint16_t
1801 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1802 			struct rte_mbuf **rx_pkts,
1803 			uint16_t nb_pkts)
1804 {
1805 	struct virtnet_rx *rxvq = rx_queue;
1806 	struct virtqueue *vq = rxvq->vq;
1807 	struct virtio_hw *hw = vq->hw;
1808 	struct rte_mbuf *rxm;
1809 	struct rte_mbuf *prev = NULL;
1810 	uint16_t num, nb_rx = 0;
1811 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1812 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1813 	uint32_t nb_enqueued = 0;
1814 	uint32_t seg_num = 0;
1815 	uint32_t seg_res = 0;
1816 	uint32_t hdr_size = hw->vtnet_hdr_size;
1817 	int32_t i;
1818 	int error;
1819 
1820 	if (unlikely(hw->started == 0))
1821 		return nb_rx;
1822 
1823 
1824 	num = nb_pkts;
1825 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1826 		num = VIRTIO_MBUF_BURST_SZ;
1827 	if (likely(num > DESC_PER_CACHELINE))
1828 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1829 
1830 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1831 
1832 	for (i = 0; i < num; i++) {
1833 		struct virtio_net_hdr_mrg_rxbuf *header;
1834 
1835 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1836 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1837 
1838 		rxm = rcv_pkts[i];
1839 
1840 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1841 			PMD_RX_LOG(ERR, "Packet drop");
1842 			nb_enqueued++;
1843 			virtio_discard_rxbuf(vq, rxm);
1844 			rxvq->stats.errors++;
1845 			continue;
1846 		}
1847 
1848 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1849 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1850 		seg_num = header->num_buffers;
1851 
1852 		if (seg_num == 0)
1853 			seg_num = 1;
1854 
1855 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1856 		rxm->nb_segs = seg_num;
1857 		rxm->ol_flags = 0;
1858 		rxm->vlan_tci = 0;
1859 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1860 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1861 
1862 		rxm->port = rxvq->port_id;
1863 		rx_pkts[nb_rx] = rxm;
1864 		prev = rxm;
1865 
1866 		if (hw->has_rx_offload &&
1867 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1868 			virtio_discard_rxbuf(vq, rxm);
1869 			rxvq->stats.errors++;
1870 			continue;
1871 		}
1872 
1873 		if (hw->vlan_strip)
1874 			rte_vlan_strip(rx_pkts[nb_rx]);
1875 
1876 		seg_res = seg_num - 1;
1877 
1878 		/* Merge remaining segments */
1879 		while (seg_res != 0 && i < (num - 1)) {
1880 			i++;
1881 
1882 			rxm = rcv_pkts[i];
1883 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1884 			rxm->pkt_len = (uint32_t)(len[i]);
1885 			rxm->data_len = (uint16_t)(len[i]);
1886 
1887 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1888 
1889 			prev->next = rxm;
1890 			prev = rxm;
1891 			seg_res -= 1;
1892 		}
1893 
1894 		if (!seg_res) {
1895 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1896 			nb_rx++;
1897 		}
1898 	}
1899 
1900 	/* Last packet still need merge segments */
1901 	while (seg_res != 0) {
1902 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1903 					VIRTIO_MBUF_BURST_SZ);
1904 		uint16_t extra_idx = 0;
1905 
1906 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1907 				len, rcv_cnt);
1908 		if (unlikely(rcv_cnt == 0)) {
1909 			PMD_RX_LOG(ERR, "No enough segments for packet.");
1910 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1911 			rxvq->stats.errors++;
1912 			break;
1913 		}
1914 
1915 		while (extra_idx < rcv_cnt) {
1916 			rxm = rcv_pkts[extra_idx];
1917 
1918 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1919 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1920 			rxm->data_len = (uint16_t)(len[extra_idx]);
1921 
1922 			prev->next = rxm;
1923 			prev = rxm;
1924 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1925 			extra_idx += 1;
1926 		}
1927 		seg_res -= rcv_cnt;
1928 		if (!seg_res) {
1929 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1930 			nb_rx++;
1931 		}
1932 	}
1933 
1934 	rxvq->stats.packets += nb_rx;
1935 
1936 	/* Allocate new mbuf for the used descriptor */
1937 	if (likely(!virtqueue_full(vq))) {
1938 		/* free_cnt may include mrg descs */
1939 		uint16_t free_cnt = vq->vq_free_cnt;
1940 		struct rte_mbuf *new_pkts[free_cnt];
1941 
1942 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1943 			error = virtqueue_enqueue_recv_refill_packed(vq,
1944 					new_pkts, free_cnt);
1945 			if (unlikely(error)) {
1946 				for (i = 0; i < free_cnt; i++)
1947 					rte_pktmbuf_free(new_pkts[i]);
1948 			}
1949 			nb_enqueued += free_cnt;
1950 		} else {
1951 			struct rte_eth_dev *dev =
1952 				&rte_eth_devices[rxvq->port_id];
1953 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1954 		}
1955 	}
1956 
1957 	if (likely(nb_enqueued)) {
1958 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1959 			virtqueue_notify(vq);
1960 			PMD_RX_LOG(DEBUG, "Notified");
1961 		}
1962 	}
1963 
1964 	return nb_rx;
1965 }
1966 
1967 uint16_t
1968 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1969 			uint16_t nb_pkts)
1970 {
1971 	uint16_t nb_tx;
1972 	int error;
1973 
1974 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1975 		struct rte_mbuf *m = tx_pkts[nb_tx];
1976 
1977 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1978 		error = rte_validate_tx_offload(m);
1979 		if (unlikely(error)) {
1980 			rte_errno = -error;
1981 			break;
1982 		}
1983 #endif
1984 
1985 		/* Do VLAN tag insertion */
1986 		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1987 			error = rte_vlan_insert(&m);
1988 			/* rte_vlan_insert() may change pointer
1989 			 * even in the case of failure
1990 			 */
1991 			tx_pkts[nb_tx] = m;
1992 
1993 			if (unlikely(error)) {
1994 				rte_errno = -error;
1995 				break;
1996 			}
1997 		}
1998 
1999 		error = rte_net_intel_cksum_prepare(m);
2000 		if (unlikely(error)) {
2001 			rte_errno = -error;
2002 			break;
2003 		}
2004 
2005 		if (m->ol_flags & PKT_TX_TCP_SEG)
2006 			virtio_tso_fix_cksum(m);
2007 	}
2008 
2009 	return nb_tx;
2010 }
2011 
2012 uint16_t
2013 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
2014 			uint16_t nb_pkts)
2015 {
2016 	struct virtnet_tx *txvq = tx_queue;
2017 	struct virtqueue *vq = txvq->vq;
2018 	struct virtio_hw *hw = vq->hw;
2019 	uint16_t hdr_size = hw->vtnet_hdr_size;
2020 	uint16_t nb_tx = 0;
2021 	bool in_order = hw->use_inorder_tx;
2022 
2023 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2024 		return nb_tx;
2025 
2026 	if (unlikely(nb_pkts < 1))
2027 		return nb_pkts;
2028 
2029 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2030 
2031 	if (nb_pkts > vq->vq_free_cnt)
2032 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
2033 					   in_order);
2034 
2035 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2036 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2037 		int can_push = 0, slots, need;
2038 
2039 		/* optimize ring usage */
2040 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2041 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2042 		    rte_mbuf_refcnt_read(txm) == 1 &&
2043 		    RTE_MBUF_DIRECT(txm) &&
2044 		    txm->nb_segs == 1 &&
2045 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
2046 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2047 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2048 			can_push = 1;
2049 
2050 		/* How many main ring entries are needed to this Tx?
2051 		 * any_layout => number of segments
2052 		 * default    => number of segments + 1
2053 		 */
2054 		slots = txm->nb_segs + !can_push;
2055 		need = slots - vq->vq_free_cnt;
2056 
2057 		/* Positive value indicates it need free vring descriptors */
2058 		if (unlikely(need > 0)) {
2059 			virtio_xmit_cleanup_packed(vq, need, in_order);
2060 			need = slots - vq->vq_free_cnt;
2061 			if (unlikely(need > 0)) {
2062 				PMD_TX_LOG(ERR,
2063 					   "No free tx descriptors to transmit");
2064 				break;
2065 			}
2066 		}
2067 
2068 		/* Enqueue Packet buffers */
2069 		if (can_push)
2070 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
2071 		else
2072 			virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
2073 						      in_order);
2074 
2075 		virtio_update_packet_stats(&txvq->stats, txm);
2076 	}
2077 
2078 	txvq->stats.packets += nb_tx;
2079 
2080 	if (likely(nb_tx)) {
2081 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
2082 			virtqueue_notify(vq);
2083 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2084 		}
2085 	}
2086 
2087 	return nb_tx;
2088 }
2089 
2090 uint16_t
2091 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2092 {
2093 	struct virtnet_tx *txvq = tx_queue;
2094 	struct virtqueue *vq = txvq->vq;
2095 	struct virtio_hw *hw = vq->hw;
2096 	uint16_t hdr_size = hw->vtnet_hdr_size;
2097 	uint16_t nb_used, nb_tx = 0;
2098 
2099 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2100 		return nb_tx;
2101 
2102 	if (unlikely(nb_pkts < 1))
2103 		return nb_pkts;
2104 
2105 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2106 	nb_used = VIRTQUEUE_NUSED(vq);
2107 
2108 	virtio_rmb(hw->weak_barriers);
2109 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2110 		virtio_xmit_cleanup(vq, nb_used);
2111 
2112 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2113 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2114 		int can_push = 0, use_indirect = 0, slots, need;
2115 
2116 		/* optimize ring usage */
2117 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2118 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2119 		    rte_mbuf_refcnt_read(txm) == 1 &&
2120 		    RTE_MBUF_DIRECT(txm) &&
2121 		    txm->nb_segs == 1 &&
2122 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
2123 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2124 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2125 			can_push = 1;
2126 		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
2127 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
2128 			use_indirect = 1;
2129 
2130 		/* How many main ring entries are needed to this Tx?
2131 		 * any_layout => number of segments
2132 		 * indirect   => 1
2133 		 * default    => number of segments + 1
2134 		 */
2135 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
2136 		need = slots - vq->vq_free_cnt;
2137 
2138 		/* Positive value indicates it need free vring descriptors */
2139 		if (unlikely(need > 0)) {
2140 			nb_used = VIRTQUEUE_NUSED(vq);
2141 			virtio_rmb(hw->weak_barriers);
2142 			need = RTE_MIN(need, (int)nb_used);
2143 
2144 			virtio_xmit_cleanup(vq, need);
2145 			need = slots - vq->vq_free_cnt;
2146 			if (unlikely(need > 0)) {
2147 				PMD_TX_LOG(ERR,
2148 					   "No free tx descriptors to transmit");
2149 				break;
2150 			}
2151 		}
2152 
2153 		/* Enqueue Packet buffers */
2154 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
2155 			can_push, 0);
2156 
2157 		virtio_update_packet_stats(&txvq->stats, txm);
2158 	}
2159 
2160 	txvq->stats.packets += nb_tx;
2161 
2162 	if (likely(nb_tx)) {
2163 		vq_update_avail_idx(vq);
2164 
2165 		if (unlikely(virtqueue_kick_prepare(vq))) {
2166 			virtqueue_notify(vq);
2167 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2168 		}
2169 	}
2170 
2171 	return nb_tx;
2172 }
2173 
2174 static __rte_always_inline int
2175 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
2176 {
2177 	uint16_t nb_used, nb_clean, nb_descs;
2178 	struct virtio_hw *hw = vq->hw;
2179 
2180 	nb_descs = vq->vq_free_cnt + need;
2181 	nb_used = VIRTQUEUE_NUSED(vq);
2182 	virtio_rmb(hw->weak_barriers);
2183 	nb_clean = RTE_MIN(need, (int)nb_used);
2184 
2185 	virtio_xmit_cleanup_inorder(vq, nb_clean);
2186 
2187 	return nb_descs - vq->vq_free_cnt;
2188 }
2189 
2190 uint16_t
2191 virtio_xmit_pkts_inorder(void *tx_queue,
2192 			struct rte_mbuf **tx_pkts,
2193 			uint16_t nb_pkts)
2194 {
2195 	struct virtnet_tx *txvq = tx_queue;
2196 	struct virtqueue *vq = txvq->vq;
2197 	struct virtio_hw *hw = vq->hw;
2198 	uint16_t hdr_size = hw->vtnet_hdr_size;
2199 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
2200 	struct rte_mbuf *inorder_pkts[nb_pkts];
2201 	int need;
2202 
2203 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2204 		return nb_tx;
2205 
2206 	if (unlikely(nb_pkts < 1))
2207 		return nb_pkts;
2208 
2209 	VIRTQUEUE_DUMP(vq);
2210 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2211 	nb_used = VIRTQUEUE_NUSED(vq);
2212 
2213 	virtio_rmb(hw->weak_barriers);
2214 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2215 		virtio_xmit_cleanup_inorder(vq, nb_used);
2216 
2217 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2218 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2219 		int slots;
2220 
2221 		/* optimize ring usage */
2222 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2223 		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2224 		     rte_mbuf_refcnt_read(txm) == 1 &&
2225 		     RTE_MBUF_DIRECT(txm) &&
2226 		     txm->nb_segs == 1 &&
2227 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
2228 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2229 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
2230 			inorder_pkts[nb_inorder_pkts] = txm;
2231 			nb_inorder_pkts++;
2232 
2233 			continue;
2234 		}
2235 
2236 		if (nb_inorder_pkts) {
2237 			need = nb_inorder_pkts - vq->vq_free_cnt;
2238 			if (unlikely(need > 0)) {
2239 				need = virtio_xmit_try_cleanup_inorder(vq,
2240 								       need);
2241 				if (unlikely(need > 0)) {
2242 					PMD_TX_LOG(ERR,
2243 						"No free tx descriptors to "
2244 						"transmit");
2245 					break;
2246 				}
2247 			}
2248 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2249 							nb_inorder_pkts);
2250 			nb_inorder_pkts = 0;
2251 		}
2252 
2253 		slots = txm->nb_segs + 1;
2254 		need = slots - vq->vq_free_cnt;
2255 		if (unlikely(need > 0)) {
2256 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
2257 
2258 			if (unlikely(need > 0)) {
2259 				PMD_TX_LOG(ERR,
2260 					"No free tx descriptors to transmit");
2261 				break;
2262 			}
2263 		}
2264 		/* Enqueue Packet buffers */
2265 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2266 
2267 		virtio_update_packet_stats(&txvq->stats, txm);
2268 	}
2269 
2270 	/* Transmit all inorder packets */
2271 	if (nb_inorder_pkts) {
2272 		need = nb_inorder_pkts - vq->vq_free_cnt;
2273 		if (unlikely(need > 0)) {
2274 			need = virtio_xmit_try_cleanup_inorder(vq,
2275 								  need);
2276 			if (unlikely(need > 0)) {
2277 				PMD_TX_LOG(ERR,
2278 					"No free tx descriptors to transmit");
2279 				nb_inorder_pkts = vq->vq_free_cnt;
2280 				nb_tx -= need;
2281 			}
2282 		}
2283 
2284 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2285 						nb_inorder_pkts);
2286 	}
2287 
2288 	txvq->stats.packets += nb_tx;
2289 
2290 	if (likely(nb_tx)) {
2291 		vq_update_avail_idx(vq);
2292 
2293 		if (unlikely(virtqueue_kick_prepare(vq))) {
2294 			virtqueue_notify(vq);
2295 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2296 		}
2297 	}
2298 
2299 	VIRTQUEUE_DUMP(vq);
2300 
2301 	return nb_tx;
2302 }
2303