/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#include "virtio_ring.h"

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif

int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
	struct virtnet_rx *rxvq = rxq;
	struct virtqueue *vq = rxvq->vq;

	return VIRTQUEUE_NUSED(vq) >= offset;
}

void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
	vq->vq_free_cnt += num;
	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
}

void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp  = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

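/*
 * Packed ring note: descriptor ids can complete out of order, so the
 * free list of vq_descx[] slots is threaded through dxp->next here
 * rather than through the ring descriptors themselves.
 */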
static void
vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
{
	struct vq_desc_extra *dxp;

	dxp = &vq->vq_descx[id];
	vq->vq_free_cnt += dxp->ndescs;

	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_head_idx = id;
	else
		vq->vq_descx[vq->vq_desc_tail_idx].next = id;

	vq->vq_desc_tail_idx = id;
	dxp->next = VQ_RING_DESC_CHAIN_END;
}

static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
				  struct rte_mbuf **rx_pkts,
				  uint32_t *len,
				  uint16_t num)
{
	struct rte_mbuf *cookie;
	uint16_t used_idx;
	uint16_t id;
	struct vring_packed_desc *desc;
	uint16_t i;

	desc = vq->ring_packed.desc_packed;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx;
		if (!desc_is_used(&desc[used_idx], vq))
			return i;
		/* read barrier before reading the descriptor payload,
		 * matching virtio_xmit_cleanup_packed() below
		 */
		virtio_rmb(vq->hw->weak_barriers);
		len[i] = desc[used_idx].len;
		id = desc[used_idx].id;
		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}
		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;

		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->used_wrap_counter ^= 1;
		}
	}

	return i;
}

static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t) uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i]  = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

static uint16_t
virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
			struct rte_mbuf **rx_pkts,
			uint32_t *len,
			uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx = 0;
	uint16_t i;

	if (unlikely(num == 0))
		return 0;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		/* Desc idx same as used idx */
		uep = &vq->vq_ring.used->ring[used_idx];
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i]  = cookie;
		vq->vq_used_cons_idx++;
		vq->vq_descx[used_idx].cookie = NULL;
	}

	vq_ring_free_inorder(vq, used_idx, i);
	return i;
}

#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
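/* Used by virtio_dev_tx_queue_setup() below when the application
 * leaves tx_conf->tx_free_thresh at 0.
 */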

/* Cleanup from completed transmits. */
static void
virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
{
	uint16_t used_idx, id;
	uint16_t size = vq->vq_nentries;
	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
	struct vq_desc_extra *dxp;

	used_idx = vq->vq_used_cons_idx;
	while (num-- && desc_is_used(&desc[used_idx], vq)) {
		virtio_rmb(vq->hw->weak_barriers);
		id = desc[used_idx].id;
		dxp = &vq->vq_descx[id];
		vq->vq_used_cons_idx += dxp->ndescs;
		if (vq->vq_used_cons_idx >= size) {
			vq->vq_used_cons_idx -= size;
			vq->used_wrap_counter ^= 1;
		}
		vq_ring_free_id_packed(vq, id);
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
		used_idx = vq->vq_used_cons_idx;
	}
}

static void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, used_idx, desc_idx;
	for (i = 0; i < num; i++) {
		struct vring_used_elem *uep;
		struct vq_desc_extra *dxp;

		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];

		desc_idx = (uint16_t) uep->id;
		dxp = &vq->vq_descx[desc_idx];
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);

		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}
}

/* Cleanup from completed inorder transmits. */
static void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, used_idx, desc_idx = 0, last_idx;
	int16_t free_cnt = 0;
	struct vq_desc_extra *dxp = NULL;

	if (unlikely(num == 0))
		return;

	for (i = 0; i < num; i++) {
		struct vring_used_elem *uep;

		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;

		dxp = &vq->vq_descx[desc_idx];
		vq->vq_used_cons_idx++;

		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

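	/* In-order descriptors complete as one contiguous run, so the
	 * whole run up to last_idx can be reclaimed in a single call;
	 * the wrap handling below covers runs crossing the ring end.
	 */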
	last_idx = desc_idx + dxp->ndescs - 1;
	free_cnt = last_idx - vq->vq_desc_tail_idx;
	if (free_cnt <= 0)
		free_cnt += vq->vq_nentries;

	vq_ring_free_inorder(vq, last_idx, free_cnt);
}

static inline int
virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t head_idx, idx, i = 0;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
	start_dp = vq->vq_ring.desc;

	while (i < num) {
		idx = head_idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;

		start_dp[idx].addr =
				VIRTIO_MBUF_ADDR(cookies[i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len =
				cookies[i]->buf_len -
				RTE_PKTMBUF_HEADROOM +
				hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;

		vq_update_avail_ring(vq, idx);
		head_idx++;
		i++;
	}

	vq->vq_desc_head_idx += num;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
				uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp = vq->vq_ring.desc;
	uint16_t idx, i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
		return -EFAULT;

	for (i = 0; i < num; i++) {
		idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		start_dp[idx].addr =
			VIRTIO_MBUF_ADDR(cookie[i], vq) +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len =
			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
			hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;
		vq->vq_desc_head_idx = start_dp[idx].next;
		vq_update_avail_ring(vq, idx);
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
			break;
		}
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);

	return 0;
}

static inline int
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
				     struct rte_mbuf **cookie, uint16_t num)
{
	struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
	uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
	struct virtio_hw *hw = vq->hw;
	struct vq_desc_extra *dxp;
	uint16_t idx;
	int i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	for (i = 0; i < num; i++) {
		idx = vq->vq_avail_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
					+ hw->vtnet_hdr_size;

		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
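		/* addr/len must be visible before the flags write makes
		 * the descriptor available to the device.
		 */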
		virtio_wmb(hw->weak_barriers);
		start_dp[idx].flags = flags;
		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			vq->avail_wrap_counter ^= 1;
			vq->avail_used_flags =
				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
				VRING_DESC_F_USED(!vq->avail_wrap_counter);
			flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
		}
	}
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
 * expected.
 */
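/* The fix is plain one's-complement arithmetic: ip_paylen is folded
 * into the existing TCP pseudo-header checksum with an end-around
 * carry, i.e. cksum' = fold(cksum + ip_paylen), which yields the
 * pseudo-header checksum that includes the payload length.
 */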
static void
virtio_tso_fix_cksum(struct rte_mbuf *m)
{
	/* common case: header is not fragmented */
	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
			m->l4_len)) {
		struct ipv4_hdr *iph;
		struct ipv6_hdr *ip6h;
		struct tcp_hdr *th;
		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
		uint32_t tmp;

		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
		th = RTE_PTR_ADD(iph, m->l3_len);
		if ((iph->version_ihl >> 4) == 4) {
			iph->hdr_checksum = 0;
			iph->hdr_checksum = rte_ipv4_cksum(iph);
			ip_len = iph->total_length;
			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
				m->l3_len);
		} else {
			ip6h = (struct ipv6_hdr *)iph;
			ip_paylen = ip6h->payload_len;
		}

		/* calculate the new phdr checksum, now including ip_paylen */
		prev_cksum = th->cksum;
		tmp = prev_cksum;
		tmp += ip_paylen;
		tmp = (tmp & 0xffff) + (tmp >> 16);
		new_cksum = tmp;

		/* replace it in the packet */
		th->cksum = new_cksum;
	}
}

/* avoid a write when the value is already set, to reduce cache-line dirtying */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)

static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
			struct rte_mbuf *cookie,
			bool offload)
{
	if (offload) {
		if (cookie->ol_flags & PKT_TX_TCP_SEG)
			cookie->ol_flags |= PKT_TX_TCP_CKSUM;

		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_UDP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct udp_hdr,
				dgram_cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		case PKT_TX_TCP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		default:
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			break;
		}

		/* TCP Segmentation Offload */
		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
			virtio_tso_fix_cksum(cookie);
			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
				VIRTIO_NET_HDR_GSO_TCPV6 :
				VIRTIO_NET_HDR_GSO_TCPV4;
			hdr->gso_size = cookie->tso_segsz;
			hdr->hdr_len =
				cookie->l2_len +
				cookie->l3_len +
				cookie->l4_len;
		} else {
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	}
}

static inline void
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	struct virtio_net_hdr *hdr;
	uint16_t idx;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t i = 0;

	idx = vq->vq_desc_head_idx;
	start_dp = vq->vq_ring.desc;

	while (i < num) {
		idx = idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;

		hdr = (struct virtio_net_hdr *)
			rte_pktmbuf_prepend(cookies[i], head_size);
		cookies[i]->pkt_len -= head_size;

		/* if offload is disabled, the header is not zeroed below, so do it here */
		if (!vq->hw->has_tx_offload) {
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}

		virtqueue_xmit_offload(hdr, cookies[i],
				vq->hw->has_tx_offload);

		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
		start_dp[idx].len   = cookies[i]->data_len;
		start_dp[idx].flags = 0;

		vq_update_avail_ring(vq, idx);

		idx++;
		i++;
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}

static inline void
virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			      uint16_t needed, int can_push)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_packed_desc *start_dp, *head_dp;
	uint16_t idx, id, head_idx, head_flags;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;
	uint16_t prev;

	id = vq->vq_desc_head_idx;

	dxp = &vq->vq_descx[id];
	dxp->ndescs = needed;
	dxp->cookie = cookie;

	head_idx = vq->vq_avail_idx;
	idx = head_idx;
	prev = head_idx;
	start_dp = vq->ring_packed.desc_packed;

	head_dp = &vq->ring_packed.desc_packed[idx];
	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
	head_flags |= vq->avail_used_flags;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = (struct virtio_net_hdr *)
			rte_pktmbuf_prepend(cookie, head_size);
		/* rte_pktmbuf_prepend() counts the header size into the
		 * packet length, which is not wanted here; the subtraction
		 * below restores the correct packet size.
		 */
		cookie->pkt_len -= head_size;

		/* if offload is disabled, the header is not zeroed below, so do it here */
		if (!vq->hw->has_tx_offload) {
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
		idx++;
		if (idx >= vq->vq_nentries) {
			idx -= vq->vq_nentries;
			vq->avail_wrap_counter ^= 1;
			vq->avail_used_flags =
				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
				VRING_DESC_F_USED(!vq->avail_wrap_counter);
		}
	}

	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

	do {
		uint16_t flags;

		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len  = cookie->data_len;
		if (likely(idx != head_idx)) {
			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
			flags |= vq->avail_used_flags;
			start_dp[idx].flags = flags;
		}
		prev = idx;
		idx++;
		if (idx >= vq->vq_nentries) {
			idx -= vq->vq_nentries;
			vq->avail_wrap_counter ^= 1;
			vq->avail_used_flags =
				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
				VRING_DESC_F_USED(!vq->avail_wrap_counter);
		}
	} while ((cookie = cookie->next) != NULL);

	start_dp[prev].id = id;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

	vq->vq_desc_head_idx = dxp->next;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;

	vq->vq_avail_idx = idx;

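	/* Publish the whole chain before flipping the head descriptor's
	 * AVAIL/USED bits; the device may start processing as soon as
	 * head_dp->flags changes.
	 */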
	virtio_wmb(vq->hw->weak_barriers);
	head_dp->flags = head_flags;
}

static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			uint16_t needed, int use_indirect, int can_push,
			int in_order)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;

	head_idx = vq->vq_desc_head_idx;
	idx = head_idx;
	dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_ring.desc;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = (struct virtio_net_hdr *)
			rte_pktmbuf_prepend(cookie, head_size);
		/* rte_pktmbuf_prepend() counts the header size into the
		 * packet length, which is not wanted here; the subtraction
		 * below restores the correct packet size.
		 */
		cookie->pkt_len -= head_size;

		/* if offload is disabled, the header is not zeroed below, so do it here */
		if (!vq->hw->has_tx_offload) {
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;
	}

	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

	do {
		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len   = cookie->data_len;
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

	if (use_indirect)
		idx = vq->vq_ring.desc[head_idx].next;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

	vq->vq_desc_head_idx = idx;
	vq_update_avail_ring(vq, head_idx);

	if (!in_order) {
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = idx;
	}
}

void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (hw->cvq && hw->cvq->vq) {
		rte_spinlock_init(&hw->cvq->lock);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
	}
}

int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf __rte_unused,
			struct rte_mempool *mp)
{
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq;

	PMD_INIT_FUNC_TRACE();

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	rxvq = &vq->rxq;
	rxvq->queue_id = queue_idx;
	rxvq->mpool = mp;
	if (rxvq->mpool == NULL) {
		rte_exit(EXIT_FAILURE,
			"Cannot allocate mbufs for rx virtqueue");
	}

	dev->data->rx_queues[queue_idx] = rxvq;

	return 0;
}

int
virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq = &vq->rxq;
	struct rte_mbuf *m;
	uint16_t desc_idx;
	int error, nbufs, i;

	PMD_INIT_FUNC_TRACE();

	/* Allocate blank mbufs for each rx descriptor */
	nbufs = 0;

	if (hw->use_simple_rx) {
		for (desc_idx = 0; desc_idx < vq->vq_nentries;
		     desc_idx++) {
			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
			vq->vq_ring.desc[desc_idx].flags =
				VRING_DESC_F_WRITE;
		}

		virtio_rxq_vec_setup(rxvq);
	}

	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
	     desc_idx++) {
		vq->sw_ring[vq->vq_nentries + desc_idx] =
			&rxvq->fake_mbuf;
	}

	if (hw->use_simple_rx) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxvq);
			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		}
	} else if (hw->use_inorder_rx) {
		if (!virtqueue_full(vq)) {
			uint16_t free_cnt = vq->vq_free_cnt;
			struct rte_mbuf *pkts[free_cnt];

			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
				free_cnt)) {
				error = virtqueue_enqueue_refill_inorder(vq,
						pkts,
						free_cnt);
				if (unlikely(error)) {
					for (i = 0; i < free_cnt; i++)
						rte_pktmbuf_free(pkts[i]);
				}
			}

			nbufs += free_cnt;
			vq_update_avail_idx(vq);
		}
	} else {
		while (!virtqueue_full(vq)) {
			m = rte_mbuf_raw_alloc(rxvq->mpool);
			if (m == NULL)
				break;

			/* Enqueue allocated buffers */
			if (vtpci_packed_queue(vq->hw))
				error = virtqueue_enqueue_recv_refill_packed(vq,
						&m, 1);
			else
				error = virtqueue_enqueue_recv_refill(vq,
						&m, 1);
			if (error) {
				rte_pktmbuf_free(m);
				break;
			}
			nbufs++;
		}

		if (!vtpci_packed_queue(vq->hw))
			vq_update_avail_idx(vq);
	}

	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

	VIRTQUEUE_DUMP(vq);

	return 0;
}

/*
 * struct rte_eth_dev *dev: device whose tx queue list is updated
 * uint16_t nb_desc: defaults to the value read from config space
 * unsigned int socket_id: used to allocate the memzone
 * const struct rte_eth_txconf *tx_conf: used to set up the tx engine
 * uint16_t queue_idx: only used as an index into the dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_tx *txvq;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	txvq = &vq->txq;
	txvq->queue_id = queue_idx;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
			"number of TX entries minus 3 (%u)."
			" (tx_free_thresh=%u port=%u queue=%u)\n",
			vq->vq_nentries - 3,
			tx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	vq->vq_free_thresh = tx_free_thresh;

	dev->data->tx_queues[queue_idx] = txvq;
	return 0;
}

int
virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
				uint16_t queue_idx)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];

	PMD_INIT_FUNC_TRACE();

	if (!vtpci_packed_queue(hw)) {
		if (hw->use_inorder_tx)
			vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
	}

	VIRTQUEUE_DUMP(vq);

	return 0;
}

static inline void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;
	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	if (vtpci_packed_queue(vq->hw))
		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
	else
		error = virtqueue_enqueue_recv_refill(vq, &m, 1);

	if (unlikely(error)) {
		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

static inline void
virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
	if (unlikely(error)) {
		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

static inline void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
	uint32_t s = mbuf->pkt_len;
	struct ether_addr *ea;

	stats->bytes += s;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count leading zeros to get log2(s); sizes 65..1023 map to bins 2..5 */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else if (s >= 1519)
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}

static inline void
virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
{
	VIRTIO_DUMP_PACKET(m, m->data_len);

	virtio_update_packet_stats(&rxvq->stats, m);
}

/* Optionally fill offload information from the virtio net header into the mbuf */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	/* nothing to do */
	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return 0;

	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
				&csum);
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
					off) = csum;
		}
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	/* GSO request, save required information in mbuf */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Check unsupported modes */
		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
		    (hdr->gso_size == 0)) {
			return -EINVAL;
		}

		/* Update MSS length in mbuf */
		m->tso_segsz = hdr->gso_size;
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
			case VIRTIO_NET_HDR_GSO_TCPV6:
				m->ol_flags |= PKT_RX_LRO | \
					PKT_RX_L4_CKSUM_NONE;
				break;
			default:
				return -EINVAL;
		}
	}

	return 0;
}

#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
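/*
 * The receive paths below trim each burst so that it ends on a
 * cache-line boundary of the ring (vq_used_cons_idx + num becomes a
 * multiple of DESC_PER_CACHELINE), so the next burst starts on a
 * fresh cache line.
 */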
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb(hw->weak_barriers);

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	while (likely(!virtqueue_full(vq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev
				= &rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "dequeue:%d", num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	while (likely(!virtqueue_full(vq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_pkts_inorder(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued;
	uint32_t seg_num;
	uint32_t seg_res;
	uint32_t hdr_size;
	int32_t i;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = VIRTQUEUE_NUSED(vq);
	nb_used = RTE_MIN(nb_used, nb_pkts);
	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);

	virtio_rmb(hw->weak_barriers);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	nb_enqueued = 0;
	seg_num = 1;
	seg_res = 0;
	hdr_size = hw->vtnet_hdr_size;

	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			seg_num = header->num_buffers;
			if (seg_num == 0)
				seg_num = 1;
		} else {
			seg_num = 1;
		}

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (vq->hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
			rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);

			if (prev)
				prev->next = rxm;

			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		prev = rcv_pkts[nb_rx];
		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
			virtio_rmb(hw->weak_barriers);
			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
							   rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				rx_pkts[nb_rx]->data_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR, "Not enough segments for packet.");
			virtio_discard_rxbuf_inorder(vq, prev);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */

	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev;
	uint16_t nb_used, num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	int32_t i;

	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb(hw->weak_barriers);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) %
				DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);
		seg_num = header->num_buffers;
		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
			rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);

			if (prev)
				prev->next = rxm;

			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		prev = rcv_pkts[nb_rx];
		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
			virtio_rmb(hw->weak_barriers);
			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
							   rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				rx_pkts[nb_rx]->data_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR, "Not enough segments for packet.");
			virtio_discard_rxbuf(vq, prev);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts_packed(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	int32_t i;
	int error;

	if (unlikely(hw->started == 0))
		return nb_rx;

	num = nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
		seg_num = header->num_buffers;

		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;
		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
			rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);

			if (prev)
				prev->next = rxm;

			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);
		if (likely(vq->vq_free_cnt >= rcv_cnt)) {
			num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
					len, rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;

			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];

				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);

				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				rx_pkts[nb_rx]->data_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;
			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR, "Not enough segments for packet.");
			if (prev)
				virtio_discard_rxbuf(vq, prev);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_tx = 0;
	int error;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

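	/* Reclaim enough completed descriptors up front so the whole
	 * burst can be enqueued.
	 */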
	if (nb_pkts > vq->vq_free_cnt)
		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, slots, need;

		/* Do VLAN tag insertion */
		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
			error = rte_vlan_insert(&txm);
			if (unlikely(error)) {
				rte_pktmbuf_free(txm);
				continue;
			}
		}

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;

1936 		/* How many main ring entries are needed for this Tx?
1937 		 * any_layout => number of segments
1938 		 * default    => number of segments + 1
1939 		 */
1940 		slots = txm->nb_segs + !can_push;
1941 		need = slots - vq->vq_free_cnt;
1942 
1943 		/* A positive value means more free vring descriptors are needed */
1944 		if (unlikely(need > 0)) {
1945 			need = RTE_MIN(need, (int)nb_pkts);
1946 			virtio_xmit_cleanup_packed(vq, need);
1947 			need = slots - vq->vq_free_cnt;
1948 			if (unlikely(need > 0)) {
1949 				PMD_TX_LOG(ERR,
1950 					   "No free tx descriptors to transmit");
1951 				break;
1952 			}
1953 		}
1954 
1955 		/* Enqueue Packet buffers */
1956 		virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
1957 
1958 		virtio_update_packet_stats(&txvq->stats, txm);
1959 	}
1960 
1961 	txvq->stats.packets += nb_tx;
1962 
1963 	if (likely(nb_tx)) {
1964 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1965 			virtqueue_notify(vq);
1966 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1967 		}
1968 	}
1969 
1970 	return nb_tx;
1971 }
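
/*
 * The "optimize ring usage" test above is repeated verbatim in every
 * transmit path in this file. A minimal sketch of it factored into a
 * predicate, using only the checks the code already performs; the
 * helper name is hypothetical. The header can be pushed into the mbuf
 * headroom only when the device tolerates an arbitrary buffer layout,
 * we are the sole owner of a single, direct segment, and there is
 * enough suitably aligned headroom for the virtio-net header.
 */
static inline int
virtio_can_push_hdr_sketch(struct virtio_hw *hw, struct rte_mbuf *txm)
{
	return (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		rte_mbuf_refcnt_read(txm) == 1 &&	/* sole owner */
		RTE_MBUF_DIRECT(txm) &&			/* not a clone */
		txm->nb_segs == 1 &&			/* single segment */
		rte_pktmbuf_headroom(txm) >= hw->vtnet_hdr_size &&
		rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			__alignof__(struct virtio_net_hdr_mrg_rxbuf));
}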
1972 
1973 uint16_t
1974 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1975 {
1976 	struct virtnet_tx *txvq = tx_queue;
1977 	struct virtqueue *vq = txvq->vq;
1978 	struct virtio_hw *hw = vq->hw;
1979 	uint16_t hdr_size = hw->vtnet_hdr_size;
1980 	uint16_t nb_used, nb_tx = 0;
1981 	int error;
1982 
1983 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1984 		return nb_tx;
1985 
1986 	if (unlikely(nb_pkts < 1))
1987 		return nb_pkts;
1988 
1989 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1990 	nb_used = VIRTQUEUE_NUSED(vq);
1991 
1992 	virtio_rmb(hw->weak_barriers);
1993 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1994 		virtio_xmit_cleanup(vq, nb_used);
1995 
1996 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1997 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1998 		int can_push = 0, use_indirect = 0, slots, need;
1999 
2000 		/* Do VLAN tag insertion */
2001 		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
2002 			error = rte_vlan_insert(&txm);
2003 			if (unlikely(error)) {
2004 				rte_pktmbuf_free(txm);
2005 				continue;
2006 			}
2007 		}
2008 
2009 		/* optimize ring usage */
2010 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2011 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2012 		    rte_mbuf_refcnt_read(txm) == 1 &&
2013 		    RTE_MBUF_DIRECT(txm) &&
2014 		    txm->nb_segs == 1 &&
2015 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
2016 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2017 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2018 			can_push = 1;
2019 		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
2020 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
2021 			use_indirect = 1;
2022 
2023 		/* How many main ring entries are needed for this Tx?
2024 		 * any_layout => number of segments
2025 		 * indirect   => 1
2026 		 * default    => number of segments + 1
2027 		 */
2028 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
2029 		need = slots - vq->vq_free_cnt;
2030 
2031 		/* A positive value means more free vring descriptors are needed */
2032 		if (unlikely(need > 0)) {
2033 			nb_used = VIRTQUEUE_NUSED(vq);
2034 			virtio_rmb(hw->weak_barriers);
2035 			need = RTE_MIN(need, (int)nb_used);
2036 
2037 			virtio_xmit_cleanup(vq, need);
2038 			need = slots - vq->vq_free_cnt;
2039 			if (unlikely(need > 0)) {
2040 				PMD_TX_LOG(ERR,
2041 					   "No free tx descriptors to transmit");
2042 				break;
2043 			}
2044 		}
2045 
2046 		/* Enqueue Packet buffers */
2047 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
2048 			can_push, 0);
2049 
2050 		virtio_update_packet_stats(&txvq->stats, txm);
2051 	}
2052 
2053 	txvq->stats.packets += nb_tx;
2054 
2055 	if (likely(nb_tx)) {
2056 		vq_update_avail_idx(vq);
2057 
2058 		if (unlikely(virtqueue_kick_prepare(vq))) {
2059 			virtqueue_notify(vq);
2060 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2061 		}
2062 	}
2063 
2064 	return nb_tx;
2065 }
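
/*
 * A worked sketch of the slot-count rule described in the comments
 * above (indirect => 1, any_layout => nb_segs, default => nb_segs + 1);
 * the helper name is hypothetical. With an indirect descriptor a single
 * ring slot points at a separate descriptor table, and with can_push
 * the virtio header shares the first data segment instead of needing
 * a slot of its own.
 */
static inline int
virtio_tx_slots_sketch(const struct rte_mbuf *txm, int use_indirect,
		int can_push)
{
	if (use_indirect)
		return 1;
	return txm->nb_segs + !can_push;
}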
2066 
2067 uint16_t
2068 virtio_xmit_pkts_inorder(void *tx_queue,
2069 			struct rte_mbuf **tx_pkts,
2070 			uint16_t nb_pkts)
2071 {
2072 	struct virtnet_tx *txvq = tx_queue;
2073 	struct virtqueue *vq = txvq->vq;
2074 	struct virtio_hw *hw = vq->hw;
2075 	uint16_t hdr_size = hw->vtnet_hdr_size;
2076 	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
2077 	struct rte_mbuf *inorder_pkts[nb_pkts];
2078 	int error;
2079 
2080 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2081 		return nb_tx;
2082 
2083 	if (unlikely(nb_pkts < 1))
2084 		return nb_pkts;
2085 
2086 	VIRTQUEUE_DUMP(vq);
2087 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2088 	nb_used = VIRTQUEUE_NUSED(vq);
2089 
2090 	virtio_rmb(hw->weak_barriers);
2091 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2092 		virtio_xmit_cleanup_inorder(vq, nb_used);
2093 
2094 	if (unlikely(!vq->vq_free_cnt))
2095 		virtio_xmit_cleanup_inorder(vq, nb_used);
2096 
2097 	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
2098 
2099 	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
2100 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2101 		int slots, need;
2102 
2103 		/* Do VLAN tag insertion */
2104 		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
2105 			error = rte_vlan_insert(&txm);
2106 			if (unlikely(error)) {
2107 				rte_pktmbuf_free(txm);
2108 				continue;
2109 			}
2110 		}
2111 
2112 		/* optimize ring usage */
2113 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2114 		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2115 		     rte_mbuf_refcnt_read(txm) == 1 &&
2116 		     RTE_MBUF_DIRECT(txm) &&
2117 		     txm->nb_segs == 1 &&
2118 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
2119 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2120 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
2121 			inorder_pkts[nb_inorder_pkts] = txm;
2122 			nb_inorder_pkts++;
2123 
2124 			virtio_update_packet_stats(&txvq->stats, txm);
2125 			continue;
2126 		}
2127 
2128 		if (nb_inorder_pkts) {
2129 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2130 							nb_inorder_pkts);
2131 			nb_inorder_pkts = 0;
2132 		}
2133 
2134 		slots = txm->nb_segs + 1;
2135 		need = slots - vq->vq_free_cnt;
2136 		if (unlikely(need > 0)) {
2137 			nb_used = VIRTQUEUE_NUSED(vq);
2138 			virtio_rmb(hw->weak_barriers);
2139 			need = RTE_MIN(need, (int)nb_used);
2140 
2141 			virtio_xmit_cleanup_inorder(vq, need);
2142 
2143 			need = slots - vq->vq_free_cnt;
2144 
2145 			if (unlikely(need > 0)) {
2146 				PMD_TX_LOG(ERR,
2147 					"No free tx descriptors to transmit");
2148 				break;
2149 			}
2150 		}
2151 		/* Enqueue Packet buffers */
2152 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2153 
2154 		virtio_update_packet_stats(&txvq->stats, txm);
2155 	}
2156 
2157 	/* Flush any remaining batched in-order packets */
2158 	if (nb_inorder_pkts)
2159 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2160 						nb_inorder_pkts);
2161 
2162 	txvq->stats.packets += nb_tx;
2163 
2164 	if (likely(nb_tx)) {
2165 		vq_update_avail_idx(vq);
2166 
2167 		if (unlikely(virtqueue_kick_prepare(vq))) {
2168 			virtqueue_notify(vq);
2169 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2170 		}
2171 	}
2172 
2173 	VIRTQUEUE_DUMP(vq);
2174 
2175 	return nb_tx;
2176 }
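
/*
 * An illustrative, hypothetical caller-side view of the burst transmit
 * functions above. Applications reach them through the generic ethdev
 * API, and rte_eth_tx_burst() may accept fewer packets than requested
 * (for example when the loops above break on descriptor shortage), so
 * a bounded retry such as this one is a common pattern.
 */
static inline void
tx_burst_retry_sketch(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;
	unsigned int retries = 0;

	while (sent < nb_pkts && retries++ < 64)
		sent += rte_eth_tx_burst(port_id, queue_id,
				pkts + sent, nb_pkts - sent);

	/* Free whatever the driver never accepted. */
	while (sent < nb_pkts)
		rte_pktmbuf_free(pkts[sent++]);
}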
2177