1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45 	struct virtnet_rx *rxvq = rxq;
46 	struct virtqueue *vq = rxvq->vq;
47 
48 	return VIRTQUEUE_NUSED(vq) >= offset;
49 }
50 
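/*
 * Return 'num' descriptors to an in-order virtqueue: descriptors are
 * consumed sequentially, so only the free counter and the tail index
 * need to be updated.
 */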
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54 	vq->vq_free_cnt += num;
55 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57 
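/*
 * Free the descriptor chain starting at 'desc_idx': credit its length
 * back to vq_free_cnt and link the chain onto the tail of the free
 * descriptor list.
 */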
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61 	struct vring_desc *dp, *dp_tail;
62 	struct vq_desc_extra *dxp;
63 	uint16_t desc_idx_last = desc_idx;
64 
65 	dp  = &vq->vq_ring.desc[desc_idx];
66 	dxp = &vq->vq_descx[desc_idx];
67 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 		while (dp->flags & VRING_DESC_F_NEXT) {
70 			desc_idx_last = dp->next;
71 			dp = &vq->vq_ring.desc[dp->next];
72 		}
73 	}
74 	dxp->ndescs = 0;
75 
76 	/*
77 	 * We must append the existing free chain, if any, to the end of the
78 	 * newly freed chain. If the virtqueue was completely used, then the
79 	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
80 	 */
81 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 		vq->vq_desc_head_idx = desc_idx;
83 	} else {
84 		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
85 		dp_tail->next = desc_idx;
86 	}
87 
88 	vq->vq_desc_tail_idx = desc_idx_last;
89 	dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91 
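/*
 * Packed ring variant: return the descriptors tracked by vq_descx[id]
 * to the free list and append 'id' to the chain of free extra data.
 */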
92 static void
93 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
94 {
95 	struct vq_desc_extra *dxp;
96 
97 	dxp = &vq->vq_descx[id];
98 	vq->vq_free_cnt += dxp->ndescs;
99 
100 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
101 		vq->vq_desc_head_idx = id;
102 	else
103 		vq->vq_descx[vq->vq_desc_tail_idx].next = id;
104 
105 	vq->vq_desc_tail_idx = id;
106 	dxp->next = VQ_RING_DESC_CHAIN_END;
107 }
108 
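/*
 * Dequeue up to 'num' received buffers from a packed virtqueue.
 * Stops early at the first descriptor not yet used by the device (or
 * without an mbuf cookie) and returns the number of mbufs written to
 * rx_pkts/len.
 */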
109 static uint16_t
110 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
111 				  struct rte_mbuf **rx_pkts,
112 				  uint32_t *len,
113 				  uint16_t num)
114 {
115 	struct rte_mbuf *cookie;
116 	uint16_t used_idx;
117 	uint16_t id;
118 	struct vring_packed_desc *desc;
119 	uint16_t i;
120 
121 	desc = vq->ring_packed.desc_packed;
122 
123 	for (i = 0; i < num; i++) {
124 		used_idx = vq->vq_used_cons_idx;
125 		if (!desc_is_used(&desc[used_idx], vq))
126 			return i;
127 		len[i] = desc[used_idx].len;
128 		id = desc[used_idx].id;
129 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
130 		if (unlikely(cookie == NULL)) {
131 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
132 				vq->vq_used_cons_idx);
133 			break;
134 		}
135 		rte_prefetch0(cookie);
136 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
137 		rx_pkts[i] = cookie;
138 
139 		vq->vq_free_cnt++;
140 		vq->vq_used_cons_idx++;
141 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
142 			vq->vq_used_cons_idx -= vq->vq_nentries;
143 			vq->used_wrap_counter ^= 1;
144 		}
145 	}
146 
147 	return i;
148 }
149 
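/*
 * Split ring variant: dequeue up to 'num' received buffers, freeing
 * each used descriptor chain back to the free list as it goes.
 */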
150 static uint16_t
151 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
152 			   uint32_t *len, uint16_t num)
153 {
154 	struct vring_used_elem *uep;
155 	struct rte_mbuf *cookie;
156 	uint16_t used_idx, desc_idx;
157 	uint16_t i;
158 
159 	/*  Caller does the check */
160 	for (i = 0; i < num ; i++) {
161 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
162 		uep = &vq->vq_ring.used->ring[used_idx];
163 		desc_idx = (uint16_t) uep->id;
164 		len[i] = uep->len;
165 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
166 
167 		if (unlikely(cookie == NULL)) {
168 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
169 				vq->vq_used_cons_idx);
170 			break;
171 		}
172 
173 		rte_prefetch0(cookie);
174 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
175 		rx_pkts[i]  = cookie;
176 		vq->vq_used_cons_idx++;
177 		vq_ring_free_chain(vq, desc_idx);
178 		vq->vq_descx[desc_idx].cookie = NULL;
179 	}
180 
181 	return i;
182 }
183 
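/*
 * In-order variant: the used index equals the descriptor index, so the
 * descriptors can be released in a single batch once the loop is done.
 */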
184 static uint16_t
185 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
186 			struct rte_mbuf **rx_pkts,
187 			uint32_t *len,
188 			uint16_t num)
189 {
190 	struct vring_used_elem *uep;
191 	struct rte_mbuf *cookie;
192 	uint16_t used_idx = 0;
193 	uint16_t i;
194 
195 	if (unlikely(num == 0))
196 		return 0;
197 
198 	for (i = 0; i < num; i++) {
199 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
200 		/* Desc idx same as used idx */
201 		uep = &vq->vq_ring.used->ring[used_idx];
202 		len[i] = uep->len;
203 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
204 
205 		if (unlikely(cookie == NULL)) {
206 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
207 				vq->vq_used_cons_idx);
208 			break;
209 		}
210 
211 		rte_prefetch0(cookie);
212 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
213 		rx_pkts[i]  = cookie;
214 		vq->vq_used_cons_idx++;
215 		vq->vq_descx[used_idx].cookie = NULL;
216 	}
217 
218 	vq_ring_free_inorder(vq, used_idx, i);
219 	return i;
220 }
221 
222 #ifndef DEFAULT_TX_FREE_THRESH
223 #define DEFAULT_TX_FREE_THRESH 32
224 #endif
225 
226 /* Cleanup from completed packed-ring transmits. */
227 static void
228 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
229 {
230 	uint16_t used_idx, id;
231 	uint16_t size = vq->vq_nentries;
232 	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
233 	struct vq_desc_extra *dxp;
234 
235 	used_idx = vq->vq_used_cons_idx;
236 	while (num-- && desc_is_used(&desc[used_idx], vq)) {
237 		used_idx = vq->vq_used_cons_idx;
238 		id = desc[used_idx].id;
239 		dxp = &vq->vq_descx[id];
240 		vq->vq_used_cons_idx += dxp->ndescs;
241 		if (vq->vq_used_cons_idx >= size) {
242 			vq->vq_used_cons_idx -= size;
243 			vq->used_wrap_counter ^= 1;
244 		}
245 		vq_ring_free_id_packed(vq, id);
246 		if (dxp->cookie != NULL) {
247 			rte_pktmbuf_free(dxp->cookie);
248 			dxp->cookie = NULL;
249 		}
250 		used_idx = vq->vq_used_cons_idx;
251 	}
252 }
253 
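/* Cleanup from completed split-ring transmits. */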
254 static void
255 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
256 {
257 	uint16_t i, used_idx, desc_idx;
258 	for (i = 0; i < num; i++) {
259 		struct vring_used_elem *uep;
260 		struct vq_desc_extra *dxp;
261 
262 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
263 		uep = &vq->vq_ring.used->ring[used_idx];
264 
265 		desc_idx = (uint16_t) uep->id;
266 		dxp = &vq->vq_descx[desc_idx];
267 		vq->vq_used_cons_idx++;
268 		vq_ring_free_chain(vq, desc_idx);
269 
270 		if (dxp->cookie != NULL) {
271 			rte_pktmbuf_free(dxp->cookie);
272 			dxp->cookie = NULL;
273 		}
274 	}
275 }
276 
277 /* Cleanup from completed inorder transmits. */
278 static void
279 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
280 {
281 	uint16_t i, used_idx, desc_idx = 0, last_idx;
282 	int16_t free_cnt = 0;
283 	struct vq_desc_extra *dxp = NULL;
284 
285 	if (unlikely(num == 0))
286 		return;
287 
288 	for (i = 0; i < num; i++) {
289 		struct vring_used_elem *uep;
290 
291 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
292 		uep = &vq->vq_ring.used->ring[used_idx];
293 		desc_idx = (uint16_t)uep->id;
294 
295 		dxp = &vq->vq_descx[desc_idx];
296 		vq->vq_used_cons_idx++;
297 
298 		if (dxp->cookie != NULL) {
299 			rte_pktmbuf_free(dxp->cookie);
300 			dxp->cookie = NULL;
301 		}
302 	}
303 
304 	last_idx = desc_idx + dxp->ndescs - 1;
305 	free_cnt = last_idx - vq->vq_desc_tail_idx;
306 	if (free_cnt <= 0)
307 		free_cnt += vq->vq_nentries;
308 
309 	vq_ring_free_inorder(vq, last_idx, free_cnt);
310 }
311 
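/*
 * Refill an in-order RX virtqueue with 'num' mbufs, one descriptor per
 * mbuf, leaving room for the virtio-net header in the mbuf headroom.
 * Returns -ENOSPC or -EMSGSIZE when the ring cannot hold them.
 */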
312 static inline int
313 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
314 			struct rte_mbuf **cookies,
315 			uint16_t num)
316 {
317 	struct vq_desc_extra *dxp;
318 	struct virtio_hw *hw = vq->hw;
319 	struct vring_desc *start_dp;
320 	uint16_t head_idx, idx, i = 0;
321 
322 	if (unlikely(vq->vq_free_cnt == 0))
323 		return -ENOSPC;
324 	if (unlikely(vq->vq_free_cnt < num))
325 		return -EMSGSIZE;
326 
327 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
328 	start_dp = vq->vq_ring.desc;
329 
330 	while (i < num) {
331 		idx = head_idx & (vq->vq_nentries - 1);
332 		dxp = &vq->vq_descx[idx];
333 		dxp->cookie = (void *)cookies[i];
334 		dxp->ndescs = 1;
335 
336 		start_dp[idx].addr =
337 				VIRTIO_MBUF_ADDR(cookies[i], vq) +
338 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
339 		start_dp[idx].len =
340 				cookies[i]->buf_len -
341 				RTE_PKTMBUF_HEADROOM +
342 				hw->vtnet_hdr_size;
343 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
344 
345 		vq_update_avail_ring(vq, idx);
346 		head_idx++;
347 		i++;
348 	}
349 
350 	vq->vq_desc_head_idx += num;
351 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
352 	return 0;
353 }
354 
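/*
 * Refill the split RX virtqueue with 'num' mbufs, following the
 * descriptor free list starting at vq_desc_head_idx.
 */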
355 static inline int
356 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
357 				uint16_t num)
358 {
359 	struct vq_desc_extra *dxp;
360 	struct virtio_hw *hw = vq->hw;
361 	struct vring_desc *start_dp = vq->vq_ring.desc;
362 	uint16_t idx, i;
363 
364 	if (unlikely(vq->vq_free_cnt == 0))
365 		return -ENOSPC;
366 	if (unlikely(vq->vq_free_cnt < num))
367 		return -EMSGSIZE;
368 
369 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
370 		return -EFAULT;
371 
372 	for (i = 0; i < num; i++) {
373 		idx = vq->vq_desc_head_idx;
374 		dxp = &vq->vq_descx[idx];
375 		dxp->cookie = (void *)cookie[i];
376 		dxp->ndescs = 1;
377 
378 		start_dp[idx].addr =
379 			VIRTIO_MBUF_ADDR(cookie[i], vq) +
380 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
381 		start_dp[idx].len =
382 			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
383 			hw->vtnet_hdr_size;
384 		start_dp[idx].flags = VRING_DESC_F_WRITE;
385 		vq->vq_desc_head_idx = start_dp[idx].next;
386 		vq_update_avail_ring(vq, idx);
387 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
388 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
389 			break;
390 		}
391 	}
392 
393 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
394 
395 	return 0;
396 }
397 
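/*
 * Packed ring refill: the descriptor flags are written last, after a
 * write barrier, so the device never sees a partially filled descriptor.
 */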
398 static inline int
399 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
400 				     struct rte_mbuf **cookie, uint16_t num)
401 {
402 	struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
403 	uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
404 	struct virtio_hw *hw = vq->hw;
405 	struct vq_desc_extra *dxp;
406 	uint16_t idx;
407 	int i;
408 
409 	if (unlikely(vq->vq_free_cnt == 0))
410 		return -ENOSPC;
411 	if (unlikely(vq->vq_free_cnt < num))
412 		return -EMSGSIZE;
413 
414 	for (i = 0; i < num; i++) {
415 		idx = vq->vq_avail_idx;
416 		dxp = &vq->vq_descx[idx];
417 		dxp->cookie = (void *)cookie[i];
418 		dxp->ndescs = 1;
419 
420 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
421 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
422 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
423 					+ hw->vtnet_hdr_size;
424 
425 		vq->vq_desc_head_idx = dxp->next;
426 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
427 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
428 		rte_smp_wmb();
429 		start_dp[idx].flags = flags;
430 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
431 			vq->vq_avail_idx -= vq->vq_nentries;
432 			vq->avail_wrap_counter ^= 1;
433 			vq->avail_used_flags =
434 				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
435 				VRING_DESC_F_USED(!vq->avail_wrap_counter);
436 			flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
437 		}
438 	}
439 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
440 	return 0;
441 }
442 
443 /* When doing TSO, the IP length is not included in the pseudo header
444  * checksum of the packet given to the PMD, but the virtio device
445  * expects it to be included.
446  */
447 static void
448 virtio_tso_fix_cksum(struct rte_mbuf *m)
449 {
450 	/* common case: header is not fragmented */
451 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
452 			m->l4_len)) {
453 		struct ipv4_hdr *iph;
454 		struct ipv6_hdr *ip6h;
455 		struct tcp_hdr *th;
456 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
457 		uint32_t tmp;
458 
459 		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
460 		th = RTE_PTR_ADD(iph, m->l3_len);
461 		if ((iph->version_ihl >> 4) == 4) {
462 			iph->hdr_checksum = 0;
463 			iph->hdr_checksum = rte_ipv4_cksum(iph);
464 			ip_len = iph->total_length;
465 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
466 				m->l3_len);
467 		} else {
468 			ip6h = (struct ipv6_hdr *)iph;
469 			ip_paylen = ip6h->payload_len;
470 		}
471 
472 		/* calculate the new phdr checksum not including ip_paylen */
473 		prev_cksum = th->cksum;
474 		tmp = prev_cksum;
475 		tmp += ip_paylen;
476 		tmp = (tmp & 0xffff) + (tmp >> 16);
477 		new_cksum = tmp;
478 
479 		/* replace it in the packet */
480 		th->cksum = new_cksum;
481 	}
482 }
483 
484 
485 /* avoid the write operation when it is not needed, to lessen cache issues */
486 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
487 	if ((var) != (val))			\
488 		(var) = (val);			\
489 } while (0)
490 
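/*
 * Translate the mbuf Tx offload requests (L4 checksum, TSO) into the
 * virtio-net header, or clear the header fields when no offload is set.
 */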
491 static inline void
492 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
493 			struct rte_mbuf *cookie,
494 			bool offload)
495 {
496 	if (offload) {
497 		if (cookie->ol_flags & PKT_TX_TCP_SEG)
498 			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
499 
500 		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
501 		case PKT_TX_UDP_CKSUM:
502 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
503 			hdr->csum_offset = offsetof(struct udp_hdr,
504 				dgram_cksum);
505 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
506 			break;
507 
508 		case PKT_TX_TCP_CKSUM:
509 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
510 			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
511 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
512 			break;
513 
514 		default:
515 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
516 			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
517 			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
518 			break;
519 		}
520 
521 		/* TCP Segmentation Offload */
522 		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
523 			virtio_tso_fix_cksum(cookie);
524 			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
525 				VIRTIO_NET_HDR_GSO_TCPV6 :
526 				VIRTIO_NET_HDR_GSO_TCPV4;
527 			hdr->gso_size = cookie->tso_segsz;
528 			hdr->hdr_len =
529 				cookie->l2_len +
530 				cookie->l3_len +
531 				cookie->l4_len;
532 		} else {
533 			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
534 			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
535 			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
536 		}
537 	}
538 }
539 
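/*
 * In-order Tx: enqueue 'num' packets using one descriptor each, pushing
 * the virtio-net header into the mbuf headroom.
 */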
540 static inline void
541 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
542 			struct rte_mbuf **cookies,
543 			uint16_t num)
544 {
545 	struct vq_desc_extra *dxp;
546 	struct virtqueue *vq = txvq->vq;
547 	struct vring_desc *start_dp;
548 	struct virtio_net_hdr *hdr;
549 	uint16_t idx;
550 	uint16_t head_size = vq->hw->vtnet_hdr_size;
551 	uint16_t i = 0;
552 
553 	idx = vq->vq_desc_head_idx;
554 	start_dp = vq->vq_ring.desc;
555 
556 	while (i < num) {
557 		idx = idx & (vq->vq_nentries - 1);
558 		dxp = &vq->vq_descx[idx];
559 		dxp->cookie = (void *)cookies[i];
560 		dxp->ndescs = 1;
561 
562 		hdr = (struct virtio_net_hdr *)
563 			rte_pktmbuf_prepend(cookies[i], head_size);
564 		cookies[i]->pkt_len -= head_size;
565 
566 		/* if offload is disabled, the header is not zeroed below, do it now */
567 		if (!vq->hw->has_tx_offload) {
568 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
569 			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
570 			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
571 			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
572 			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
573 			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
574 		}
575 
576 		virtqueue_xmit_offload(hdr, cookies[i],
577 				vq->hw->has_tx_offload);
578 
579 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
580 		start_dp[idx].len   = cookies[i]->data_len;
581 		start_dp[idx].flags = 0;
582 
583 		vq_update_avail_ring(vq, idx);
584 
585 		idx++;
586 		i++;
587 	}
588 
589 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
590 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
591 }
592 
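/*
 * Packed ring Tx: chain the mbuf segments into descriptors, either
 * pushing the virtio-net header into the headroom (can_push) or using
 * the reserved header region. The head descriptor flags are written
 * last so the device only sees a complete chain.
 */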
593 static inline void
594 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
595 			      uint16_t needed, int can_push)
596 {
597 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
598 	struct vq_desc_extra *dxp;
599 	struct virtqueue *vq = txvq->vq;
600 	struct vring_packed_desc *start_dp, *head_dp;
601 	uint16_t idx, id, head_idx, head_flags;
602 	uint16_t head_size = vq->hw->vtnet_hdr_size;
603 	struct virtio_net_hdr *hdr;
604 	uint16_t prev;
605 
606 	id = vq->vq_desc_head_idx;
607 
608 	dxp = &vq->vq_descx[id];
609 	dxp->ndescs = needed;
610 	dxp->cookie = cookie;
611 
612 	head_idx = vq->vq_avail_idx;
613 	idx = head_idx;
614 	prev = head_idx;
615 	start_dp = vq->ring_packed.desc_packed;
616 
617 	head_dp = &vq->ring_packed.desc_packed[idx];
618 	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
619 	head_flags |= vq->avail_used_flags;
620 
621 	if (can_push) {
622 		/* prepend cannot fail, checked by caller */
623 		hdr = (struct virtio_net_hdr *)
624 			rte_pktmbuf_prepend(cookie, head_size);
625 		/* rte_pktmbuf_prepend() adds the hdr size to the pkt length,
626 		 * which is not wanted here. The subtraction below restores the pkt size.
627 		 */
628 		cookie->pkt_len -= head_size;
629 
630 		/* if offload is disabled, the header is not zeroed below, do it now */
631 		if (!vq->hw->has_tx_offload) {
632 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
633 			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
634 			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
635 			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
636 			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
637 			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
638 		}
639 	} else {
640 		/* setup first tx ring slot to point to header
641 		 * stored in reserved region.
642 		 */
643 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
644 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
645 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
646 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
647 		idx++;
648 		if (idx >= vq->vq_nentries) {
649 			idx -= vq->vq_nentries;
650 			vq->avail_wrap_counter ^= 1;
651 			vq->avail_used_flags =
652 				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
653 				VRING_DESC_F_USED(!vq->avail_wrap_counter);
654 		}
655 	}
656 
657 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
658 
659 	do {
660 		uint16_t flags;
661 
662 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
663 		start_dp[idx].len  = cookie->data_len;
664 		if (likely(idx != head_idx)) {
665 			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
666 			flags |= vq->avail_used_flags;
667 			start_dp[idx].flags = flags;
668 		}
669 		prev = idx;
670 		idx++;
671 		if (idx >= vq->vq_nentries) {
672 			idx -= vq->vq_nentries;
673 			vq->avail_wrap_counter ^= 1;
674 			vq->avail_used_flags =
675 				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
676 				VRING_DESC_F_USED(!vq->avail_wrap_counter);
677 		}
678 	} while ((cookie = cookie->next) != NULL);
679 
680 	start_dp[prev].id = id;
681 
682 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
683 
684 	vq->vq_desc_head_idx = dxp->next;
685 	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
686 		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
687 
688 	vq->vq_avail_idx = idx;
689 
690 	rte_smp_wmb();
691 	head_dp->flags = head_flags;
692 }
693 
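/*
 * Split ring Tx: the virtio-net header is pushed into the mbuf headroom
 * (can_push), placed in an indirect descriptor table (use_indirect), or
 * given its own descriptor from the reserved header region.
 */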
694 static inline void
695 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
696 			uint16_t needed, int use_indirect, int can_push,
697 			int in_order)
698 {
699 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
700 	struct vq_desc_extra *dxp;
701 	struct virtqueue *vq = txvq->vq;
702 	struct vring_desc *start_dp;
703 	uint16_t seg_num = cookie->nb_segs;
704 	uint16_t head_idx, idx;
705 	uint16_t head_size = vq->hw->vtnet_hdr_size;
706 	struct virtio_net_hdr *hdr;
707 
708 	head_idx = vq->vq_desc_head_idx;
709 	idx = head_idx;
710 	dxp = &vq->vq_descx[idx];
711 	dxp->cookie = (void *)cookie;
712 	dxp->ndescs = needed;
713 
714 	start_dp = vq->vq_ring.desc;
715 
716 	if (can_push) {
717 		/* prepend cannot fail, checked by caller */
718 		hdr = (struct virtio_net_hdr *)
719 			rte_pktmbuf_prepend(cookie, head_size);
720 		/* rte_pktmbuf_prepend() adds the hdr size to the pkt length,
721 		 * which is not wanted here. The subtraction below restores the pkt size.
722 		 */
723 		cookie->pkt_len -= head_size;
724 
725 		/* if offload is disabled, the header is not zeroed below, do it now */
726 		if (!vq->hw->has_tx_offload) {
727 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
728 			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
729 			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
730 			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
731 			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
732 			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
733 		}
734 	} else if (use_indirect) {
735 		/* setup tx ring slot to point to indirect
736 		 * descriptor list stored in reserved region.
737 		 *
738 		 * the first slot in indirect ring is already preset
739 		 * to point to the header in reserved region
740 		 */
741 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
742 			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
743 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
744 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
745 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
746 
747 		/* loop below will fill in rest of the indirect elements */
748 		start_dp = txr[idx].tx_indir;
749 		idx = 1;
750 	} else {
751 		/* setup first tx ring slot to point to header
752 		 * stored in reserved region.
753 		 */
754 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
755 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
756 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
757 		start_dp[idx].flags = VRING_DESC_F_NEXT;
758 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
759 
760 		idx = start_dp[idx].next;
761 	}
762 
763 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
764 
765 	do {
766 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
767 		start_dp[idx].len   = cookie->data_len;
768 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
769 		idx = start_dp[idx].next;
770 	} while ((cookie = cookie->next) != NULL);
771 
772 	if (use_indirect)
773 		idx = vq->vq_ring.desc[head_idx].next;
774 
775 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
776 
777 	vq->vq_desc_head_idx = idx;
778 	vq_update_avail_ring(vq, head_idx);
779 
780 	if (!in_order) {
781 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
782 			vq->vq_desc_tail_idx = idx;
783 	}
784 }
785 
786 void
787 virtio_dev_cq_start(struct rte_eth_dev *dev)
788 {
789 	struct virtio_hw *hw = dev->data->dev_private;
790 
791 	if (hw->cvq && hw->cvq->vq) {
792 		rte_spinlock_init(&hw->cvq->lock);
793 		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
794 	}
795 }
796 
797 int
798 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
799 			uint16_t queue_idx,
800 			uint16_t nb_desc,
801 			unsigned int socket_id __rte_unused,
802 			const struct rte_eth_rxconf *rx_conf __rte_unused,
803 			struct rte_mempool *mp)
804 {
805 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
806 	struct virtio_hw *hw = dev->data->dev_private;
807 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
808 	struct virtnet_rx *rxvq;
809 
810 	PMD_INIT_FUNC_TRACE();
811 
812 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
813 		nb_desc = vq->vq_nentries;
814 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
815 
816 	rxvq = &vq->rxq;
817 	rxvq->queue_id = queue_idx;
818 	rxvq->mpool = mp;
819 	if (rxvq->mpool == NULL) {
820 		rte_exit(EXIT_FAILURE,
821 			"Cannot allocate mbufs for rx virtqueue");
822 	}
823 
824 	dev->data->rx_queues[queue_idx] = rxvq;
825 
826 	return 0;
827 }
828 
829 int
830 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
831 {
832 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
833 	struct virtio_hw *hw = dev->data->dev_private;
834 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
835 	struct virtnet_rx *rxvq = &vq->rxq;
836 	struct rte_mbuf *m;
837 	uint16_t desc_idx;
838 	int error, nbufs, i;
839 
840 	PMD_INIT_FUNC_TRACE();
841 
842 	/* Allocate blank mbufs for each rx descriptor */
843 	nbufs = 0;
844 
845 	if (hw->use_simple_rx) {
846 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
847 		     desc_idx++) {
848 			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
849 			vq->vq_ring.desc[desc_idx].flags =
850 				VRING_DESC_F_WRITE;
851 		}
852 
853 		virtio_rxq_vec_setup(rxvq);
854 	}
855 
856 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
857 	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
858 	     desc_idx++) {
859 		vq->sw_ring[vq->vq_nentries + desc_idx] =
860 			&rxvq->fake_mbuf;
861 	}
862 
863 	if (hw->use_simple_rx) {
864 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
865 			virtio_rxq_rearm_vec(rxvq);
866 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
867 		}
868 	} else if (hw->use_inorder_rx) {
869 		if ((!virtqueue_full(vq))) {
870 			uint16_t free_cnt = vq->vq_free_cnt;
871 			struct rte_mbuf *pkts[free_cnt];
872 
873 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
874 				free_cnt)) {
875 				error = virtqueue_enqueue_refill_inorder(vq,
876 						pkts,
877 						free_cnt);
878 				if (unlikely(error)) {
879 					for (i = 0; i < free_cnt; i++)
880 						rte_pktmbuf_free(pkts[i]);
881 				}
882 			}
883 
884 			nbufs += free_cnt;
885 			vq_update_avail_idx(vq);
886 		}
887 	} else {
888 		while (!virtqueue_full(vq)) {
889 			m = rte_mbuf_raw_alloc(rxvq->mpool);
890 			if (m == NULL)
891 				break;
892 
893 			/* Enqueue allocated buffers */
894 			if (vtpci_packed_queue(vq->hw))
895 				error = virtqueue_enqueue_recv_refill_packed(vq,
896 						&m, 1);
897 			else
898 				error = virtqueue_enqueue_recv_refill(vq,
899 						&m, 1);
900 			if (error) {
901 				rte_pktmbuf_free(m);
902 				break;
903 			}
904 			nbufs++;
905 		}
906 
907 		if (!vtpci_packed_queue(vq->hw))
908 			vq_update_avail_idx(vq);
909 	}
910 
911 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
912 
913 	VIRTQUEUE_DUMP(vq);
914 
915 	return 0;
916 }
917 
918 /*
919  * struct rte_eth_dev *dev: device whose Tx queue is being set up
920  * uint16_t queue_idx: index into the device Tx queue array
921  * uint16_t nb_desc: defaults to the ring size read from config space when 0
922  * unsigned int socket_id: used to allocate the memzone
923  * const struct rte_eth_txconf *tx_conf: used to set up the Tx engine
924  */
925 int
926 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
927 			uint16_t queue_idx,
928 			uint16_t nb_desc,
929 			unsigned int socket_id __rte_unused,
930 			const struct rte_eth_txconf *tx_conf)
931 {
932 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
933 	struct virtio_hw *hw = dev->data->dev_private;
934 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
935 	struct virtnet_tx *txvq;
936 	uint16_t tx_free_thresh;
937 
938 	PMD_INIT_FUNC_TRACE();
939 
940 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
941 		nb_desc = vq->vq_nentries;
942 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
943 
944 	txvq = &vq->txq;
945 	txvq->queue_id = queue_idx;
946 
947 	tx_free_thresh = tx_conf->tx_free_thresh;
948 	if (tx_free_thresh == 0)
949 		tx_free_thresh =
950 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
951 
952 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
953 		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
954 			"number of TX entries minus 3 (%u)."
955 			" (tx_free_thresh=%u port=%u queue=%u)\n",
956 			vq->vq_nentries - 3,
957 			tx_free_thresh, dev->data->port_id, queue_idx);
958 		return -EINVAL;
959 	}
960 
961 	vq->vq_free_thresh = tx_free_thresh;
962 
963 	dev->data->tx_queues[queue_idx] = txvq;
964 	return 0;
965 }
966 
967 int
968 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
969 				uint16_t queue_idx)
970 {
971 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
972 	struct virtio_hw *hw = dev->data->dev_private;
973 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
974 
975 	PMD_INIT_FUNC_TRACE();
976 
977 	if (!vtpci_packed_queue(hw)) {
978 		if (hw->use_inorder_tx)
979 			vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
980 	}
981 
982 	VIRTQUEUE_DUMP(vq);
983 
984 	return 0;
985 }
986 
987 static inline void
988 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
989 {
990 	int error;
991 	/*
992 	 * Requeue the discarded mbuf. This should always be
993 	 * successful since it was just dequeued.
994 	 */
995 	if (vtpci_packed_queue(vq->hw))
996 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
997 	else
998 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
999 
1000 	if (unlikely(error)) {
1001 		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf\n");
1002 		rte_pktmbuf_free(m);
1003 	}
1004 }
1005 
1006 static inline void
1007 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
1008 {
1009 	int error;
1010 
1011 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
1012 	if (unlikely(error)) {
1013 		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf\n");
1014 		rte_pktmbuf_free(m);
1015 	}
1016 }
1017 
1018 static inline void
1019 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
1020 {
1021 	uint32_t s = mbuf->pkt_len;
1022 	struct ether_addr *ea;
1023 
1024 	stats->bytes += s;
1025 
1026 	if (s == 64) {
1027 		stats->size_bins[1]++;
1028 	} else if (s > 64 && s < 1024) {
1029 		uint32_t bin;
1030 
1031 		/* count leading zeros to find the correct size bin */
1032 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
1033 		stats->size_bins[bin]++;
1034 	} else {
1035 		if (s < 64)
1036 			stats->size_bins[0]++;
1037 		else if (s < 1519)
1038 			stats->size_bins[6]++;
1039 		else if (s >= 1519)
1040 			stats->size_bins[7]++;
1041 	}
1042 
1043 	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
1044 	if (is_multicast_ether_addr(ea)) {
1045 		if (is_broadcast_ether_addr(ea))
1046 			stats->broadcast++;
1047 		else
1048 			stats->multicast++;
1049 	}
1050 }
1051 
1052 static inline void
1053 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
1054 {
1055 	VIRTIO_DUMP_PACKET(m, m->data_len);
1056 
1057 	virtio_update_packet_stats(&rxvq->stats, m);
1058 }
1059 
1060 /* Parse the virtio-net header and fill Rx offload information in the mbuf */
1061 static inline int
1062 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
1063 {
1064 	struct rte_net_hdr_lens hdr_lens;
1065 	uint32_t hdrlen, ptype;
1066 	int l4_supported = 0;
1067 
1068 	/* nothing to do */
1069 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1070 		return 0;
1071 
1072 	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
1073 
1074 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1075 	m->packet_type = ptype;
1076 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
1077 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
1078 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
1079 		l4_supported = 1;
1080 
1081 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1082 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
1083 		if (hdr->csum_start <= hdrlen && l4_supported) {
1084 			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
1085 		} else {
1086 			/* Unknown proto or tunnel, do sw cksum. We can assume
1087 			 * the cksum field is in the first segment since the
1088 			 * buffers we provided to the host are large enough.
1089 			 * In case of SCTP, this will be wrong since it's a CRC
1090 			 * but there's nothing we can do.
1091 			 */
1092 			uint16_t csum = 0, off;
1093 
1094 			rte_raw_cksum_mbuf(m, hdr->csum_start,
1095 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
1096 				&csum);
1097 			if (likely(csum != 0xffff))
1098 				csum = ~csum;
1099 			off = hdr->csum_offset + hdr->csum_start;
1100 			if (rte_pktmbuf_data_len(m) >= off + 1)
1101 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
1102 					off) = csum;
1103 		}
1104 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
1105 		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1106 	}
1107 
1108 	/* GSO request, save required information in mbuf */
1109 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1110 		/* Check unsupported modes */
1111 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
1112 		    (hdr->gso_size == 0)) {
1113 			return -EINVAL;
1114 		}
1115 
1116 		/* Update MSS length in mbuf */
1117 		m->tso_segsz = hdr->gso_size;
1118 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1119 		case VIRTIO_NET_HDR_GSO_TCPV4:
1120 		case VIRTIO_NET_HDR_GSO_TCPV6:
1121 			m->ol_flags |= PKT_RX_LRO |
1122 				PKT_RX_L4_CKSUM_NONE;
1123 			break;
1124 		default:
1125 			return -EINVAL;
1126 		}
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 #define VIRTIO_MBUF_BURST_SZ 64
1133 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
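/*
 * Split ring receive burst (non-mergeable buffers): dequeue used
 * buffers, strip the virtio-net header, apply Rx offloads and refill
 * the ring with fresh mbufs.
 */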
1134 uint16_t
1135 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1136 {
1137 	struct virtnet_rx *rxvq = rx_queue;
1138 	struct virtqueue *vq = rxvq->vq;
1139 	struct virtio_hw *hw = vq->hw;
1140 	struct rte_mbuf *rxm, *new_mbuf;
1141 	uint16_t nb_used, num, nb_rx;
1142 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1143 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1144 	int error;
1145 	uint32_t i, nb_enqueued;
1146 	uint32_t hdr_size;
1147 	struct virtio_net_hdr *hdr;
1148 
1149 	nb_rx = 0;
1150 	if (unlikely(hw->started == 0))
1151 		return nb_rx;
1152 
1153 	nb_used = VIRTQUEUE_NUSED(vq);
1154 
1155 	virtio_rmb(hw->weak_barriers);
1156 
1157 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1158 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1159 		num = VIRTIO_MBUF_BURST_SZ;
1160 	if (likely(num > DESC_PER_CACHELINE))
1161 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1162 
1163 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1164 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1165 
1166 	nb_enqueued = 0;
1167 	hdr_size = hw->vtnet_hdr_size;
1168 
1169 	for (i = 0; i < num ; i++) {
1170 		rxm = rcv_pkts[i];
1171 
1172 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1173 
1174 		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1175 			PMD_RX_LOG(ERR, "Packet drop");
1176 			nb_enqueued++;
1177 			virtio_discard_rxbuf(vq, rxm);
1178 			rxvq->stats.errors++;
1179 			continue;
1180 		}
1181 
1182 		rxm->port = rxvq->port_id;
1183 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1184 		rxm->ol_flags = 0;
1185 		rxm->vlan_tci = 0;
1186 
1187 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1188 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1189 
1190 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1191 			RTE_PKTMBUF_HEADROOM - hdr_size);
1192 
1193 		if (hw->vlan_strip)
1194 			rte_vlan_strip(rxm);
1195 
1196 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1197 			virtio_discard_rxbuf(vq, rxm);
1198 			rxvq->stats.errors++;
1199 			continue;
1200 		}
1201 
1202 		virtio_rx_stats_updated(rxvq, rxm);
1203 
1204 		rx_pkts[nb_rx++] = rxm;
1205 	}
1206 
1207 	rxvq->stats.packets += nb_rx;
1208 
1209 	/* Allocate new mbuf for the used descriptor */
1210 	while (likely(!virtqueue_full(vq))) {
1211 		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1212 		if (unlikely(new_mbuf == NULL)) {
1213 			struct rte_eth_dev *dev
1214 				= &rte_eth_devices[rxvq->port_id];
1215 			dev->data->rx_mbuf_alloc_failed++;
1216 			break;
1217 		}
1218 		error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
1219 		if (unlikely(error)) {
1220 			rte_pktmbuf_free(new_mbuf);
1221 			break;
1222 		}
1223 		nb_enqueued++;
1224 	}
1225 
1226 	if (likely(nb_enqueued)) {
1227 		vq_update_avail_idx(vq);
1228 
1229 		if (unlikely(virtqueue_kick_prepare(vq))) {
1230 			virtqueue_notify(vq);
1231 			PMD_RX_LOG(DEBUG, "Notified");
1232 		}
1233 	}
1234 
1235 	return nb_rx;
1236 }
1237 
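/* Packed ring receive burst (non-mergeable buffers). */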
1238 uint16_t
1239 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1240 			uint16_t nb_pkts)
1241 {
1242 	struct virtnet_rx *rxvq = rx_queue;
1243 	struct virtqueue *vq = rxvq->vq;
1244 	struct virtio_hw *hw = vq->hw;
1245 	struct rte_mbuf *rxm, *new_mbuf;
1246 	uint16_t num, nb_rx;
1247 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1248 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1249 	int error;
1250 	uint32_t i, nb_enqueued;
1251 	uint32_t hdr_size;
1252 	struct virtio_net_hdr *hdr;
1253 
1254 	nb_rx = 0;
1255 	if (unlikely(hw->started == 0))
1256 		return nb_rx;
1257 
1258 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1259 	if (likely(num > DESC_PER_CACHELINE))
1260 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1261 
1262 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1263 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1264 
1265 	nb_enqueued = 0;
1266 	hdr_size = hw->vtnet_hdr_size;
1267 
1268 	for (i = 0; i < num; i++) {
1269 		rxm = rcv_pkts[i];
1270 
1271 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1272 
1273 		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1274 			PMD_RX_LOG(ERR, "Packet drop");
1275 			nb_enqueued++;
1276 			virtio_discard_rxbuf(vq, rxm);
1277 			rxvq->stats.errors++;
1278 			continue;
1279 		}
1280 
1281 		rxm->port = rxvq->port_id;
1282 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1283 		rxm->ol_flags = 0;
1284 		rxm->vlan_tci = 0;
1285 
1286 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1287 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1288 
1289 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1290 			RTE_PKTMBUF_HEADROOM - hdr_size);
1291 
1292 		if (hw->vlan_strip)
1293 			rte_vlan_strip(rxm);
1294 
1295 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1296 			virtio_discard_rxbuf(vq, rxm);
1297 			rxvq->stats.errors++;
1298 			continue;
1299 		}
1300 
1301 		virtio_rx_stats_updated(rxvq, rxm);
1302 
1303 		rx_pkts[nb_rx++] = rxm;
1304 	}
1305 
1306 	rxvq->stats.packets += nb_rx;
1307 
1308 	/* Allocate new mbuf for the used descriptor */
1309 	while (likely(!virtqueue_full(vq))) {
1310 		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1311 		if (unlikely(new_mbuf == NULL)) {
1312 			struct rte_eth_dev *dev =
1313 				&rte_eth_devices[rxvq->port_id];
1314 			dev->data->rx_mbuf_alloc_failed++;
1315 			break;
1316 		}
1317 		error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
1318 		if (unlikely(error)) {
1319 			rte_pktmbuf_free(new_mbuf);
1320 			break;
1321 		}
1322 		nb_enqueued++;
1323 	}
1324 
1325 	if (likely(nb_enqueued)) {
1326 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1327 			virtqueue_notify(vq);
1328 			PMD_RX_LOG(DEBUG, "Notified");
1329 		}
1330 	}
1331 
1332 	return nb_rx;
1333 }
1334 
1335 
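/*
 * In-order receive burst; supports mergeable Rx buffers by chaining
 * follow-up segments onto the head mbuf.
 */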
1336 uint16_t
1337 virtio_recv_pkts_inorder(void *rx_queue,
1338 			struct rte_mbuf **rx_pkts,
1339 			uint16_t nb_pkts)
1340 {
1341 	struct virtnet_rx *rxvq = rx_queue;
1342 	struct virtqueue *vq = rxvq->vq;
1343 	struct virtio_hw *hw = vq->hw;
1344 	struct rte_mbuf *rxm;
1345 	struct rte_mbuf *prev;
1346 	uint16_t nb_used, num, nb_rx;
1347 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1348 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1349 	int error;
1350 	uint32_t nb_enqueued;
1351 	uint32_t seg_num;
1352 	uint32_t seg_res;
1353 	uint32_t hdr_size;
1354 	int32_t i;
1355 
1356 	nb_rx = 0;
1357 	if (unlikely(hw->started == 0))
1358 		return nb_rx;
1359 
1360 	nb_used = VIRTQUEUE_NUSED(vq);
1361 	nb_used = RTE_MIN(nb_used, nb_pkts);
1362 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1363 
1364 	virtio_rmb(hw->weak_barriers);
1365 
1366 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1367 
1368 	nb_enqueued = 0;
1369 	seg_num = 1;
1370 	seg_res = 0;
1371 	hdr_size = hw->vtnet_hdr_size;
1372 
1373 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1374 
1375 	for (i = 0; i < num; i++) {
1376 		struct virtio_net_hdr_mrg_rxbuf *header;
1377 
1378 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1379 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1380 
1381 		rxm = rcv_pkts[i];
1382 
1383 		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1384 			PMD_RX_LOG(ERR, "Packet drop");
1385 			nb_enqueued++;
1386 			virtio_discard_rxbuf_inorder(vq, rxm);
1387 			rxvq->stats.errors++;
1388 			continue;
1389 		}
1390 
1391 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1392 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1393 			 - hdr_size);
1394 
1395 		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1396 			seg_num = header->num_buffers;
1397 			if (seg_num == 0)
1398 				seg_num = 1;
1399 		} else {
1400 			seg_num = 1;
1401 		}
1402 
1403 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1404 		rxm->nb_segs = seg_num;
1405 		rxm->ol_flags = 0;
1406 		rxm->vlan_tci = 0;
1407 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1408 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1409 
1410 		rxm->port = rxvq->port_id;
1411 
1412 		rx_pkts[nb_rx] = rxm;
1413 		prev = rxm;
1414 
1415 		if (vq->hw->has_rx_offload &&
1416 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1417 			virtio_discard_rxbuf_inorder(vq, rxm);
1418 			rxvq->stats.errors++;
1419 			continue;
1420 		}
1421 
1422 		if (hw->vlan_strip)
1423 			rte_vlan_strip(rx_pkts[nb_rx]);
1424 
1425 		seg_res = seg_num - 1;
1426 
1427 		/* Merge remaining segments */
1428 		while (seg_res != 0 && i < (num - 1)) {
1429 			i++;
1430 
1431 			rxm = rcv_pkts[i];
1432 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1433 			rxm->pkt_len = (uint32_t)(len[i]);
1434 			rxm->data_len = (uint16_t)(len[i]);
1435 
1436 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1437 			rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1438 
1439 			if (prev)
1440 				prev->next = rxm;
1441 
1442 			prev = rxm;
1443 			seg_res -= 1;
1444 		}
1445 
1446 		if (!seg_res) {
1447 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1448 			nb_rx++;
1449 		}
1450 	}
1451 
1452 	/* The last packet may still need more segments merged */
1453 	while (seg_res != 0) {
1454 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1455 					VIRTIO_MBUF_BURST_SZ);
1456 
1457 		prev = rcv_pkts[nb_rx];
1458 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1459 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1460 							   rcv_cnt);
1461 			uint16_t extra_idx = 0;
1462 
1463 			rcv_cnt = num;
1464 			while (extra_idx < rcv_cnt) {
1465 				rxm = rcv_pkts[extra_idx];
1466 				rxm->data_off =
1467 					RTE_PKTMBUF_HEADROOM - hdr_size;
1468 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1469 				rxm->data_len = (uint16_t)(len[extra_idx]);
1470 				prev->next = rxm;
1471 				prev = rxm;
1472 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1473 				rx_pkts[nb_rx]->data_len += len[extra_idx];
1474 				extra_idx += 1;
1475 			}
1476 			seg_res -= rcv_cnt;
1477 
1478 			if (!seg_res) {
1479 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1480 				nb_rx++;
1481 			}
1482 		} else {
1483 			PMD_RX_LOG(ERR,
1484 					"Not enough segments for packet.");
1485 			virtio_discard_rxbuf_inorder(vq, prev);
1486 			rxvq->stats.errors++;
1487 			break;
1488 		}
1489 	}
1490 
1491 	rxvq->stats.packets += nb_rx;
1492 
1493 	/* Allocate new mbuf for the used descriptor */
1494 
1495 	if (likely(!virtqueue_full(vq))) {
1496 		/* free_cnt may include mrg descs */
1497 		uint16_t free_cnt = vq->vq_free_cnt;
1498 		struct rte_mbuf *new_pkts[free_cnt];
1499 
1500 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1501 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1502 					free_cnt);
1503 			if (unlikely(error)) {
1504 				for (i = 0; i < free_cnt; i++)
1505 					rte_pktmbuf_free(new_pkts[i]);
1506 			}
1507 			nb_enqueued += free_cnt;
1508 		} else {
1509 			struct rte_eth_dev *dev =
1510 				&rte_eth_devices[rxvq->port_id];
1511 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1512 		}
1513 	}
1514 
1515 	if (likely(nb_enqueued)) {
1516 		vq_update_avail_idx(vq);
1517 
1518 		if (unlikely(virtqueue_kick_prepare(vq))) {
1519 			virtqueue_notify(vq);
1520 			PMD_RX_LOG(DEBUG, "Notified");
1521 		}
1522 	}
1523 
1524 	return nb_rx;
1525 }
1526 
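/* Split ring receive burst with mergeable Rx buffers. */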
1527 uint16_t
1528 virtio_recv_mergeable_pkts(void *rx_queue,
1529 			struct rte_mbuf **rx_pkts,
1530 			uint16_t nb_pkts)
1531 {
1532 	struct virtnet_rx *rxvq = rx_queue;
1533 	struct virtqueue *vq = rxvq->vq;
1534 	struct virtio_hw *hw = vq->hw;
1535 	struct rte_mbuf *rxm;
1536 	struct rte_mbuf *prev;
1537 	uint16_t nb_used, num, nb_rx = 0;
1538 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1539 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1540 	int error;
1541 	uint32_t nb_enqueued = 0;
1542 	uint32_t seg_num = 0;
1543 	uint32_t seg_res = 0;
1544 	uint32_t hdr_size = hw->vtnet_hdr_size;
1545 	int32_t i;
1546 
1547 	if (unlikely(hw->started == 0))
1548 		return nb_rx;
1549 
1550 	nb_used = VIRTQUEUE_NUSED(vq);
1551 
1552 	virtio_rmb(hw->weak_barriers);
1553 
1554 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1555 
1556 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1557 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1558 		num = VIRTIO_MBUF_BURST_SZ;
1559 	if (likely(num > DESC_PER_CACHELINE))
1560 		num = num - ((vq->vq_used_cons_idx + num) %
1561 				DESC_PER_CACHELINE);
1562 
1563 
1564 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1565 
1566 	for (i = 0; i < num; i++) {
1567 		struct virtio_net_hdr_mrg_rxbuf *header;
1568 
1569 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1570 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1571 
1572 		rxm = rcv_pkts[i];
1573 
1574 		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1575 			PMD_RX_LOG(ERR, "Packet drop");
1576 			nb_enqueued++;
1577 			virtio_discard_rxbuf(vq, rxm);
1578 			rxvq->stats.errors++;
1579 			continue;
1580 		}
1581 
1582 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1583 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1584 			 - hdr_size);
1585 		seg_num = header->num_buffers;
1586 		if (seg_num == 0)
1587 			seg_num = 1;
1588 
1589 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1590 		rxm->nb_segs = seg_num;
1591 		rxm->ol_flags = 0;
1592 		rxm->vlan_tci = 0;
1593 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1594 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1595 
1596 		rxm->port = rxvq->port_id;
1597 
1598 		rx_pkts[nb_rx] = rxm;
1599 		prev = rxm;
1600 
1601 		if (hw->has_rx_offload &&
1602 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1603 			virtio_discard_rxbuf(vq, rxm);
1604 			rxvq->stats.errors++;
1605 			continue;
1606 		}
1607 
1608 		if (hw->vlan_strip)
1609 			rte_vlan_strip(rx_pkts[nb_rx]);
1610 
1611 		seg_res = seg_num - 1;
1612 
1613 		/* Merge remaining segments */
1614 		while (seg_res != 0 && i < (num - 1)) {
1615 			i++;
1616 
1617 			rxm = rcv_pkts[i];
1618 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1619 			rxm->pkt_len = (uint32_t)(len[i]);
1620 			rxm->data_len = (uint16_t)(len[i]);
1621 
1622 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1623 			rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1624 
1625 			if (prev)
1626 				prev->next = rxm;
1627 
1628 			prev = rxm;
1629 			seg_res -= 1;
1630 		}
1631 
1632 		if (!seg_res) {
1633 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1634 			nb_rx++;
1635 		}
1636 	}
1637 
1638 	/* The last packet may still need more segments merged */
1639 	while (seg_res != 0) {
1640 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1641 					VIRTIO_MBUF_BURST_SZ);
1642 
1643 		prev = rcv_pkts[nb_rx];
1644 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1645 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1646 							   rcv_cnt);
1647 			uint16_t extra_idx = 0;
1648 
1649 			rcv_cnt = num;
1650 			while (extra_idx < rcv_cnt) {
1651 				rxm = rcv_pkts[extra_idx];
1652 				rxm->data_off =
1653 					RTE_PKTMBUF_HEADROOM - hdr_size;
1654 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1655 				rxm->data_len = (uint16_t)(len[extra_idx]);
1656 				prev->next = rxm;
1657 				prev = rxm;
1658 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1659 				rx_pkts[nb_rx]->data_len += len[extra_idx];
1660 				extra_idx += 1;
1661 			}
1662 			seg_res -= rcv_cnt;
1663 
1664 			if (!seg_res) {
1665 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1666 				nb_rx++;
1667 			}
1668 		} else {
1669 			PMD_RX_LOG(ERR,
1670 					"Not enough segments for packet.");
1671 			virtio_discard_rxbuf(vq, prev);
1672 			rxvq->stats.errors++;
1673 			break;
1674 		}
1675 	}
1676 
1677 	rxvq->stats.packets += nb_rx;
1678 
1679 	/* Allocate new mbuf for the used descriptor */
1680 	if (likely(!virtqueue_full(vq))) {
1681 		/* free_cnt may include mrg descs */
1682 		uint16_t free_cnt = vq->vq_free_cnt;
1683 		struct rte_mbuf *new_pkts[free_cnt];
1684 
1685 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1686 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1687 					free_cnt);
1688 			if (unlikely(error)) {
1689 				for (i = 0; i < free_cnt; i++)
1690 					rte_pktmbuf_free(new_pkts[i]);
1691 			}
1692 			nb_enqueued += free_cnt;
1693 		} else {
1694 			struct rte_eth_dev *dev =
1695 				&rte_eth_devices[rxvq->port_id];
1696 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1697 		}
1698 	}
1699 
1700 	if (likely(nb_enqueued)) {
1701 		vq_update_avail_idx(vq);
1702 
1703 		if (unlikely(virtqueue_kick_prepare(vq))) {
1704 			virtqueue_notify(vq);
1705 			PMD_RX_LOG(DEBUG, "Notified");
1706 		}
1707 	}
1708 
1709 	return nb_rx;
1710 }
1711 
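/* Packed ring receive burst with mergeable Rx buffers. */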
1712 uint16_t
1713 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1714 			struct rte_mbuf **rx_pkts,
1715 			uint16_t nb_pkts)
1716 {
1717 	struct virtnet_rx *rxvq = rx_queue;
1718 	struct virtqueue *vq = rxvq->vq;
1719 	struct virtio_hw *hw = vq->hw;
1720 	struct rte_mbuf *rxm;
1721 	struct rte_mbuf *prev = NULL;
1722 	uint16_t num, nb_rx = 0;
1723 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1724 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1725 	uint32_t nb_enqueued = 0;
1726 	uint32_t seg_num = 0;
1727 	uint32_t seg_res = 0;
1728 	uint32_t hdr_size = hw->vtnet_hdr_size;
1729 	int32_t i;
1730 	int error;
1731 
1732 	if (unlikely(hw->started == 0))
1733 		return nb_rx;
1734 
1735 
1736 	num = nb_pkts;
1737 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1738 		num = VIRTIO_MBUF_BURST_SZ;
1739 	if (likely(num > DESC_PER_CACHELINE))
1740 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1741 
1742 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1743 
1744 	for (i = 0; i < num; i++) {
1745 		struct virtio_net_hdr_mrg_rxbuf *header;
1746 
1747 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1748 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1749 
1750 		rxm = rcv_pkts[i];
1751 
1752 		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1753 			PMD_RX_LOG(ERR, "Packet drop");
1754 			nb_enqueued++;
1755 			virtio_discard_rxbuf(vq, rxm);
1756 			rxvq->stats.errors++;
1757 			continue;
1758 		}
1759 
1760 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1761 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1762 		seg_num = header->num_buffers;
1763 
1764 		if (seg_num == 0)
1765 			seg_num = 1;
1766 
1767 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1768 		rxm->nb_segs = seg_num;
1769 		rxm->ol_flags = 0;
1770 		rxm->vlan_tci = 0;
1771 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1772 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1773 
1774 		rxm->port = rxvq->port_id;
1775 		rx_pkts[nb_rx] = rxm;
1776 		prev = rxm;
1777 
1778 		if (hw->has_rx_offload &&
1779 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1780 			virtio_discard_rxbuf(vq, rxm);
1781 			rxvq->stats.errors++;
1782 			continue;
1783 		}
1784 
1785 		if (hw->vlan_strip)
1786 			rte_vlan_strip(rx_pkts[nb_rx]);
1787 
1788 		seg_res = seg_num - 1;
1789 
1790 		/* Merge remaining segments */
1791 		while (seg_res != 0 && i < (num - 1)) {
1792 			i++;
1793 
1794 			rxm = rcv_pkts[i];
1795 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1796 			rxm->pkt_len = (uint32_t)(len[i]);
1797 			rxm->data_len = (uint16_t)(len[i]);
1798 
1799 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1800 			rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1801 
1802 			if (prev)
1803 				prev->next = rxm;
1804 
1805 			prev = rxm;
1806 			seg_res -= 1;
1807 		}
1808 
1809 		if (!seg_res) {
1810 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1811 			nb_rx++;
1812 		}
1813 	}
1814 
1815 	/* The last packet may still need more segments merged */
1816 	while (seg_res != 0) {
1817 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1818 					VIRTIO_MBUF_BURST_SZ);
1819 		if (likely(vq->vq_free_cnt >= rcv_cnt)) {
1820 			num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1821 					len, rcv_cnt);
1822 			uint16_t extra_idx = 0;
1823 
1824 			rcv_cnt = num;
1825 
1826 			while (extra_idx < rcv_cnt) {
1827 				rxm = rcv_pkts[extra_idx];
1828 
1829 				rxm->data_off =
1830 					RTE_PKTMBUF_HEADROOM - hdr_size;
1831 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1832 				rxm->data_len = (uint16_t)(len[extra_idx]);
1833 
1834 				prev->next = rxm;
1835 				prev = rxm;
1836 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1837 				rx_pkts[nb_rx]->data_len += len[extra_idx];
1838 				extra_idx += 1;
1839 			}
1840 			seg_res -= rcv_cnt;
1841 			if (!seg_res) {
1842 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1843 				nb_rx++;
1844 			}
1845 		} else {
1846 			PMD_RX_LOG(ERR,
1847 					"Not enough segments for packet.");
1848 			if (prev)
1849 				virtio_discard_rxbuf(vq, prev);
1850 			rxvq->stats.errors++;
1851 			break;
1852 		}
1853 	}
1854 
1855 	rxvq->stats.packets += nb_rx;
1856 
1857 	/* Allocate new mbuf for the used descriptor */
1858 	if (likely(!virtqueue_full(vq))) {
1859 		/* free_cnt may include mrg descs */
1860 		uint16_t free_cnt = vq->vq_free_cnt;
1861 		struct rte_mbuf *new_pkts[free_cnt];
1862 
1863 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1864 			error = virtqueue_enqueue_recv_refill_packed(vq,
1865 					new_pkts, free_cnt);
1866 			if (unlikely(error)) {
1867 				for (i = 0; i < free_cnt; i++)
1868 					rte_pktmbuf_free(new_pkts[i]);
1869 			}
1870 			nb_enqueued += free_cnt;
1871 		} else {
1872 			struct rte_eth_dev *dev =
1873 				&rte_eth_devices[rxvq->port_id];
1874 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1875 		}
1876 	}
1877 
1878 	if (likely(nb_enqueued)) {
1879 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1880 			virtqueue_notify(vq);
1881 			PMD_RX_LOG(DEBUG, "Notified");
1882 		}
1883 	}
1884 
1885 	return nb_rx;
1886 }
1887 
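/* Packed ring transmit burst. */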
1888 uint16_t
1889 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1890 			uint16_t nb_pkts)
1891 {
1892 	struct virtnet_tx *txvq = tx_queue;
1893 	struct virtqueue *vq = txvq->vq;
1894 	struct virtio_hw *hw = vq->hw;
1895 	uint16_t hdr_size = hw->vtnet_hdr_size;
1896 	uint16_t nb_tx = 0;
1897 	int error;
1898 
1899 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1900 		return nb_tx;
1901 
1902 	if (unlikely(nb_pkts < 1))
1903 		return nb_pkts;
1904 
1905 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1906 
1907 	if (nb_pkts > vq->vq_free_cnt)
1908 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);
1909 
1910 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1911 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1912 		int can_push = 0, slots, need;
1913 
1914 		/* Do VLAN tag insertion */
1915 		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1916 			error = rte_vlan_insert(&txm);
1917 			if (unlikely(error)) {
1918 				rte_pktmbuf_free(txm);
1919 				continue;
1920 			}
1921 		}
1922 
1923 		/* optimize ring usage */
1924 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1925 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1926 		    rte_mbuf_refcnt_read(txm) == 1 &&
1927 		    RTE_MBUF_DIRECT(txm) &&
1928 		    txm->nb_segs == 1 &&
1929 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1930 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1931 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1932 			can_push = 1;
1933 
1934 		/* How many main ring entries are needed for this Tx?
1935 		 * any_layout => number of segments
1936 		 * default    => number of segments + 1
1937 		 */
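		/* e.g. a single-segment mbuf takes 1 slot with can_push and
		 * 2 slots otherwise, the extra one holding the virtio-net
		 * header.
		 */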
1938 		slots = txm->nb_segs + !can_push;
1939 		need = slots - vq->vq_free_cnt;
1940 
1941 		/* A positive value means vring descriptors must be freed first */
1942 		if (unlikely(need > 0)) {
1943 			virtio_rmb(hw->weak_barriers);
1944 			need = RTE_MIN(need, (int)nb_pkts);
1945 			virtio_xmit_cleanup_packed(vq, need);
1946 			need = slots - vq->vq_free_cnt;
1947 			if (unlikely(need > 0)) {
1948 				PMD_TX_LOG(ERR,
1949 					   "No free tx descriptors to transmit");
1950 				break;
1951 			}
1952 		}
1953 
1954 		/* Enqueue packet buffers */
1955 		virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
1956 
1957 		virtio_update_packet_stats(&txvq->stats, txm);
1958 	}
1959 
1960 	txvq->stats.packets += nb_tx;
1961 
1962 	if (likely(nb_tx)) {
1963 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1964 			virtqueue_notify(vq);
1965 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1966 		}
1967 	}
1968 
1969 	return nb_tx;
1970 }
1971 
1972 uint16_t
1973 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1974 {
1975 	struct virtnet_tx *txvq = tx_queue;
1976 	struct virtqueue *vq = txvq->vq;
1977 	struct virtio_hw *hw = vq->hw;
1978 	uint16_t hdr_size = hw->vtnet_hdr_size;
1979 	uint16_t nb_used, nb_tx = 0;
1980 	int error;
1981 
1982 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1983 		return nb_tx;
1984 
1985 	if (unlikely(nb_pkts < 1))
1986 		return nb_pkts;
1987 
1988 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1989 	nb_used = VIRTQUEUE_NUSED(vq);
1990 
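	/*
	 * Reclaim completed descriptors once the used count exceeds the ring
	 * size minus the free threshold.
	 */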
1991 	virtio_rmb(hw->weak_barriers);
1992 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1993 		virtio_xmit_cleanup(vq, nb_used);
1994 
1995 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1996 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1997 		int can_push = 0, use_indirect = 0, slots, need;
1998 
1999 		/* Do VLAN tag insertion */
2000 		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
2001 			error = rte_vlan_insert(&txm);
2002 			if (unlikely(error)) {
2003 				rte_pktmbuf_free(txm);
2004 				continue;
2005 			}
2006 		}
2007 
2008 		/* Optimize ring usage: prefer pushing the virtio-net header
		 * into the mbuf headroom; otherwise fall back to an indirect
		 * descriptor table when the segment chain is short enough.
		 */
2009 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2010 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2011 		    rte_mbuf_refcnt_read(txm) == 1 &&
2012 		    RTE_MBUF_DIRECT(txm) &&
2013 		    txm->nb_segs == 1 &&
2014 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
2015 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2016 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2017 			can_push = 1;
2018 		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
2019 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
2020 			use_indirect = 1;
2021 
2022 		/* How many main ring entries are needed for this Tx?
2023 		 * any_layout => number of segments
2024 		 * indirect   => 1
2025 		 * default    => number of segments + 1
2026 		 */
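		/* e.g. a 4-segment mbuf takes 1 slot with an indirect table
		 * and 5 slots otherwise; a single-segment mbuf that qualifies
		 * for can_push needs just 1.
		 */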
2027 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
2028 		need = slots - vq->vq_free_cnt;
2029 
2030 		/* A positive value means vring descriptors must be freed first */
2031 		if (unlikely(need > 0)) {
2032 			nb_used = VIRTQUEUE_NUSED(vq);
2033 			virtio_rmb(hw->weak_barriers);
2034 			need = RTE_MIN(need, (int)nb_used);
2035 
2036 			virtio_xmit_cleanup(vq, need);
2037 			need = slots - vq->vq_free_cnt;
2038 			if (unlikely(need > 0)) {
2039 				PMD_TX_LOG(ERR,
2040 					   "No free tx descriptors to transmit");
2041 				break;
2042 			}
2043 		}
2044 
2045 		/* Enqueue packet buffers */
2046 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
2047 			can_push, 0);
2048 
2049 		virtio_update_packet_stats(&txvq->stats, txm);
2050 	}
2051 
2052 	txvq->stats.packets += nb_tx;
2053 
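	/*
	 * Publish the new avail index to the device, then notify it only if
	 * the kick-prepare check says a notification is still required.
	 */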
2054 	if (likely(nb_tx)) {
2055 		vq_update_avail_idx(vq);
2056 
2057 		if (unlikely(virtqueue_kick_prepare(vq))) {
2058 			virtqueue_notify(vq);
2059 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2060 		}
2061 	}
2062 
2063 	return nb_tx;
2064 }
2065 
2066 uint16_t
2067 virtio_xmit_pkts_inorder(void *tx_queue,
2068 			struct rte_mbuf **tx_pkts,
2069 			uint16_t nb_pkts)
2070 {
2071 	struct virtnet_tx *txvq = tx_queue;
2072 	struct virtqueue *vq = txvq->vq;
2073 	struct virtio_hw *hw = vq->hw;
2074 	uint16_t hdr_size = hw->vtnet_hdr_size;
2075 	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
2076 	struct rte_mbuf *inorder_pkts[nb_pkts];
2077 	int error;
2078 
2079 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2080 		return nb_tx;
2081 
2082 	if (unlikely(nb_pkts < 1))
2083 		return nb_pkts;
2084 
2085 	VIRTQUEUE_DUMP(vq);
2086 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2087 	nb_used = VIRTQUEUE_NUSED(vq);
2088 
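	/*
	 * Reclaim in-order completions once the used count exceeds the ring
	 * size minus the free threshold; if no descriptors are free at all,
	 * run the cleanup again so that at least some slots open up.
	 */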
2089 	virtio_rmb(hw->weak_barriers);
2090 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2091 		virtio_xmit_cleanup_inorder(vq, nb_used);
2092 
2093 	if (unlikely(!vq->vq_free_cnt))
2094 		virtio_xmit_cleanup_inorder(vq, nb_used);
2095 
2096 	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
2097 
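	/*
	 * Packets whose virtio-net header can be pushed into the mbuf headroom
	 * are collected in inorder_pkts and submitted in batches; any other
	 * packet flushes the pending batch first and is enqueued on its own
	 * with a separate header descriptor.
	 */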
2098 	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
2099 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2100 		int slots, need;
2101 
2102 		/* Do VLAN tag insertion */
2103 		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
2104 			error = rte_vlan_insert(&txm);
2105 			if (unlikely(error)) {
2106 				rte_pktmbuf_free(txm);
2107 				continue;
2108 			}
2109 		}
2110 
2111 		/* optimize ring usage */
2112 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2113 		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2114 		     rte_mbuf_refcnt_read(txm) == 1 &&
2115 		     RTE_MBUF_DIRECT(txm) &&
2116 		     txm->nb_segs == 1 &&
2117 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
2118 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2119 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
2120 			inorder_pkts[nb_inorder_pkts] = txm;
2121 			nb_inorder_pkts++;
2122 
2123 			virtio_update_packet_stats(&txvq->stats, txm);
2124 			continue;
2125 		}
2126 
2127 		if (nb_inorder_pkts) {
2128 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2129 							nb_inorder_pkts);
2130 			nb_inorder_pkts = 0;
2131 		}
2132 
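		/* The header cannot be pushed into this mbuf, so it needs its
		 * own descriptor in addition to one per segment.
		 */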
2133 		slots = txm->nb_segs + 1;
2134 		need = slots - vq->vq_free_cnt;
2135 		if (unlikely(need > 0)) {
2136 			nb_used = VIRTQUEUE_NUSED(vq);
2137 			virtio_rmb(hw->weak_barriers);
2138 			need = RTE_MIN(need, (int)nb_used);
2139 
2140 			virtio_xmit_cleanup_inorder(vq, need);
2141 
2142 			need = slots - vq->vq_free_cnt;
2143 
2144 			if (unlikely(need > 0)) {
2145 				PMD_TX_LOG(ERR,
2146 					"No free tx descriptors to transmit");
2147 				break;
2148 			}
2149 		}
2150 		/* Enqueue packet buffers */
2151 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2152 
2153 		virtio_update_packet_stats(&txvq->stats, txm);
2154 	}
2155 
2156 	/* Flush any in-order packets still batched after the loop */
2157 	if (nb_inorder_pkts)
2158 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2159 						nb_inorder_pkts);
2160 
2161 	txvq->stats.packets += nb_tx;
2162 
2163 	if (likely(nb_tx)) {
2164 		vq_update_avail_idx(vq);
2165 
2166 		if (unlikely(virtqueue_kick_prepare(vq))) {
2167 			virtqueue_notify(vq);
2168 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2169 		}
2170 	}
2171 
2172 	VIRTQUEUE_DUMP(vq);
2173 
2174 	return nb_tx;
2175 }
2176