xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision 1f41d98c207aee8982ced709864c96c463d4503a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45 	struct virtnet_rx *rxvq = rxq;
46 	struct virtqueue *vq = rxvq->vq;
47 
48 	return VIRTQUEUE_NUSED(vq) >= offset;
49 }
50 
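/*
 * Bulk-free 'num' descriptors in order: just advance the free counter and
 * set the tail index to 'desc_idx'.
 */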
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54 	vq->vq_free_cnt += num;
55 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57 
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61 	struct vring_desc *dp, *dp_tail;
62 	struct vq_desc_extra *dxp;
63 	uint16_t desc_idx_last = desc_idx;
64 
65 	dp  = &vq->vq_split.ring.desc[desc_idx];
66 	dxp = &vq->vq_descx[desc_idx];
67 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 		while (dp->flags & VRING_DESC_F_NEXT) {
70 			desc_idx_last = dp->next;
71 			dp = &vq->vq_split.ring.desc[dp->next];
72 		}
73 	}
74 	dxp->ndescs = 0;
75 
76 	/*
77 	 * Append the newly freed chain to the end of the existing free
78 	 * chain, if any. If the virtqueue was completely used, the free
79 	 * list is empty and the new chain becomes its head.
80 	 */
81 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 		vq->vq_desc_head_idx = desc_idx;
83 	} else {
84 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85 		dp_tail->next = desc_idx;
86 	}
87 
88 	vq->vq_desc_tail_idx = desc_idx_last;
89 	dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91 
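/*
 * Return the descriptor chain tracked by 'id' to the packed ring's free
 * list, linking it after the current tail.
 */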
92 static void
93 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
94 {
95 	struct vq_desc_extra *dxp;
96 
97 	dxp = &vq->vq_descx[id];
98 	vq->vq_free_cnt += dxp->ndescs;
99 
100 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
101 		vq->vq_desc_head_idx = id;
102 	else
103 		vq->vq_descx[vq->vq_desc_tail_idx].next = id;
104 
105 	vq->vq_desc_tail_idx = id;
106 	dxp->next = VQ_RING_DESC_CHAIN_END;
107 }
108 
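/*
 * Update per-queue byte and size-histogram counters. size_bins[] layout:
 * [0] < 64, [1] == 64, [2]..[5] 65-1023 in power-of-two buckets,
 * [6] 1024-1518, [7] >= 1519 bytes.
 */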
109 void
110 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
111 {
112 	uint32_t s = mbuf->pkt_len;
113 	struct rte_ether_addr *ea;
114 
115 	stats->bytes += s;
116 
117 	if (s == 64) {
118 		stats->size_bins[1]++;
119 	} else if (s > 64 && s < 1024) {
120 		uint32_t bin;
121 
122 		/* count leading zeros to find the power-of-two size bin */
123 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
124 		stats->size_bins[bin]++;
125 	} else {
126 		if (s < 64)
127 			stats->size_bins[0]++;
128 		else if (s < 1519)
129 			stats->size_bins[6]++;
130 		else
131 			stats->size_bins[7]++;
132 	}
133 
134 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
135 	if (rte_is_multicast_ether_addr(ea)) {
136 		if (rte_is_broadcast_ether_addr(ea))
137 			stats->broadcast++;
138 		else
139 			stats->multicast++;
140 	}
141 }
142 
143 static inline void
144 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
145 {
146 	VIRTIO_DUMP_PACKET(m, m->data_len);
147 
148 	virtio_update_packet_stats(&rxvq->stats, m);
149 }
150 
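/*
 * Dequeue up to 'num' used descriptors from a packed ring; the attached
 * mbufs are returned in rx_pkts[]. Stops early when no more descriptors
 * are ready and returns the number actually dequeued.
 */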
151 static uint16_t
152 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
153 				  struct rte_mbuf **rx_pkts,
154 				  uint32_t *len,
155 				  uint16_t num)
156 {
157 	struct rte_mbuf *cookie;
158 	uint16_t used_idx;
159 	uint16_t id;
160 	struct vring_packed_desc *desc;
161 	uint16_t i;
162 
163 	desc = vq->vq_packed.ring.desc;
164 
165 	for (i = 0; i < num; i++) {
166 		used_idx = vq->vq_used_cons_idx;
167 		/* desc_is_used has a load-acquire or rte_cio_rmb inside
168 		 * and waits for a used descriptor in the virtqueue.
169 		 */
170 		if (!desc_is_used(&desc[used_idx], vq))
171 			return i;
172 		len[i] = desc[used_idx].len;
173 		id = desc[used_idx].id;
174 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
175 		if (unlikely(cookie == NULL)) {
176 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
177 				vq->vq_used_cons_idx);
178 			break;
179 		}
180 		rte_prefetch0(cookie);
181 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
182 		rx_pkts[i] = cookie;
183 
184 		vq->vq_free_cnt++;
185 		vq->vq_used_cons_idx++;
186 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
187 			vq->vq_used_cons_idx -= vq->vq_nentries;
188 			vq->vq_packed.used_wrap_counter ^= 1;
189 		}
190 	}
191 
192 	return i;
193 }
194 
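/*
 * Split-ring counterpart: dequeue up to 'num' used entries, returning the
 * mbufs and freeing their descriptor chains.
 */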
195 static uint16_t
196 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
197 			   uint32_t *len, uint16_t num)
198 {
199 	struct vring_used_elem *uep;
200 	struct rte_mbuf *cookie;
201 	uint16_t used_idx, desc_idx;
202 	uint16_t i;
203 
204 	/* The caller ensures 'num' used entries are available */
205 	for (i = 0; i < num ; i++) {
206 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
207 		uep = &vq->vq_split.ring.used->ring[used_idx];
208 		desc_idx = (uint16_t) uep->id;
209 		len[i] = uep->len;
210 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
211 
212 		if (unlikely(cookie == NULL)) {
213 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
214 				vq->vq_used_cons_idx);
215 			break;
216 		}
217 
218 		rte_prefetch0(cookie);
219 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
220 		rx_pkts[i]  = cookie;
221 		vq->vq_used_cons_idx++;
222 		vq_ring_free_chain(vq, desc_idx);
223 		vq->vq_descx[desc_idx].cookie = NULL;
224 	}
225 
226 	return i;
227 }
228 
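/*
 * IN_ORDER dequeue: the used ring index doubles as the descriptor index,
 * so the whole batch is freed at once with vq_ring_free_inorder().
 */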
229 static uint16_t
230 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
231 			struct rte_mbuf **rx_pkts,
232 			uint32_t *len,
233 			uint16_t num)
234 {
235 	struct vring_used_elem *uep;
236 	struct rte_mbuf *cookie;
237 	uint16_t used_idx = 0;
238 	uint16_t i;
239 
240 	if (unlikely(num == 0))
241 		return 0;
242 
243 	for (i = 0; i < num; i++) {
244 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
245 		/* Desc idx same as used idx */
246 		uep = &vq->vq_split.ring.used->ring[used_idx];
247 		len[i] = uep->len;
248 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
249 
250 		if (unlikely(cookie == NULL)) {
251 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
252 				vq->vq_used_cons_idx);
253 			break;
254 		}
255 
256 		rte_prefetch0(cookie);
257 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
258 		rx_pkts[i]  = cookie;
259 		vq->vq_used_cons_idx++;
260 		vq->vq_descx[used_idx].cookie = NULL;
261 	}
262 
263 	vq_ring_free_inorder(vq, used_idx, i);
264 	return i;
265 }
266 
267 #ifndef DEFAULT_TX_FREE_THRESH
268 #define DEFAULT_TX_FREE_THRESH 32
269 #endif
270 
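/*
 * Reclaim completed TX descriptors from a packed ring when IN_ORDER was
 * negotiated: completions are walked in ring order, freeing the mbuf
 * cookies, until 'num' descriptors are reclaimed or none are left used.
 */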
271 static void
272 virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
273 {
274 	uint16_t used_idx, id, curr_id, free_cnt = 0;
275 	uint16_t size = vq->vq_nentries;
276 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
277 	struct vq_desc_extra *dxp;
278 
279 	used_idx = vq->vq_used_cons_idx;
280 	/* desc_is_used has a load-acquire or rte_cio_rmb inside
281 	 * and waits for a used descriptor in the virtqueue.
282 	 */
283 	while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
284 		id = desc[used_idx].id;
285 		do {
286 			curr_id = used_idx;
287 			dxp = &vq->vq_descx[used_idx];
288 			used_idx += dxp->ndescs;
289 			free_cnt += dxp->ndescs;
290 			num -= dxp->ndescs;
291 			if (used_idx >= size) {
292 				used_idx -= size;
293 				vq->vq_packed.used_wrap_counter ^= 1;
294 			}
295 			if (dxp->cookie != NULL) {
296 				rte_pktmbuf_free(dxp->cookie);
297 				dxp->cookie = NULL;
298 			}
299 		} while (curr_id != id);
300 	}
301 	vq->vq_used_cons_idx = used_idx;
302 	vq->vq_free_cnt += free_cnt;
303 }
304 
305 static void
306 virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
307 {
308 	uint16_t used_idx, id;
309 	uint16_t size = vq->vq_nentries;
310 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
311 	struct vq_desc_extra *dxp;
312 
313 	used_idx = vq->vq_used_cons_idx;
314 	/* desc_is_used has a load-acquire or rte_cio_rmb inside
315 	 * and waits for a used descriptor in the virtqueue.
316 	 */
317 	while (num-- && desc_is_used(&desc[used_idx], vq)) {
318 		id = desc[used_idx].id;
319 		dxp = &vq->vq_descx[id];
320 		vq->vq_used_cons_idx += dxp->ndescs;
321 		if (vq->vq_used_cons_idx >= size) {
322 			vq->vq_used_cons_idx -= size;
323 			vq->vq_packed.used_wrap_counter ^= 1;
324 		}
325 		vq_ring_free_id_packed(vq, id);
326 		if (dxp->cookie != NULL) {
327 			rte_pktmbuf_free(dxp->cookie);
328 			dxp->cookie = NULL;
329 		}
330 		used_idx = vq->vq_used_cons_idx;
331 	}
332 }
333 
334 /* Cleanup from completed transmits. */
335 static inline void
336 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
337 {
338 	if (in_order)
339 		virtio_xmit_cleanup_inorder_packed(vq, num);
340 	else
341 		virtio_xmit_cleanup_normal_packed(vq, num);
342 }
343 
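/* Cleanup from completed transmits on a split ring. */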
344 static void
345 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
346 {
347 	uint16_t i, used_idx, desc_idx;
348 	for (i = 0; i < num; i++) {
349 		struct vring_used_elem *uep;
350 		struct vq_desc_extra *dxp;
351 
352 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
353 		uep = &vq->vq_split.ring.used->ring[used_idx];
354 
355 		desc_idx = (uint16_t) uep->id;
356 		dxp = &vq->vq_descx[desc_idx];
357 		vq->vq_used_cons_idx++;
358 		vq_ring_free_chain(vq, desc_idx);
359 
360 		if (dxp->cookie != NULL) {
361 			rte_pktmbuf_free(dxp->cookie);
362 			dxp->cookie = NULL;
363 		}
364 	}
365 }
366 
367 /* Cleanup from completed inorder transmits. */
368 static __rte_always_inline void
369 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
370 {
371 	uint16_t i, idx = vq->vq_used_cons_idx;
372 	int16_t free_cnt = 0;
373 	struct vq_desc_extra *dxp = NULL;
374 
375 	if (unlikely(num == 0))
376 		return;
377 
378 	for (i = 0; i < num; i++) {
379 		dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
380 		free_cnt += dxp->ndescs;
381 		if (dxp->cookie != NULL) {
382 			rte_pktmbuf_free(dxp->cookie);
383 			dxp->cookie = NULL;
384 		}
385 	}
386 
387 	vq->vq_free_cnt += free_cnt;
388 	vq->vq_used_cons_idx = idx;
389 }
390 
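/*
 * Refill the RX split ring with 'num' mbufs when IN_ORDER is negotiated;
 * each mbuf takes exactly one writable descriptor covering its data room
 * plus the virtio-net header.
 */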
391 static inline int
392 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
393 			struct rte_mbuf **cookies,
394 			uint16_t num)
395 {
396 	struct vq_desc_extra *dxp;
397 	struct virtio_hw *hw = vq->hw;
398 	struct vring_desc *start_dp;
399 	uint16_t head_idx, idx, i = 0;
400 
401 	if (unlikely(vq->vq_free_cnt == 0))
402 		return -ENOSPC;
403 	if (unlikely(vq->vq_free_cnt < num))
404 		return -EMSGSIZE;
405 
406 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
407 	start_dp = vq->vq_split.ring.desc;
408 
409 	while (i < num) {
410 		idx = head_idx & (vq->vq_nentries - 1);
411 		dxp = &vq->vq_descx[idx];
412 		dxp->cookie = (void *)cookies[i];
413 		dxp->ndescs = 1;
414 
415 		start_dp[idx].addr =
416 				VIRTIO_MBUF_ADDR(cookies[i], vq) +
417 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
418 		start_dp[idx].len =
419 				cookies[i]->buf_len -
420 				RTE_PKTMBUF_HEADROOM +
421 				hw->vtnet_hdr_size;
422 		start_dp[idx].flags =  VRING_DESC_F_WRITE;
423 
424 		vq_update_avail_ring(vq, idx);
425 		head_idx++;
426 		i++;
427 	}
428 
429 	vq->vq_desc_head_idx += num;
430 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
431 	return 0;
432 }
433 
434 static inline int
435 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
436 				uint16_t num)
437 {
438 	struct vq_desc_extra *dxp;
439 	struct virtio_hw *hw = vq->hw;
440 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
441 	uint16_t idx, i;
442 
443 	if (unlikely(vq->vq_free_cnt == 0))
444 		return -ENOSPC;
445 	if (unlikely(vq->vq_free_cnt < num))
446 		return -EMSGSIZE;
447 
448 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
449 		return -EFAULT;
450 
451 	for (i = 0; i < num; i++) {
452 		idx = vq->vq_desc_head_idx;
453 		dxp = &vq->vq_descx[idx];
454 		dxp->cookie = (void *)cookie[i];
455 		dxp->ndescs = 1;
456 
457 		start_dp[idx].addr =
458 			VIRTIO_MBUF_ADDR(cookie[i], vq) +
459 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
460 		start_dp[idx].len =
461 			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
462 			hw->vtnet_hdr_size;
463 		start_dp[idx].flags = VRING_DESC_F_WRITE;
464 		vq->vq_desc_head_idx = start_dp[idx].next;
465 		vq_update_avail_ring(vq, idx);
466 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
467 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
468 			break;
469 		}
470 	}
471 
472 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
473 
474 	return 0;
475 }
476 
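/*
 * Packed-ring RX refill: post 'num' receive buffers starting at
 * vq_avail_idx. Descriptor flags (with the avail/used wrap bits) are
 * written last through virtqueue_store_flags_packed().
 */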
477 static inline int
478 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
479 				     struct rte_mbuf **cookie, uint16_t num)
480 {
481 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
482 	uint16_t flags = vq->vq_packed.cached_flags;
483 	struct virtio_hw *hw = vq->hw;
484 	struct vq_desc_extra *dxp;
485 	uint16_t idx;
486 	int i;
487 
488 	if (unlikely(vq->vq_free_cnt == 0))
489 		return -ENOSPC;
490 	if (unlikely(vq->vq_free_cnt < num))
491 		return -EMSGSIZE;
492 
493 	for (i = 0; i < num; i++) {
494 		idx = vq->vq_avail_idx;
495 		dxp = &vq->vq_descx[idx];
496 		dxp->cookie = (void *)cookie[i];
497 		dxp->ndescs = 1;
498 
499 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
500 				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
501 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
502 					+ hw->vtnet_hdr_size;
503 
504 		vq->vq_desc_head_idx = dxp->next;
505 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
506 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
507 
508 		virtqueue_store_flags_packed(&start_dp[idx], flags,
509 					     hw->weak_barriers);
510 
511 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
512 			vq->vq_avail_idx -= vq->vq_nentries;
513 			vq->vq_packed.cached_flags ^=
514 				VRING_PACKED_DESC_F_AVAIL_USED;
515 			flags = vq->vq_packed.cached_flags;
516 		}
517 	}
518 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
519 	return 0;
520 }
521 
522 /* When doing TSO, the IP payload length is not included in the pseudo
523  * header checksum of the packet handed to the PMD, but virtio expects
524  * it to be included.
525  */
526 static void
527 virtio_tso_fix_cksum(struct rte_mbuf *m)
528 {
529 	/* common case: header is not fragmented */
530 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
531 			m->l4_len)) {
532 		struct rte_ipv4_hdr *iph;
533 		struct rte_ipv6_hdr *ip6h;
534 		struct rte_tcp_hdr *th;
535 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
536 		uint32_t tmp;
537 
538 		iph = rte_pktmbuf_mtod_offset(m,
539 					struct rte_ipv4_hdr *, m->l2_len);
540 		th = RTE_PTR_ADD(iph, m->l3_len);
541 		if ((iph->version_ihl >> 4) == 4) {
542 			iph->hdr_checksum = 0;
543 			iph->hdr_checksum = rte_ipv4_cksum(iph);
544 			ip_len = iph->total_length;
545 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
546 				m->l3_len);
547 		} else {
548 			ip6h = (struct rte_ipv6_hdr *)iph;
549 			ip_paylen = ip6h->payload_len;
550 		}
551 
552 		/* add ip_paylen into the pseudo header checksum, as virtio expects */
553 		prev_cksum = th->cksum;
554 		tmp = prev_cksum;
555 		tmp += ip_paylen;
556 		tmp = (tmp & 0xffff) + (tmp >> 16);
557 		new_cksum = tmp;
558 
559 		/* replace it in the packet */
560 		th->cksum = new_cksum;
561 	}
562 }
563 
564 
565 /* avoid the write when the value is unchanged, to lessen cache issues */
566 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
567 	if ((var) != (val))			\
568 		(var) = (val);			\
569 } while (0)
570 
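/* Zero every virtio-net header field, skipping stores that are already zero. */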
571 #define virtqueue_clear_net_hdr(_hdr) do {		\
572 	ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0);	\
573 	ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0);	\
574 	ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0);		\
575 	ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0);	\
576 	ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0);	\
577 	ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0);	\
578 } while (0)
579 
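/*
 * Translate the mbuf TX offload flags (L4 checksum, TSO) into the
 * virtio-net header; when 'offload' is false the header is left untouched.
 */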
580 static inline void
581 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
582 			struct rte_mbuf *cookie,
583 			bool offload)
584 {
585 	if (offload) {
586 		if (cookie->ol_flags & PKT_TX_TCP_SEG)
587 			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
588 
589 		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
590 		case PKT_TX_UDP_CKSUM:
591 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
592 			hdr->csum_offset = offsetof(struct rte_udp_hdr,
593 				dgram_cksum);
594 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
595 			break;
596 
597 		case PKT_TX_TCP_CKSUM:
598 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
599 			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
600 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
601 			break;
602 
603 		default:
604 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
605 			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
606 			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
607 			break;
608 		}
609 
610 		/* TCP Segmentation Offload */
611 		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
612 			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
613 				VIRTIO_NET_HDR_GSO_TCPV6 :
614 				VIRTIO_NET_HDR_GSO_TCPV4;
615 			hdr->gso_size = cookie->tso_segsz;
616 			hdr->hdr_len =
617 				cookie->l2_len +
618 				cookie->l3_len +
619 				cookie->l4_len;
620 		} else {
621 			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
622 			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
623 			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
624 		}
625 	}
626 }
627 
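/*
 * IN_ORDER TX fast path on a split ring: every packet is a single
 * descriptor with the virtio-net header pushed into the mbuf headroom.
 */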
628 static inline void
629 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
630 			struct rte_mbuf **cookies,
631 			uint16_t num)
632 {
633 	struct vq_desc_extra *dxp;
634 	struct virtqueue *vq = txvq->vq;
635 	struct vring_desc *start_dp;
636 	struct virtio_net_hdr *hdr;
637 	uint16_t idx;
638 	int16_t head_size = vq->hw->vtnet_hdr_size;
639 	uint16_t i = 0;
640 
641 	idx = vq->vq_desc_head_idx;
642 	start_dp = vq->vq_split.ring.desc;
643 
644 	while (i < num) {
645 		idx = idx & (vq->vq_nentries - 1);
646 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
647 		dxp->cookie = (void *)cookies[i];
648 		dxp->ndescs = 1;
649 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
650 
651 		hdr = rte_pktmbuf_mtod_offset(cookies[i],
652 				struct virtio_net_hdr *, -head_size);
653 
654 		/* if offload disabled, hdr is not zeroed yet, do it now */
655 		if (!vq->hw->has_tx_offload)
656 			virtqueue_clear_net_hdr(hdr);
657 		else
658 			virtqueue_xmit_offload(hdr, cookies[i], true);
659 
660 		start_dp[idx].addr  =
661 			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
662 		start_dp[idx].len   = cookies[i]->data_len + head_size;
663 		start_dp[idx].flags = 0;
664 
665 
666 		vq_update_avail_ring(vq, idx);
667 
668 		idx++;
669 		i++;
670 	}
671 
672 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
673 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
674 }
675 
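/*
 * Single-descriptor TX fast path for packed rings: the header is prepended
 * in the mbuf headroom, so the packet occupies exactly one descriptor.
 */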
676 static inline void
677 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
678 				   struct rte_mbuf *cookie,
679 				   int in_order)
680 {
681 	struct virtqueue *vq = txvq->vq;
682 	struct vring_packed_desc *dp;
683 	struct vq_desc_extra *dxp;
684 	uint16_t idx, id, flags;
685 	int16_t head_size = vq->hw->vtnet_hdr_size;
686 	struct virtio_net_hdr *hdr;
687 
688 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
689 	idx = vq->vq_avail_idx;
690 	dp = &vq->vq_packed.ring.desc[idx];
691 
692 	dxp = &vq->vq_descx[id];
693 	dxp->ndescs = 1;
694 	dxp->cookie = cookie;
695 
696 	flags = vq->vq_packed.cached_flags;
697 
698 	/* prepend cannot fail, checked by caller */
699 	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
700 				      -head_size);
701 
702 	/* if offload disabled, hdr is not zeroed yet, do it now */
703 	if (!vq->hw->has_tx_offload)
704 		virtqueue_clear_net_hdr(hdr);
705 	else
706 		virtqueue_xmit_offload(hdr, cookie, true);
707 
708 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
709 	dp->len  = cookie->data_len + head_size;
710 	dp->id   = id;
711 
712 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
713 		vq->vq_avail_idx -= vq->vq_nentries;
714 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
715 	}
716 
717 	vq->vq_free_cnt--;
718 
719 	if (!in_order) {
720 		vq->vq_desc_head_idx = dxp->next;
721 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
722 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
723 	}
724 
725 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
726 }
727 
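/*
 * General packed-ring TX enqueue: either pushes the virtio-net header into
 * the mbuf headroom (can_push) or places it in the per-ring reserved header
 * region, then chains one descriptor per segment.
 */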
728 static inline void
729 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
730 			      uint16_t needed, int can_push, int in_order)
731 {
732 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
733 	struct vq_desc_extra *dxp;
734 	struct virtqueue *vq = txvq->vq;
735 	struct vring_packed_desc *start_dp, *head_dp;
736 	uint16_t idx, id, head_idx, head_flags;
737 	int16_t head_size = vq->hw->vtnet_hdr_size;
738 	struct virtio_net_hdr *hdr;
739 	uint16_t prev;
740 	bool prepend_header = false;
741 
742 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
743 
744 	dxp = &vq->vq_descx[id];
745 	dxp->ndescs = needed;
746 	dxp->cookie = cookie;
747 
748 	head_idx = vq->vq_avail_idx;
749 	idx = head_idx;
750 	prev = head_idx;
751 	start_dp = vq->vq_packed.ring.desc;
752 
753 	head_dp = &vq->vq_packed.ring.desc[idx];
754 	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
755 	head_flags |= vq->vq_packed.cached_flags;
756 
757 	if (can_push) {
758 		/* prepend cannot fail, checked by caller */
759 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
760 					      -head_size);
761 		prepend_header = true;
762 
763 		/* if offload disabled, it is not zeroed below, do it now */
764 		if (!vq->hw->has_tx_offload)
765 			virtqueue_clear_net_hdr(hdr);
766 	} else {
767 		/* setup first tx ring slot to point to header
768 		 * stored in reserved region.
769 		 */
770 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
771 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
772 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
773 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
774 		idx++;
775 		if (idx >= vq->vq_nentries) {
776 			idx -= vq->vq_nentries;
777 			vq->vq_packed.cached_flags ^=
778 				VRING_PACKED_DESC_F_AVAIL_USED;
779 		}
780 	}
781 
782 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
783 
784 	do {
785 		uint16_t flags;
786 
787 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
788 		start_dp[idx].len  = cookie->data_len;
789 		if (prepend_header) {
790 			start_dp[idx].addr -= head_size;
791 			start_dp[idx].len += head_size;
792 			prepend_header = false;
793 		}
794 
795 		if (likely(idx != head_idx)) {
796 			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
797 			flags |= vq->vq_packed.cached_flags;
798 			start_dp[idx].flags = flags;
799 		}
800 		prev = idx;
801 		idx++;
802 		if (idx >= vq->vq_nentries) {
803 			idx -= vq->vq_nentries;
804 			vq->vq_packed.cached_flags ^=
805 				VRING_PACKED_DESC_F_AVAIL_USED;
806 		}
807 	} while ((cookie = cookie->next) != NULL);
808 
809 	start_dp[prev].id = id;
810 
811 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
812 	vq->vq_avail_idx = idx;
813 
814 	if (!in_order) {
815 		vq->vq_desc_head_idx = dxp->next;
816 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
817 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
818 	}
819 
820 	virtqueue_store_flags_packed(head_dp, head_flags,
821 				     vq->hw->weak_barriers);
822 }
823 
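/*
 * Split-ring TX enqueue. Three layouts are supported: header pushed into
 * the mbuf headroom (can_push), an indirect descriptor table from the
 * reserved region (use_indirect), or a separate descriptor for the header
 * followed by one per segment.
 */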
824 static inline void
825 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
826 			uint16_t needed, int use_indirect, int can_push,
827 			int in_order)
828 {
829 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
830 	struct vq_desc_extra *dxp;
831 	struct virtqueue *vq = txvq->vq;
832 	struct vring_desc *start_dp;
833 	uint16_t seg_num = cookie->nb_segs;
834 	uint16_t head_idx, idx;
835 	int16_t head_size = vq->hw->vtnet_hdr_size;
836 	bool prepend_header = false;
837 	struct virtio_net_hdr *hdr;
838 
839 	head_idx = vq->vq_desc_head_idx;
840 	idx = head_idx;
841 	if (in_order)
842 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
843 	else
844 		dxp = &vq->vq_descx[idx];
845 	dxp->cookie = (void *)cookie;
846 	dxp->ndescs = needed;
847 
848 	start_dp = vq->vq_split.ring.desc;
849 
850 	if (can_push) {
851 		/* prepend cannot fail, checked by caller */
852 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
853 					      -head_size);
854 		prepend_header = true;
855 
856 		/* if offload disabled, it is not zeroed below, do it now */
857 		if (!vq->hw->has_tx_offload)
858 			virtqueue_clear_net_hdr(hdr);
859 	} else if (use_indirect) {
860 		/* setup tx ring slot to point to indirect
861 		 * descriptor list stored in reserved region.
862 		 *
863 		 * the first slot in indirect ring is already preset
864 		 * to point to the header in reserved region
865 		 */
866 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
867 			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
868 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
869 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
870 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
871 
872 		/* loop below will fill in rest of the indirect elements */
873 		start_dp = txr[idx].tx_indir;
874 		idx = 1;
875 	} else {
876 		/* setup first tx ring slot to point to header
877 		 * stored in reserved region.
878 		 */
879 		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
880 			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
881 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
882 		start_dp[idx].flags = VRING_DESC_F_NEXT;
883 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
884 
885 		idx = start_dp[idx].next;
886 	}
887 
888 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
889 
890 	do {
891 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
892 		start_dp[idx].len   = cookie->data_len;
893 		if (prepend_header) {
894 			start_dp[idx].addr -= head_size;
895 			start_dp[idx].len += head_size;
896 			prepend_header = false;
897 		}
898 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
899 		idx = start_dp[idx].next;
900 	} while ((cookie = cookie->next) != NULL);
901 
902 	if (use_indirect)
903 		idx = vq->vq_split.ring.desc[head_idx].next;
904 
905 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
906 
907 	vq->vq_desc_head_idx = idx;
908 	vq_update_avail_ring(vq, head_idx);
909 
910 	if (!in_order) {
911 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
912 			vq->vq_desc_tail_idx = idx;
913 	}
914 }
915 
916 void
917 virtio_dev_cq_start(struct rte_eth_dev *dev)
918 {
919 	struct virtio_hw *hw = dev->data->dev_private;
920 
921 	if (hw->cvq && hw->cvq->vq) {
922 		rte_spinlock_init(&hw->cvq->lock);
923 		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
924 	}
925 }
926 
927 int
928 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
929 			uint16_t queue_idx,
930 			uint16_t nb_desc,
931 			unsigned int socket_id __rte_unused,
932 			const struct rte_eth_rxconf *rx_conf,
933 			struct rte_mempool *mp)
934 {
935 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
936 	struct virtio_hw *hw = dev->data->dev_private;
937 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
938 	struct virtnet_rx *rxvq;
939 
940 	PMD_INIT_FUNC_TRACE();
941 
942 	if (rx_conf->rx_deferred_start) {
943 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
944 		return -EINVAL;
945 	}
946 
947 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
948 		nb_desc = vq->vq_nentries;
949 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
950 
951 	rxvq = &vq->rxq;
952 	rxvq->queue_id = queue_idx;
953 	rxvq->mpool = mp;
954 	dev->data->rx_queues[queue_idx] = rxvq;
955 
956 	return 0;
957 }
958 
959 int
960 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
961 {
962 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
963 	struct virtio_hw *hw = dev->data->dev_private;
964 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
965 	struct virtnet_rx *rxvq = &vq->rxq;
966 	struct rte_mbuf *m;
967 	uint16_t desc_idx;
968 	int error, nbufs, i;
969 
970 	PMD_INIT_FUNC_TRACE();
971 
972 	/* Allocate blank mbufs for each rx descriptor */
973 	nbufs = 0;
974 
975 	if (hw->use_simple_rx) {
976 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
977 		     desc_idx++) {
978 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
979 			vq->vq_split.ring.desc[desc_idx].flags =
980 				VRING_DESC_F_WRITE;
981 		}
982 
983 		virtio_rxq_vec_setup(rxvq);
984 	}
985 
986 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
987 	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
988 	     desc_idx++) {
989 		vq->sw_ring[vq->vq_nentries + desc_idx] =
990 			&rxvq->fake_mbuf;
991 	}
992 
993 	if (hw->use_simple_rx) {
994 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
995 			virtio_rxq_rearm_vec(rxvq);
996 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
997 		}
998 	} else if (hw->use_inorder_rx) {
999 		if (!virtqueue_full(vq)) {
1000 			uint16_t free_cnt = vq->vq_free_cnt;
1001 			struct rte_mbuf *pkts[free_cnt];
1002 
1003 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
1004 				free_cnt)) {
1005 				error = virtqueue_enqueue_refill_inorder(vq,
1006 						pkts,
1007 						free_cnt);
1008 				if (unlikely(error)) {
1009 					for (i = 0; i < free_cnt; i++)
1010 						rte_pktmbuf_free(pkts[i]);
1011 				}
1012 			}
1013 
1014 			nbufs += free_cnt;
1015 			vq_update_avail_idx(vq);
1016 		}
1017 	} else {
1018 		while (!virtqueue_full(vq)) {
1019 			m = rte_mbuf_raw_alloc(rxvq->mpool);
1020 			if (m == NULL)
1021 				break;
1022 
1023 			/* Enqueue allocated buffers */
1024 			if (vtpci_packed_queue(vq->hw))
1025 				error = virtqueue_enqueue_recv_refill_packed(vq,
1026 						&m, 1);
1027 			else
1028 				error = virtqueue_enqueue_recv_refill(vq,
1029 						&m, 1);
1030 			if (error) {
1031 				rte_pktmbuf_free(m);
1032 				break;
1033 			}
1034 			nbufs++;
1035 		}
1036 
1037 		if (!vtpci_packed_queue(vq->hw))
1038 			vq_update_avail_idx(vq);
1039 	}
1040 
1041 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
1042 
1043 	VIRTQUEUE_DUMP(vq);
1044 
1045 	return 0;
1046 }
1047 
1048 /*
1049  * struct rte_eth_dev *dev: device whose Tx queue is being set up
1050  * uint16_t nb_desc: defaults to the value read from config space
1051  * unsigned int socket_id: used to allocate the memzone
1052  * const struct rte_eth_txconf *tx_conf: used to set up the tx engine
1053  * uint16_t queue_idx: index into the device's tx queue list
1054  */
1055 int
1056 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
1057 			uint16_t queue_idx,
1058 			uint16_t nb_desc,
1059 			unsigned int socket_id __rte_unused,
1060 			const struct rte_eth_txconf *tx_conf)
1061 {
1062 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
1063 	struct virtio_hw *hw = dev->data->dev_private;
1064 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
1065 	struct virtnet_tx *txvq;
1066 	uint16_t tx_free_thresh;
1067 
1068 	PMD_INIT_FUNC_TRACE();
1069 
1070 	if (tx_conf->tx_deferred_start) {
1071 		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
1072 		return -EINVAL;
1073 	}
1074 
1075 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
1076 		nb_desc = vq->vq_nentries;
1077 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
1078 
1079 	txvq = &vq->txq;
1080 	txvq->queue_id = queue_idx;
1081 
1082 	tx_free_thresh = tx_conf->tx_free_thresh;
1083 	if (tx_free_thresh == 0)
1084 		tx_free_thresh =
1085 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
1086 
1087 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
1088 		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
1089 			"number of TX entries minus 3 (%u)."
1090 			" (tx_free_thresh=%u port=%u queue=%u)\n",
1091 			vq->vq_nentries - 3,
1092 			tx_free_thresh, dev->data->port_id, queue_idx);
1093 		return -EINVAL;
1094 	}
1095 
1096 	vq->vq_free_thresh = tx_free_thresh;
1097 
1098 	dev->data->tx_queues[queue_idx] = txvq;
1099 	return 0;
1100 }
1101 
1102 int
1103 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
1104 				uint16_t queue_idx)
1105 {
1106 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
1107 	struct virtio_hw *hw = dev->data->dev_private;
1108 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
1109 
1110 	PMD_INIT_FUNC_TRACE();
1111 
1112 	if (!vtpci_packed_queue(hw)) {
1113 		if (hw->use_inorder_tx)
1114 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
1115 	}
1116 
1117 	VIRTQUEUE_DUMP(vq);
1118 
1119 	return 0;
1120 }
1121 
1122 static inline void
1123 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
1124 {
1125 	int error;
1126 	/*
1127 	 * Requeue the discarded mbuf. This should always be
1128 	 * successful since it was just dequeued.
1129 	 */
1130 	if (vtpci_packed_queue(vq->hw))
1131 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
1132 	else
1133 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
1134 
1135 	if (unlikely(error)) {
1136 		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf\n");
1137 		rte_pktmbuf_free(m);
1138 	}
1139 }
1140 
1141 static inline void
1142 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
1143 {
1144 	int error;
1145 
1146 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
1147 	if (unlikely(error)) {
1148 		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf\n");
1149 		rte_pktmbuf_free(m);
1150 	}
1151 }
1152 
1153 /* Optionally fill offload information in the mbuf from the virtio-net header */
1154 static inline int
1155 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
1156 {
1157 	struct rte_net_hdr_lens hdr_lens;
1158 	uint32_t hdrlen, ptype;
1159 	int l4_supported = 0;
1160 
1161 	/* nothing to do */
1162 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1163 		return 0;
1164 
1165 	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
1166 
1167 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1168 	m->packet_type = ptype;
1169 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
1170 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
1171 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
1172 		l4_supported = 1;
1173 
1174 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1175 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
1176 		if (hdr->csum_start <= hdrlen && l4_supported) {
1177 			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
1178 		} else {
1179 			/* Unknown proto or tunnel, do sw cksum. We can assume
1180 			 * the cksum field is in the first segment since the
1181 			 * buffers we provided to the host are large enough.
1182 			 * In case of SCTP, this will be wrong since it's a CRC
1183 			 * but there's nothing we can do.
1184 			 */
1185 			uint16_t csum = 0, off;
1186 
1187 			rte_raw_cksum_mbuf(m, hdr->csum_start,
1188 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
1189 				&csum);
1190 			if (likely(csum != 0xffff))
1191 				csum = ~csum;
1192 			off = hdr->csum_offset + hdr->csum_start;
1193 			if (rte_pktmbuf_data_len(m) >= off + 1)
1194 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
1195 					off) = csum;
1196 		}
1197 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
1198 		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1199 	}
1200 
1201 	/* GSO request, save required information in mbuf */
1202 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1203 		/* Check unsupported modes */
1204 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
1205 		    (hdr->gso_size == 0)) {
1206 			return -EINVAL;
1207 		}
1208 
1209 		/* Update MSS length in mbuf */
1210 		m->tso_segsz = hdr->gso_size;
1211 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1212 			case VIRTIO_NET_HDR_GSO_TCPV4:
1213 			case VIRTIO_NET_HDR_GSO_TCPV6:
1214 				m->ol_flags |= PKT_RX_LRO | \
1215 					PKT_RX_L4_CKSUM_NONE;
1216 				break;
1217 			default:
1218 				return -EINVAL;
1219 		}
1220 	}
1221 
1222 	return 0;
1223 }
1224 
1225 #define VIRTIO_MBUF_BURST_SZ 64
1226 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
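/*
 * When a burst is larger than DESC_PER_CACHELINE, it is trimmed so that the
 * used-ring consumer index ends on a multiple of DESC_PER_CACHELINE,
 * keeping each burst aligned to whole cache lines of descriptors
 * (presumably to avoid sharing a partially consumed cache line).
 */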
1227 uint16_t
1228 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1229 {
1230 	struct virtnet_rx *rxvq = rx_queue;
1231 	struct virtqueue *vq = rxvq->vq;
1232 	struct virtio_hw *hw = vq->hw;
1233 	struct rte_mbuf *rxm;
1234 	uint16_t nb_used, num, nb_rx;
1235 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1236 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1237 	int error;
1238 	uint32_t i, nb_enqueued;
1239 	uint32_t hdr_size;
1240 	struct virtio_net_hdr *hdr;
1241 
1242 	nb_rx = 0;
1243 	if (unlikely(hw->started == 0))
1244 		return nb_rx;
1245 
1246 	nb_used = VIRTQUEUE_NUSED(vq);
1247 
1248 	virtio_rmb(hw->weak_barriers);
1249 
1250 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1251 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1252 		num = VIRTIO_MBUF_BURST_SZ;
1253 	if (likely(num > DESC_PER_CACHELINE))
1254 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1255 
1256 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1257 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1258 
1259 	nb_enqueued = 0;
1260 	hdr_size = hw->vtnet_hdr_size;
1261 
1262 	for (i = 0; i < num ; i++) {
1263 		rxm = rcv_pkts[i];
1264 
1265 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1266 
1267 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1268 			PMD_RX_LOG(ERR, "Packet drop");
1269 			nb_enqueued++;
1270 			virtio_discard_rxbuf(vq, rxm);
1271 			rxvq->stats.errors++;
1272 			continue;
1273 		}
1274 
1275 		rxm->port = rxvq->port_id;
1276 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1277 		rxm->ol_flags = 0;
1278 		rxm->vlan_tci = 0;
1279 
1280 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1281 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1282 
1283 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1284 			RTE_PKTMBUF_HEADROOM - hdr_size);
1285 
1286 		if (hw->vlan_strip)
1287 			rte_vlan_strip(rxm);
1288 
1289 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1290 			virtio_discard_rxbuf(vq, rxm);
1291 			rxvq->stats.errors++;
1292 			continue;
1293 		}
1294 
1295 		virtio_rx_stats_updated(rxvq, rxm);
1296 
1297 		rx_pkts[nb_rx++] = rxm;
1298 	}
1299 
1300 	rxvq->stats.packets += nb_rx;
1301 
1302 	/* Allocate new mbufs for the used descriptors */
1303 	if (likely(!virtqueue_full(vq))) {
1304 		uint16_t free_cnt = vq->vq_free_cnt;
1305 		struct rte_mbuf *new_pkts[free_cnt];
1306 
1307 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1308 						free_cnt) == 0)) {
1309 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1310 					free_cnt);
1311 			if (unlikely(error)) {
1312 				for (i = 0; i < free_cnt; i++)
1313 					rte_pktmbuf_free(new_pkts[i]);
1314 			}
1315 			nb_enqueued += free_cnt;
1316 		} else {
1317 			struct rte_eth_dev *dev =
1318 				&rte_eth_devices[rxvq->port_id];
1319 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1320 		}
1321 	}
1322 
1323 	if (likely(nb_enqueued)) {
1324 		vq_update_avail_idx(vq);
1325 
1326 		if (unlikely(virtqueue_kick_prepare(vq))) {
1327 			virtqueue_notify(vq);
1328 			PMD_RX_LOG(DEBUG, "Notified");
1329 		}
1330 	}
1331 
1332 	return nb_rx;
1333 }
1334 
1335 uint16_t
1336 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1337 			uint16_t nb_pkts)
1338 {
1339 	struct virtnet_rx *rxvq = rx_queue;
1340 	struct virtqueue *vq = rxvq->vq;
1341 	struct virtio_hw *hw = vq->hw;
1342 	struct rte_mbuf *rxm;
1343 	uint16_t num, nb_rx;
1344 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1345 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1346 	int error;
1347 	uint32_t i, nb_enqueued;
1348 	uint32_t hdr_size;
1349 	struct virtio_net_hdr *hdr;
1350 
1351 	nb_rx = 0;
1352 	if (unlikely(hw->started == 0))
1353 		return nb_rx;
1354 
1355 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1356 	if (likely(num > DESC_PER_CACHELINE))
1357 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1358 
1359 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1360 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1361 
1362 	nb_enqueued = 0;
1363 	hdr_size = hw->vtnet_hdr_size;
1364 
1365 	for (i = 0; i < num; i++) {
1366 		rxm = rcv_pkts[i];
1367 
1368 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1369 
1370 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1371 			PMD_RX_LOG(ERR, "Packet drop");
1372 			nb_enqueued++;
1373 			virtio_discard_rxbuf(vq, rxm);
1374 			rxvq->stats.errors++;
1375 			continue;
1376 		}
1377 
1378 		rxm->port = rxvq->port_id;
1379 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1380 		rxm->ol_flags = 0;
1381 		rxm->vlan_tci = 0;
1382 
1383 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1384 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1385 
1386 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1387 			RTE_PKTMBUF_HEADROOM - hdr_size);
1388 
1389 		if (hw->vlan_strip)
1390 			rte_vlan_strip(rxm);
1391 
1392 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1393 			virtio_discard_rxbuf(vq, rxm);
1394 			rxvq->stats.errors++;
1395 			continue;
1396 		}
1397 
1398 		virtio_rx_stats_updated(rxvq, rxm);
1399 
1400 		rx_pkts[nb_rx++] = rxm;
1401 	}
1402 
1403 	rxvq->stats.packets += nb_rx;
1404 
1405 	/* Allocate new mbufs for the used descriptors */
1406 	if (likely(!virtqueue_full(vq))) {
1407 		uint16_t free_cnt = vq->vq_free_cnt;
1408 		struct rte_mbuf *new_pkts[free_cnt];
1409 
1410 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1411 						free_cnt) == 0)) {
1412 			error = virtqueue_enqueue_recv_refill_packed(vq,
1413 					new_pkts, free_cnt);
1414 			if (unlikely(error)) {
1415 				for (i = 0; i < free_cnt; i++)
1416 					rte_pktmbuf_free(new_pkts[i]);
1417 			}
1418 			nb_enqueued += free_cnt;
1419 		} else {
1420 			struct rte_eth_dev *dev =
1421 				&rte_eth_devices[rxvq->port_id];
1422 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1423 		}
1424 	}
1425 
1426 	if (likely(nb_enqueued)) {
1427 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1428 			virtqueue_notify(vq);
1429 			PMD_RX_LOG(DEBUG, "Notified");
1430 		}
1431 	}
1432 
1433 	return nb_rx;
1434 }
1435 
1436 
1437 uint16_t
1438 virtio_recv_pkts_inorder(void *rx_queue,
1439 			struct rte_mbuf **rx_pkts,
1440 			uint16_t nb_pkts)
1441 {
1442 	struct virtnet_rx *rxvq = rx_queue;
1443 	struct virtqueue *vq = rxvq->vq;
1444 	struct virtio_hw *hw = vq->hw;
1445 	struct rte_mbuf *rxm;
1446 	struct rte_mbuf *prev = NULL;
1447 	uint16_t nb_used, num, nb_rx;
1448 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1449 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1450 	int error;
1451 	uint32_t nb_enqueued;
1452 	uint32_t seg_num;
1453 	uint32_t seg_res;
1454 	uint32_t hdr_size;
1455 	int32_t i;
1456 
1457 	nb_rx = 0;
1458 	if (unlikely(hw->started == 0))
1459 		return nb_rx;
1460 
1461 	nb_used = VIRTQUEUE_NUSED(vq);
1462 	nb_used = RTE_MIN(nb_used, nb_pkts);
1463 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1464 
1465 	virtio_rmb(hw->weak_barriers);
1466 
1467 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1468 
1469 	nb_enqueued = 0;
1470 	seg_num = 1;
1471 	seg_res = 0;
1472 	hdr_size = hw->vtnet_hdr_size;
1473 
1474 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1475 
1476 	for (i = 0; i < num; i++) {
1477 		struct virtio_net_hdr_mrg_rxbuf *header;
1478 
1479 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1480 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1481 
1482 		rxm = rcv_pkts[i];
1483 
1484 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1485 			PMD_RX_LOG(ERR, "Packet drop");
1486 			nb_enqueued++;
1487 			virtio_discard_rxbuf_inorder(vq, rxm);
1488 			rxvq->stats.errors++;
1489 			continue;
1490 		}
1491 
1492 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1493 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1494 			 - hdr_size);
1495 
1496 		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1497 			seg_num = header->num_buffers;
1498 			if (seg_num == 0)
1499 				seg_num = 1;
1500 		} else {
1501 			seg_num = 1;
1502 		}
1503 
1504 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1505 		rxm->nb_segs = seg_num;
1506 		rxm->ol_flags = 0;
1507 		rxm->vlan_tci = 0;
1508 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1509 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1510 
1511 		rxm->port = rxvq->port_id;
1512 
1513 		rx_pkts[nb_rx] = rxm;
1514 		prev = rxm;
1515 
1516 		if (vq->hw->has_rx_offload &&
1517 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1518 			virtio_discard_rxbuf_inorder(vq, rxm);
1519 			rxvq->stats.errors++;
1520 			continue;
1521 		}
1522 
1523 		if (hw->vlan_strip)
1524 			rte_vlan_strip(rx_pkts[nb_rx]);
1525 
1526 		seg_res = seg_num - 1;
1527 
1528 		/* Merge remaining segments */
1529 		while (seg_res != 0 && i < (num - 1)) {
1530 			i++;
1531 
1532 			rxm = rcv_pkts[i];
1533 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1534 			rxm->pkt_len = (uint32_t)(len[i]);
1535 			rxm->data_len = (uint16_t)(len[i]);
1536 
1537 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1538 
1539 			prev->next = rxm;
1540 			prev = rxm;
1541 			seg_res -= 1;
1542 		}
1543 
1544 		if (!seg_res) {
1545 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1546 			nb_rx++;
1547 		}
1548 	}
1549 
1550 	/* The last packet may still need more segments merged */
1551 	while (seg_res != 0) {
1552 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1553 					VIRTIO_MBUF_BURST_SZ);
1554 
1555 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1556 			virtio_rmb(hw->weak_barriers);
1557 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1558 							   rcv_cnt);
1559 			uint16_t extra_idx = 0;
1560 
1561 			rcv_cnt = num;
1562 			while (extra_idx < rcv_cnt) {
1563 				rxm = rcv_pkts[extra_idx];
1564 				rxm->data_off =
1565 					RTE_PKTMBUF_HEADROOM - hdr_size;
1566 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1567 				rxm->data_len = (uint16_t)(len[extra_idx]);
1568 				prev->next = rxm;
1569 				prev = rxm;
1570 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1571 				extra_idx += 1;
1572 			}
1573 			seg_res -= rcv_cnt;
1574 
1575 			if (!seg_res) {
1576 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1577 				nb_rx++;
1578 			}
1579 		} else {
1580 			PMD_RX_LOG(ERR,
1581 					"No enough segments for packet.");
1582 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1583 			rxvq->stats.errors++;
1584 			break;
1585 		}
1586 	}
1587 
1588 	rxvq->stats.packets += nb_rx;
1589 
1590 	/* Allocate new mbufs for the used descriptors */
1591 
1592 	if (likely(!virtqueue_full(vq))) {
1593 		/* free_cnt may include mrg descs */
1594 		uint16_t free_cnt = vq->vq_free_cnt;
1595 		struct rte_mbuf *new_pkts[free_cnt];
1596 
1597 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1598 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1599 					free_cnt);
1600 			if (unlikely(error)) {
1601 				for (i = 0; i < free_cnt; i++)
1602 					rte_pktmbuf_free(new_pkts[i]);
1603 			}
1604 			nb_enqueued += free_cnt;
1605 		} else {
1606 			struct rte_eth_dev *dev =
1607 				&rte_eth_devices[rxvq->port_id];
1608 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1609 		}
1610 	}
1611 
1612 	if (likely(nb_enqueued)) {
1613 		vq_update_avail_idx(vq);
1614 
1615 		if (unlikely(virtqueue_kick_prepare(vq))) {
1616 			virtqueue_notify(vq);
1617 			PMD_RX_LOG(DEBUG, "Notified");
1618 		}
1619 	}
1620 
1621 	return nb_rx;
1622 }
1623 
1624 uint16_t
1625 virtio_recv_mergeable_pkts(void *rx_queue,
1626 			struct rte_mbuf **rx_pkts,
1627 			uint16_t nb_pkts)
1628 {
1629 	struct virtnet_rx *rxvq = rx_queue;
1630 	struct virtqueue *vq = rxvq->vq;
1631 	struct virtio_hw *hw = vq->hw;
1632 	struct rte_mbuf *rxm;
1633 	struct rte_mbuf *prev = NULL;
1634 	uint16_t nb_used, num, nb_rx = 0;
1635 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1636 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1637 	int error;
1638 	uint32_t nb_enqueued = 0;
1639 	uint32_t seg_num = 0;
1640 	uint32_t seg_res = 0;
1641 	uint32_t hdr_size = hw->vtnet_hdr_size;
1642 	int32_t i;
1643 
1644 	if (unlikely(hw->started == 0))
1645 		return nb_rx;
1646 
1647 	nb_used = VIRTQUEUE_NUSED(vq);
1648 
1649 	virtio_rmb(hw->weak_barriers);
1650 
1651 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1652 
1653 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1654 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1655 		num = VIRTIO_MBUF_BURST_SZ;
1656 	if (likely(num > DESC_PER_CACHELINE))
1657 		num = num - ((vq->vq_used_cons_idx + num) %
1658 				DESC_PER_CACHELINE);
1659 
1660 
1661 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1662 
1663 	for (i = 0; i < num; i++) {
1664 		struct virtio_net_hdr_mrg_rxbuf *header;
1665 
1666 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1667 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1668 
1669 		rxm = rcv_pkts[i];
1670 
1671 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1672 			PMD_RX_LOG(ERR, "Packet drop");
1673 			nb_enqueued++;
1674 			virtio_discard_rxbuf(vq, rxm);
1675 			rxvq->stats.errors++;
1676 			continue;
1677 		}
1678 
1679 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1680 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1681 			 - hdr_size);
1682 		seg_num = header->num_buffers;
1683 		if (seg_num == 0)
1684 			seg_num = 1;
1685 
1686 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1687 		rxm->nb_segs = seg_num;
1688 		rxm->ol_flags = 0;
1689 		rxm->vlan_tci = 0;
1690 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1691 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1692 
1693 		rxm->port = rxvq->port_id;
1694 
1695 		rx_pkts[nb_rx] = rxm;
1696 		prev = rxm;
1697 
1698 		if (hw->has_rx_offload &&
1699 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1700 			virtio_discard_rxbuf(vq, rxm);
1701 			rxvq->stats.errors++;
1702 			continue;
1703 		}
1704 
1705 		if (hw->vlan_strip)
1706 			rte_vlan_strip(rx_pkts[nb_rx]);
1707 
1708 		seg_res = seg_num - 1;
1709 
1710 		/* Merge remaining segments */
1711 		while (seg_res != 0 && i < (num - 1)) {
1712 			i++;
1713 
1714 			rxm = rcv_pkts[i];
1715 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1716 			rxm->pkt_len = (uint32_t)(len[i]);
1717 			rxm->data_len = (uint16_t)(len[i]);
1718 
1719 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1720 
1721 			prev->next = rxm;
1722 			prev = rxm;
1723 			seg_res -= 1;
1724 		}
1725 
1726 		if (!seg_res) {
1727 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1728 			nb_rx++;
1729 		}
1730 	}
1731 
1732 	/* The last packet may still need more segments merged */
1733 	while (seg_res != 0) {
1734 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1735 					VIRTIO_MBUF_BURST_SZ);
1736 
1737 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1738 			virtio_rmb(hw->weak_barriers);
1739 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1740 							   rcv_cnt);
1741 			uint16_t extra_idx = 0;
1742 
1743 			rcv_cnt = num;
1744 			while (extra_idx < rcv_cnt) {
1745 				rxm = rcv_pkts[extra_idx];
1746 				rxm->data_off =
1747 					RTE_PKTMBUF_HEADROOM - hdr_size;
1748 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1749 				rxm->data_len = (uint16_t)(len[extra_idx]);
1750 				prev->next = rxm;
1751 				prev = rxm;
1752 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1753 				extra_idx += 1;
1754 			}
1755 			seg_res -= rcv_cnt;
1756 
1757 			if (!seg_res) {
1758 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1759 				nb_rx++;
1760 			}
1761 		} else {
1762 			PMD_RX_LOG(ERR,
1763 					"No enough segments for packet.");
1764 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1765 			rxvq->stats.errors++;
1766 			break;
1767 		}
1768 	}
1769 
1770 	rxvq->stats.packets += nb_rx;
1771 
1772 	/* Allocate new mbufs for the used descriptors */
1773 	if (likely(!virtqueue_full(vq))) {
1774 		/* free_cnt may include mrg descs */
1775 		uint16_t free_cnt = vq->vq_free_cnt;
1776 		struct rte_mbuf *new_pkts[free_cnt];
1777 
1778 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1779 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1780 					free_cnt);
1781 			if (unlikely(error)) {
1782 				for (i = 0; i < free_cnt; i++)
1783 					rte_pktmbuf_free(new_pkts[i]);
1784 			}
1785 			nb_enqueued += free_cnt;
1786 		} else {
1787 			struct rte_eth_dev *dev =
1788 				&rte_eth_devices[rxvq->port_id];
1789 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1790 		}
1791 	}
1792 
1793 	if (likely(nb_enqueued)) {
1794 		vq_update_avail_idx(vq);
1795 
1796 		if (unlikely(virtqueue_kick_prepare(vq))) {
1797 			virtqueue_notify(vq);
1798 			PMD_RX_LOG(DEBUG, "Notified");
1799 		}
1800 	}
1801 
1802 	return nb_rx;
1803 }
1804 
1805 uint16_t
1806 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1807 			struct rte_mbuf **rx_pkts,
1808 			uint16_t nb_pkts)
1809 {
1810 	struct virtnet_rx *rxvq = rx_queue;
1811 	struct virtqueue *vq = rxvq->vq;
1812 	struct virtio_hw *hw = vq->hw;
1813 	struct rte_mbuf *rxm;
1814 	struct rte_mbuf *prev = NULL;
1815 	uint16_t num, nb_rx = 0;
1816 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1817 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1818 	uint32_t nb_enqueued = 0;
1819 	uint32_t seg_num = 0;
1820 	uint32_t seg_res = 0;
1821 	uint32_t hdr_size = hw->vtnet_hdr_size;
1822 	int32_t i;
1823 	int error;
1824 
1825 	if (unlikely(hw->started == 0))
1826 		return nb_rx;
1827 
1828 
1829 	num = nb_pkts;
1830 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1831 		num = VIRTIO_MBUF_BURST_SZ;
1832 	if (likely(num > DESC_PER_CACHELINE))
1833 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1834 
1835 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1836 
1837 	for (i = 0; i < num; i++) {
1838 		struct virtio_net_hdr_mrg_rxbuf *header;
1839 
1840 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1841 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1842 
1843 		rxm = rcv_pkts[i];
1844 
1845 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1846 			PMD_RX_LOG(ERR, "Packet drop");
1847 			nb_enqueued++;
1848 			virtio_discard_rxbuf(vq, rxm);
1849 			rxvq->stats.errors++;
1850 			continue;
1851 		}
1852 
1853 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1854 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1855 		seg_num = header->num_buffers;
1856 
1857 		if (seg_num == 0)
1858 			seg_num = 1;
1859 
1860 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1861 		rxm->nb_segs = seg_num;
1862 		rxm->ol_flags = 0;
1863 		rxm->vlan_tci = 0;
1864 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1865 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1866 
1867 		rxm->port = rxvq->port_id;
1868 		rx_pkts[nb_rx] = rxm;
1869 		prev = rxm;
1870 
1871 		if (hw->has_rx_offload &&
1872 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1873 			virtio_discard_rxbuf(vq, rxm);
1874 			rxvq->stats.errors++;
1875 			continue;
1876 		}
1877 
1878 		if (hw->vlan_strip)
1879 			rte_vlan_strip(rx_pkts[nb_rx]);
1880 
1881 		seg_res = seg_num - 1;
1882 
1883 		/* Merge remaining segments */
1884 		while (seg_res != 0 && i < (num - 1)) {
1885 			i++;
1886 
1887 			rxm = rcv_pkts[i];
1888 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1889 			rxm->pkt_len = (uint32_t)(len[i]);
1890 			rxm->data_len = (uint16_t)(len[i]);
1891 
1892 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1893 
1894 			prev->next = rxm;
1895 			prev = rxm;
1896 			seg_res -= 1;
1897 		}
1898 
1899 		if (!seg_res) {
1900 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1901 			nb_rx++;
1902 		}
1903 	}
1904 
1905 	/* The last packet may still need more segments merged */
1906 	while (seg_res != 0) {
1907 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1908 					VIRTIO_MBUF_BURST_SZ);
1909 		uint16_t extra_idx = 0;
1910 
1911 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1912 				len, rcv_cnt);
1913 		if (unlikely(rcv_cnt == 0)) {
1914 			PMD_RX_LOG(ERR, "No enough segments for packet.");
1915 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1916 			rxvq->stats.errors++;
1917 			break;
1918 		}
1919 
1920 		while (extra_idx < rcv_cnt) {
1921 			rxm = rcv_pkts[extra_idx];
1922 
1923 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1924 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1925 			rxm->data_len = (uint16_t)(len[extra_idx]);
1926 
1927 			prev->next = rxm;
1928 			prev = rxm;
1929 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1930 			extra_idx += 1;
1931 		}
1932 		seg_res -= rcv_cnt;
1933 		if (!seg_res) {
1934 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1935 			nb_rx++;
1936 		}
1937 	}
1938 
1939 	rxvq->stats.packets += nb_rx;
1940 
	/* Allocate new mbufs to refill the used descriptors */
1942 	if (likely(!virtqueue_full(vq))) {
1943 		/* free_cnt may include mrg descs */
1944 		uint16_t free_cnt = vq->vq_free_cnt;
1945 		struct rte_mbuf *new_pkts[free_cnt];
1946 
1947 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1948 			error = virtqueue_enqueue_recv_refill_packed(vq,
1949 					new_pkts, free_cnt);
1950 			if (unlikely(error)) {
1951 				for (i = 0; i < free_cnt; i++)
1952 					rte_pktmbuf_free(new_pkts[i]);
1953 			}
1954 			nb_enqueued += free_cnt;
1955 		} else {
1956 			struct rte_eth_dev *dev =
1957 				&rte_eth_devices[rxvq->port_id];
1958 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1959 		}
1960 	}
1961 
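	/* Notify the device about the refilled buffers unless it has
	 * suppressed notifications.
	 */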
1962 	if (likely(nb_enqueued)) {
1963 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1964 			virtqueue_notify(vq);
1965 			PMD_RX_LOG(DEBUG, "Notified");
1966 		}
1967 	}
1968 
1969 	return nb_rx;
1970 }
1971 
1972 uint16_t
1973 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1974 			uint16_t nb_pkts)
1975 {
1976 	uint16_t nb_tx;
1977 	int error;
1978 
1979 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1980 		struct rte_mbuf *m = tx_pkts[nb_tx];
1981 
1982 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1983 		error = rte_validate_tx_offload(m);
1984 		if (unlikely(error)) {
1985 			rte_errno = -error;
1986 			break;
1987 		}
1988 #endif
1989 
1990 		/* Do VLAN tag insertion */
1991 		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1992 			error = rte_vlan_insert(&m);
1993 			/* rte_vlan_insert() may change pointer
1994 			 * even in the case of failure
1995 			 */
1996 			tx_pkts[nb_tx] = m;
1997 
1998 			if (unlikely(error)) {
1999 				rte_errno = -error;
2000 				break;
2001 			}
2002 		}
2003 
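		/* Offloaded checksums need the pseudo-header checksum seeded
		 * in the packet, since the backend only completes the L4
		 * checksum from csum_start/csum_offset.
		 */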
2004 		error = rte_net_intel_cksum_prepare(m);
2005 		if (unlikely(error)) {
2006 			rte_errno = -error;
2007 			break;
2008 		}
2009 
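		/* For TSO, the virtio spec expects a pseudo-header checksum
		 * that excludes the TCP payload length; fix it up here.
		 */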
2010 		if (m->ol_flags & PKT_TX_TCP_SEG)
2011 			virtio_tso_fix_cksum(m);
2012 	}
2013 
2014 	return nb_tx;
2015 }
2016 
2017 uint16_t
2018 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
2019 			uint16_t nb_pkts)
2020 {
2021 	struct virtnet_tx *txvq = tx_queue;
2022 	struct virtqueue *vq = txvq->vq;
2023 	struct virtio_hw *hw = vq->hw;
2024 	uint16_t hdr_size = hw->vtnet_hdr_size;
2025 	uint16_t nb_tx = 0;
2026 	bool in_order = hw->use_inorder_tx;
2027 
2028 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2029 		return nb_tx;
2030 
2031 	if (unlikely(nb_pkts < 1))
2032 		return nb_pkts;
2033 
2034 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2035 
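	/* Reclaim used descriptors up front if the burst may not fit in the
	 * currently free entries.
	 */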
2036 	if (nb_pkts > vq->vq_free_cnt)
2037 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
2038 					   in_order);
2039 
2040 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2041 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2042 		int can_push = 0, slots, need;
2043 
		/* Optimize ring usage: push the net header into the headroom
		 * of a single direct, uniquely owned segment (device accepts
		 * any layout), so one descriptor covers the whole packet.
		 */
2045 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2046 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2047 		    rte_mbuf_refcnt_read(txm) == 1 &&
2048 		    RTE_MBUF_DIRECT(txm) &&
2049 		    txm->nb_segs == 1 &&
2050 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
2051 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2052 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2053 			can_push = 1;
2054 
		/* How many main ring entries are needed for this Tx?
2056 		 * any_layout => number of segments
2057 		 * default    => number of segments + 1
2058 		 */
2059 		slots = txm->nb_segs + !can_push;
2060 		need = slots - vq->vq_free_cnt;
2061 
		/* A positive value means more free vring descriptors are needed; reclaim used entries */
2063 		if (unlikely(need > 0)) {
2064 			virtio_xmit_cleanup_packed(vq, need, in_order);
2065 			need = slots - vq->vq_free_cnt;
2066 			if (unlikely(need > 0)) {
2067 				PMD_TX_LOG(ERR,
2068 					   "No free tx descriptors to transmit");
2069 				break;
2070 			}
2071 		}
2072 
2073 		/* Enqueue Packet buffers */
2074 		if (can_push)
2075 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
2076 		else
2077 			virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
2078 						      in_order);
2079 
2080 		virtio_update_packet_stats(&txvq->stats, txm);
2081 	}
2082 
2083 	txvq->stats.packets += nb_tx;
2084 
2085 	if (likely(nb_tx)) {
2086 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
2087 			virtqueue_notify(vq);
2088 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2089 		}
2090 	}
2091 
2092 	return nb_tx;
2093 }
2094 
2095 uint16_t
2096 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2097 {
2098 	struct virtnet_tx *txvq = tx_queue;
2099 	struct virtqueue *vq = txvq->vq;
2100 	struct virtio_hw *hw = vq->hw;
2101 	uint16_t hdr_size = hw->vtnet_hdr_size;
2102 	uint16_t nb_used, nb_tx = 0;
2103 
2104 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2105 		return nb_tx;
2106 
2107 	if (unlikely(nb_pkts < 1))
2108 		return nb_pkts;
2109 
2110 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2111 	nb_used = VIRTQUEUE_NUSED(vq);
2112 
2113 	virtio_rmb(hw->weak_barriers);
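	/* Start reclaiming used descriptors once the in-flight count exceeds
	 * the free threshold.
	 */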
2114 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2115 		virtio_xmit_cleanup(vq, nb_used);
2116 
2117 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2118 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2119 		int can_push = 0, use_indirect = 0, slots, need;
2120 
2121 		/* optimize ring usage */
2122 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2123 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2124 		    rte_mbuf_refcnt_read(txm) == 1 &&
2125 		    RTE_MBUF_DIRECT(txm) &&
2126 		    txm->nb_segs == 1 &&
2127 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
2128 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2129 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2130 			can_push = 1;
2131 		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
2132 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
2133 			use_indirect = 1;
2134 
		/* How many main ring entries are needed for this Tx?
2136 		 * any_layout => number of segments
2137 		 * indirect   => 1
2138 		 * default    => number of segments + 1
2139 		 */
2140 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
2141 		need = slots - vq->vq_free_cnt;
2142 
		/* A positive value means more free vring descriptors are needed; reclaim used entries */
2144 		if (unlikely(need > 0)) {
2145 			nb_used = VIRTQUEUE_NUSED(vq);
2146 			virtio_rmb(hw->weak_barriers);
2147 			need = RTE_MIN(need, (int)nb_used);
2148 
2149 			virtio_xmit_cleanup(vq, need);
2150 			need = slots - vq->vq_free_cnt;
2151 			if (unlikely(need > 0)) {
2152 				PMD_TX_LOG(ERR,
2153 					   "No free tx descriptors to transmit");
2154 				break;
2155 			}
2156 		}
2157 
2158 		/* Enqueue Packet buffers */
2159 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
2160 			can_push, 0);
2161 
2162 		virtio_update_packet_stats(&txvq->stats, txm);
2163 	}
2164 
2165 	txvq->stats.packets += nb_tx;
2166 
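	/* Publish the updated avail index, then kick the backend unless it
	 * disabled notifications.
	 */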
2167 	if (likely(nb_tx)) {
2168 		vq_update_avail_idx(vq);
2169 
2170 		if (unlikely(virtqueue_kick_prepare(vq))) {
2171 			virtqueue_notify(vq);
2172 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2173 		}
2174 	}
2175 
2176 	return nb_tx;
2177 }
2178 
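/* Try to make room for 'need' more descriptors by reclaiming completed
 * transmits. Returns how many descriptors are still missing; a value <= 0
 * means enough are now free.
 */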
2179 static __rte_always_inline int
2180 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
2181 {
2182 	uint16_t nb_used, nb_clean, nb_descs;
2183 	struct virtio_hw *hw = vq->hw;
2184 
2185 	nb_descs = vq->vq_free_cnt + need;
2186 	nb_used = VIRTQUEUE_NUSED(vq);
2187 	virtio_rmb(hw->weak_barriers);
2188 	nb_clean = RTE_MIN(need, (int)nb_used);
2189 
2190 	virtio_xmit_cleanup_inorder(vq, nb_clean);
2191 
2192 	return nb_descs - vq->vq_free_cnt;
2193 }
2194 
2195 uint16_t
2196 virtio_xmit_pkts_inorder(void *tx_queue,
2197 			struct rte_mbuf **tx_pkts,
2198 			uint16_t nb_pkts)
2199 {
2200 	struct virtnet_tx *txvq = tx_queue;
2201 	struct virtqueue *vq = txvq->vq;
2202 	struct virtio_hw *hw = vq->hw;
2203 	uint16_t hdr_size = hw->vtnet_hdr_size;
2204 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
2205 	struct rte_mbuf *inorder_pkts[nb_pkts];
2206 	int need;
2207 
2208 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2209 		return nb_tx;
2210 
2211 	if (unlikely(nb_pkts < 1))
2212 		return nb_pkts;
2213 
2214 	VIRTQUEUE_DUMP(vq);
2215 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2216 	nb_used = VIRTQUEUE_NUSED(vq);
2217 
2218 	virtio_rmb(hw->weak_barriers);
2219 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2220 		virtio_xmit_cleanup_inorder(vq, nb_used);
2221 
2222 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2223 		struct rte_mbuf *txm = tx_pkts[nb_tx];
2224 		int slots;
2225 
		/* Optimize ring usage: packets whose headroom can carry the
		 * net header are batched and enqueued in-order in one call,
		 * using one descriptor each.
		 */
2227 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2228 		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2229 		     rte_mbuf_refcnt_read(txm) == 1 &&
2230 		     RTE_MBUF_DIRECT(txm) &&
2231 		     txm->nb_segs == 1 &&
2232 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
2233 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2234 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
2235 			inorder_pkts[nb_inorder_pkts] = txm;
2236 			nb_inorder_pkts++;
2237 
2238 			continue;
2239 		}
2240 
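		/* This packet cannot take the in-order fast path: flush the
		 * batch collected so far to preserve ordering before falling
		 * back to the regular enqueue.
		 */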
2241 		if (nb_inorder_pkts) {
2242 			need = nb_inorder_pkts - vq->vq_free_cnt;
2243 			if (unlikely(need > 0)) {
2244 				need = virtio_xmit_try_cleanup_inorder(vq,
2245 								       need);
2246 				if (unlikely(need > 0)) {
2247 					PMD_TX_LOG(ERR,
2248 						"No free tx descriptors to "
2249 						"transmit");
2250 					break;
2251 				}
2252 			}
2253 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2254 							nb_inorder_pkts);
2255 			nb_inorder_pkts = 0;
2256 		}
2257 
2258 		slots = txm->nb_segs + 1;
2259 		need = slots - vq->vq_free_cnt;
2260 		if (unlikely(need > 0)) {
2261 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
2262 
2263 			if (unlikely(need > 0)) {
2264 				PMD_TX_LOG(ERR,
2265 					"No free tx descriptors to transmit");
2266 				break;
2267 			}
2268 		}
2269 		/* Enqueue Packet buffers */
2270 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2271 
2272 		virtio_update_packet_stats(&txvq->stats, txm);
2273 	}
2274 
	/* Transmit the remaining batched in-order packets */
2276 	if (nb_inorder_pkts) {
2277 		need = nb_inorder_pkts - vq->vq_free_cnt;
2278 		if (unlikely(need > 0)) {
2279 			need = virtio_xmit_try_cleanup_inorder(vq,
2280 								  need);
2281 			if (unlikely(need > 0)) {
2282 				PMD_TX_LOG(ERR,
2283 					"No free tx descriptors to transmit");
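				/* Enqueue only as many batched packets as
				 * descriptors remain (one each) and drop the
				 * shortfall from the transmit count.
				 */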
2284 				nb_inorder_pkts = vq->vq_free_cnt;
2285 				nb_tx -= need;
2286 			}
2287 		}
2288 
2289 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2290 						nb_inorder_pkts);
2291 	}
2292 
2293 	txvq->stats.packets += nb_tx;
2294 
2295 	if (likely(nb_tx)) {
2296 		vq_update_avail_idx(vq);
2297 
2298 		if (unlikely(virtqueue_kick_prepare(vq))) {
2299 			virtqueue_notify(vq);
2300 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2301 		}
2302 	}
2303 
2304 	VIRTQUEUE_DUMP(vq);
2305 
2306 	return nb_tx;
2307 }
2308